/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cputable.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl-base.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif
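/*
 * Serializes broadcast tlbie on CPUs that lack MMU_FTR_LOCKLESS_TLBIE;
 * see tlbie() and native_flush_hash_range() below.
 */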
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
        /*
         * We need 14 to 65 bits of va for a tlbie of 4K page
         * With vpn we ignore the lower VPN_SHIFT bits already.
         * And top two bits are already ignored because we can
         * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
         * of 12.
         */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64 bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
                        ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
                asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");

                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                /*
                 * We don't need all the bits, but rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va. (0...65) and we need
                 * 58..64 bits of va.
                 */
                va |= (vpn & 0xfe); /* AVAL */
                asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
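/*
 * __tlbiel() is the local form: the hand-encoded 0x7c000224 opcode is
 * tlbiel, which only invalidates the issuing CPU's TLB and therefore
 * needs neither a tlbsync broadcast nor native_tlbie_lock serialization.
 */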
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
        /* VPN_SHIFT can be at most 12 */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64 bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
                        ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
                asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
                             : : "r"(va) : "memory");

                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                /*
                 * We don't need all the bits, but rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va. (0...65) and we need
                 * 58..64 bits of va.
                 */
                asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
                             : : "r"(va) : "memory");
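/*
 * tlbie() picks between the two: tlbiel is used only when the flush is
 * CPU-local, the MMU supports tlbiel for this page size and no cxl
 * contexts are active; otherwise a broadcast tlbie is issued, serialized
 * by native_tlbie_lock on CPUs without MMU_FTR_LOCKLESS_TLBIE.
 */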
static inline void tlbie(unsigned long vpn, int psize, int apsize,
                         int ssize, int local)
        unsigned int use_local;
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

        use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();
                use_local = mmu_psize_defs[psize].tlbiel;
        if (lock_tlbie && !use_local)
                raw_spin_lock(&native_tlbie_lock);
        asm volatile("ptesync": : :"memory");
                __tlbiel(vpn, psize, apsize, ssize);
                asm volatile("ptesync": : :"memory");
                __tlbie(vpn, psize, apsize, ssize);
                asm volatile("eieio; tlbsync; ptesync": : :"memory");
        if (lock_tlbie && !use_local)
                raw_spin_unlock(&native_tlbie_lock);
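/*
 * Per-HPTE locking: a software bit (HPTE_LOCK_BIT) in the HPTE's first
 * doubleword acts as a spinlock.  native_lock_hpte() spins until it can
 * set the bit atomically; native_unlock_hpte() clears it with release
 * semantics.
 */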
static inline void native_lock_hpte(struct hash_pte *hptep)
        unsigned long *word = (unsigned long *)&hptep->v;

                if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
                while (test_bit(HPTE_LOCK_BIT, word))

static inline void native_unlock_hpte(struct hash_pte *hptep)
        unsigned long *word = (unsigned long *)&hptep->v;

        clear_bit_unlock(HPTE_LOCK_BIT, word);
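/*
 * Insert an HPTE into the given hash group: scan the slots for an invalid
 * entry, take the per-HPTE lock, then publish the entry by writing the
 * second doubleword before the first so the valid bit becomes visible
 * last.  The low bits of the return value identify the slot that was used.
 */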
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                        unsigned long pa, unsigned long rflags,
                        unsigned long vflags, int psize, int apsize, int ssize)
        struct hash_pte *hptep = htab_address + hpte_group;
        unsigned long hpte_v, hpte_r;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
                        " rflags=%lx, vflags=%lx, psize=%d)\n",
                        hpte_group, vpn, pa, rflags, vflags, psize);

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
                        native_unlock_hpte(hptep);

        if (i == HPTES_PER_GROUP)

        hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
        hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
                        i, hpte_v, hpte_r);

        hptep->r = cpu_to_be64(hpte_r);
        /* Guarantee the second dword is visible before the valid bit */
        eieio();
        /*
         * Now set the first dword including the valid bit
         * NOTE: this also unlocks the hpte
         */
        hptep->v = cpu_to_be64(hpte_v);

        __asm__ __volatile__ ("ptesync" : : : "memory");

        return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
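/*
 * native_hpte_remove() evicts one entry from a full group: starting at a
 * pseudo-random offset (mftb() & 0x7) it looks for a valid, non-bolted
 * HPTE, re-checks it under the per-HPTE lock and clears it.  It does not
 * flush the TLB; see the comment in native_hpte_updatepp() below.
 */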
static long native_hpte_remove(unsigned long hpte_group)
        struct hash_pte *hptep;
        unsigned long hpte_v;

        DBG_LOW(" remove(group=%lx)\n", hpte_group);

        /* pick a random entry to start at */
        slot_offset = mftb() & 0x7;

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + hpte_group + slot_offset;
                hpte_v = be64_to_cpu(hptep->v);

                if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        hpte_v = be64_to_cpu(hptep->v);
                        if ((hpte_v & HPTE_V_VALID)
                            && !(hpte_v & HPTE_V_BOLTED))
                        native_unlock_hpte(hptep);

        if (i == HPTES_PER_GROUP)

        /* Invalidate the hpte. NOTE: this also unlocks it */
        hptep->v = 0;
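/*
 * Update the protection bits of an existing mapping.  HPTE_LOCAL_UPDATE in
 * @flags means the translation is only live on this CPU, so tlbiel can be
 * used; HPTE_NOHPTE_UPDATE means the fault happened because no HPTE was
 * present, so there can be no stale TLB entry and the tlbie is skipped.
 */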
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                 unsigned long vpn, int bpsize,
                                 int apsize, int ssize, unsigned long flags)
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v, want_v;
        int ret = 0, local = 0;

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);

        DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
                vpn, want_v & HPTE_V_AVPN, slot, newpp);

        hpte_v = be64_to_cpu(hptep->v);
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
         * random entry from it. When we do that we don't invalidate the TLB
         * (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
                DBG_LOW(" -> miss\n");

                native_lock_hpte(hptep);
                /* recheck with locks held */
                hpte_v = be64_to_cpu(hptep->v);
                if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
                             !(hpte_v & HPTE_V_VALID))) {

                        DBG_LOW(" -> hit\n");
                        /* Update the HPTE */
                        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                                ~(HPTE_R_PPP | HPTE_R_N)) |
                                               (newpp & (HPTE_R_PPP | HPTE_R_N |
                                                         HPTE_R_C | HPTE_R_R)));
                native_unlock_hpte(hptep);

        if (flags & HPTE_LOCAL_UPDATE)
                local = 1;
        /*
         * Ensure it is out of the tlb too if it is not a nohpte fault
         */
        if (!(flags & HPTE_NOHPTE_UPDATE))
                tlbie(vpn, bpsize, apsize, ssize, local);
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
        struct hash_pte *hptep;
        unsigned long want_v, hpte_v;

        hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
        want_v = hpte_encode_avpn(vpn, psize, ssize);

        /* Bolted mappings are only ever in the primary group */
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + slot;
                hpte_v = be64_to_cpu(hptep->v);

                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
                                       int psize, int ssize)
        struct hash_pte *hptep;

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);
        if (slot == -1)
                panic("could not find page to bolt\n");
        hptep = htab_address + slot;

        /* Update the HPTE */
        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                ~(HPTE_R_PPP | HPTE_R_N)) |
                               (newpp & (HPTE_R_PPP | HPTE_R_N)));
        /*
         * Ensure it is out of the tlb too. For bolted entries, the base and
         * actual page size are the same.
         */
        tlbie(vpn, psize, psize, ssize, 0);
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
                                   int bpsize, int apsize, int ssize, int local)
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v;
        unsigned long want_v;

        local_irq_save(flags);

        DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);
        native_lock_hpte(hptep);
        hpte_v = be64_to_cpu(hptep->v);

        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
         * random entry from it. When we do that we don't invalidate the TLB
         * (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                native_unlock_hpte(hptep);
        else
                /* Invalidate the hpte. NOTE: this also unlocks it */
                hptep->v = 0;

        /* Invalidate the TLB */
        tlbie(vpn, bpsize, apsize, ssize, local);

        local_irq_restore(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
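/*
 * Invalidate all HPTEs backing one hugepage (PMD) mapping.  The
 * hpte_slot_array records, per sub-page, whether an HPTE was created and
 * which hash slot it went into, so only those entries are torn down.
 */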
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
        struct hash_pte *hptep;
        int actual_psize = MMU_PAGE_16M;
        unsigned int max_hpte_count, valid;
        unsigned long flags, s_addr = addr;
        unsigned long hpte_v, want_v, shift;
        unsigned long hidx, vpn = 0, hash, slot;

        shift = mmu_psize_defs[psize].shift;
        max_hpte_count = 1U << (PMD_SHIFT - shift);

        local_irq_save(flags);
        for (i = 0; i < max_hpte_count; i++) {
                valid = hpte_valid(hpte_slot_array, i);
                hidx = hpte_hash_index(hpte_slot_array, i);

                addr = s_addr + (i * (1ul << shift));
                vpn = hpt_vpn(addr, vsid, ssize);
                hash = hpt_hash(vpn, shift, ssize);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;

                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;

                hptep = htab_address + slot;
                want_v = hpte_encode_avpn(vpn, psize, ssize);
                native_lock_hpte(hptep);
                hpte_v = be64_to_cpu(hptep->v);

                /* Even if we miss, we need to invalidate the TLB */
                if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                        native_unlock_hpte(hptep);
                else
                        /* Invalidate the hpte. NOTE: this also unlocks it */
                        hptep->v = 0;
                /*
                 * We need to do a TLB invalidate for each address: the tlbie
                 * instruction compares the entry's VA in the TLB with the VA
                 * specified here.
                 */
                tlbie(vpn, psize, actual_psize, ssize, local);
        local_irq_restore(flags);
#else
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
{
        WARN(1, "%s called without THP support\n", __func__);
}
#endif
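/*
 * Given the 8-bit LP field from an HPTE's second doubleword and a base
 * page size, __hpte_actual_psize() walks mmu_psize_defs[] to find which
 * actual page size the low-order LP bits encode (each actual size has its
 * own penc value per base page size).
 */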
static inline int __hpte_actual_psize(unsigned int lp, int psize)
        /* start from 1 ignoring MMU_PAGE_4K */
        for (i = 1; i < MMU_PAGE_COUNT; i++) {

                if (mmu_psize_defs[psize].penc[i] == -1)
                        continue;
                /*
                 * encoding bits per actual page size
                 *        PTE LP     actual page size
                 */
                shift = mmu_psize_defs[i].shift - LP_SHIFT;

                mask = (1 << shift) - 1;
                if ((lp & mask) == mmu_psize_defs[psize].penc[i])
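/*
 * hpte_decode() goes the other way: from a raw HPTE and its slot number it
 * recovers the base and actual page size, the segment size and the VPN.
 * native_hpte_clear() uses it at kexec/crash time to know what to tlbie.
 */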
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        int *psize, int *apsize, int *ssize, unsigned long *vpn)
        unsigned long avpn, pteg, vpi;
        unsigned long hpte_v = be64_to_cpu(hpte->v);
        unsigned long hpte_r = be64_to_cpu(hpte->r);
        unsigned long vsid, seg_off;
        int size, a_size, shift;
        /* Look at the 8 bit LP value */
        unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        if (!(hpte_v & HPTE_V_LARGE)) {
                a_size = MMU_PAGE_4K;
        } else {
                for (size = 0; size < MMU_PAGE_COUNT; size++) {

                        /* valid entries have a shift value */
                        if (!mmu_psize_defs[size].shift)
                                continue;

                        a_size = __hpte_actual_psize(lp, size);

        /* This works for all page sizes, and for 256M and 1T segments */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
        else
                *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;

        shift = mmu_psize_defs[size].shift;

        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
        pteg = slot / HPTES_PER_GROUP;
        if (hpte_v & HPTE_V_SECONDARY)
                pteg = ~pteg;

        switch (*ssize) {
        case MMU_SEGSIZE_256M:
                /* We only have 28 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1f) << 23;
                vsid = avpn >> 5;
                /* We can find more bits from the pteg value */
                vpi = (vsid ^ pteg) & htab_hash_mask;
                seg_off |= vpi << shift;

                *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
                break;
        case MMU_SEGSIZE_1T:
                /* We only have 40 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1ffff) << 23;
                vsid = avpn >> 17;
                vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
                seg_off |= vpi << shift;

                *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer hardware, not taking the lock is fine.
 * This only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
        unsigned long vpn = 0;
        unsigned long slot, slots;
        struct hash_pte *hptep = htab_address;
        unsigned long hpte_v;
        unsigned long pteg_count;
        int psize, apsize, ssize;

        pteg_count = htab_hash_mask + 1;

        slots = pteg_count * HPTES_PER_GROUP;

        for (slot = 0; slot < slots; slot++, hptep++) {
                /*
                 * we could lock the pte here, but we are the only cpu
                 * running, right? and for crash dump, we probably
                 * don't want to wait for a maybe bad cpu.
                 */
                hpte_v = be64_to_cpu(hptep->v);

                /*
                 * Call __tlbie() here rather than tlbie() since we can't take the
                 * native_tlbie_lock.
                 */
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
                        hptep->v = 0;
                        __tlbie(vpn, psize, apsize, ssize);

        asm volatile("eieio; tlbsync; ptesync":::"memory");
/*
 * Batched hash table flush: we batch the tlbie's to avoid taking/releasing
 * the lock all the time.
 */
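/*
 * Two passes: first invalidate every matching HPTE under its per-entry
 * lock, then issue the TLB flushes in one batch, either with tlbiel when
 * the flush is local or with tlbie under native_tlbie_lock when required.
 */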
static void native_flush_hash_range(unsigned long number, int local)
        unsigned long hash, index, hidx, shift, slot;
        struct hash_pte *hptep;
        unsigned long hpte_v;
        unsigned long want_v;
        struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
        unsigned long psize = batch->psize;
        int ssize = batch->ssize;

        local_irq_save(flags);

        for (i = 0; i < number; i++) {
                pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
                        hash = hpt_hash(vpn, shift, ssize);
                        hidx = __rpte_to_hidx(pte, index);
                        if (hidx & _PTEIDX_SECONDARY)
                                hash = ~hash;
                        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                        slot += hidx & _PTEIDX_GROUP_IX;
                        hptep = htab_address + slot;
                        want_v = hpte_encode_avpn(vpn, psize, ssize);
                        native_lock_hpte(hptep);
                        hpte_v = be64_to_cpu(hptep->v);
                        if (!HPTE_V_COMPARE(hpte_v, want_v) ||
                            !(hpte_v & HPTE_V_VALID))
                                native_unlock_hpte(hptep);
                        else
                                hptep->v = 0;
                } pte_iterate_hashed_end();

        if (mmu_has_feature(MMU_FTR_TLBIEL) &&
            mmu_psize_defs[psize].tlbiel && local) {
                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbiel(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                asm volatile("ptesync":::"memory");
        } else {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);

                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbie(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                asm volatile("eieio; tlbsync; ptesync":::"memory");
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);

        local_irq_restore(flags);
static int native_register_proc_table(unsigned long base, unsigned long page_size,
                                      unsigned long table_size)
        unsigned long patb1 = base << 25; /* VSID */

        patb1 |= (page_size << 5);  /* sllp */

        partition_tb->patb1 = cpu_to_be64(patb1);
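/*
 * Wire up the bare-metal (native) hash MMU operations.  The process table
 * registration hook is only installed on ISA 3.00 (CPU_FTR_ARCH_300)
 * machines, where the host also owns the partition table.
 */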
void __init hpte_init_native(void)
        mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
        mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
        mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
        mmu_hash_ops.hpte_insert = native_hpte_insert;
        mmu_hash_ops.hpte_remove = native_hpte_remove;
        mmu_hash_ops.hpte_clear_all = native_hpte_clear;
        mmu_hash_ops.flush_hash_range = native_flush_hash_range;
        mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;

        if (cpu_has_feature(CPU_FTR_ARCH_300))
                ppc_md.register_process_table = native_register_proc_table;