std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
-1: cmp cr0,r0,r0; \
+1: cmpd cr0,r0,r0; \
bne 1b; \
IDLE_INST; \
b .
ld reg,PACAKBASE(r13); /* get high part of &label */ \
ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
+#define __LOAD_HANDLER(reg, label) \
+ ld reg,PACAKBASE(r13); \
+ ori reg,reg,(ABS_ADDR(label))@l;
+
/* Exception register prefixes */
#define EXC_HV H
#define EXC_STD
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif
+#ifdef CONFIG_RELOCATABLE
+#define BRANCH_TO_COMMON(reg, label) \
+ __LOAD_HANDLER(reg, label); \
+ mtctr reg; \
+ bctr
+
+#else
+#define BRANCH_TO_COMMON(reg, label) \
+ b label
+
+#endif
+
#define __KVM_HANDLER_PROLOG(area, n) \
BEGIN_FTR_SECTION_NESTED(947) \
ld r10,area+EX_CFAR(r13); \
return cpumask_subset(mm_cpumask(mm),
topology_sibling_cpumask(smp_processor_id()));
}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return cpumask_equal(mm_cpumask(mm),
+ cpumask_of(smp_processor_id()));
+}
+
#else
static inline int mm_is_core_local(struct mm_struct *mm)
{
return 1;
}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return 1;
+}
#endif
#endif /* __KERNEL__ */
/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x4100)
-EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
- SET_SCRATCH0(r13)
+
#ifdef CONFIG_PPC_P7_NAP
-BEGIN_FTR_SECTION
- /* Running native on arch 2.06 or later, check if we are
- * waking up from nap/sleep/winkle.
+ /*
+ * If running native on arch 2.06 or later, check if we are waking up
+ * from nap/sleep/winkle, and branch to idle handler.
*/
- mfspr r13,SPRN_SRR1
- rlwinm. r13,r13,47-31,30,31
- beq 9f
+#define IDLETEST(n) \
+ BEGIN_FTR_SECTION ; \
+ mfspr r10,SPRN_SRR1 ; \
+ rlwinm. r10,r10,47-31,30,31 ; \
+ beq- 1f ; \
+ cmpwi cr3,r10,2 ; \
+ BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \
+1: \
+ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+#else
+#define IDLETEST NOTEST
+#endif
- cmpwi cr3,r13,2
- GET_PACA(r13)
+EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ IDLETEST, 0x100)
+
+EXC_REAL_END(system_reset, 0x100, 0x200)
+EXC_VIRT_NONE(0x4100, 0x4200)
+
+#ifdef CONFIG_PPC_P7_NAP
+EXC_COMMON_BEGIN(system_reset_idle_common)
 bl pnv_restore_hyp_resource
 li r0,PNV_THREAD_RUNNING
 stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */
 /* Return SRR1 from power7_nap() */
 mfspr r3,SPRN_SRR1
 blt cr3,2f
 b pnv_wakeup_loss
2: b pnv_wakeup_noloss
+#endif
-9:
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#endif /* CONFIG_PPC_P7_NAP */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
- NOTEST, 0x100)
-EXC_REAL_END(system_reset, 0x100, 0x200)
-EXC_VIRT_NONE(0x4100, 0x4200)
EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
#ifdef CONFIG_PPC_PSERIES
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
-
-#define LOAD_SYSCALL_HANDLER(reg) \
- ld reg,PACAKBASE(r13); \
- ori reg,reg,(ABS_ADDR(system_call_common))@l;
+#define LOAD_SYSCALL_HANDLER(reg) \
+ __LOAD_HANDLER(reg, system_call_common)
/* Syscall routine is used twice, in reloc-off and reloc-on paths */
#define SYSCALL_PSERIES_1 \
* Threads will spin in HMT_LOW until the lock bit is cleared.
* r14 - pointer to core_idle_state
* r15 - used to load contents of core_idle_state
+ * r9 - used as a temporary variable
*/
core_idle_lock_held:
 HMT_LOW
3: lwz r15,0(r14)
 andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT
 bne 3b
 HMT_MEDIUM
 lwarx r15,0,r14
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ bne core_idle_lock_held
blr
/*
std r9,_MSR(r1)
std r1,PACAR1(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- /* Tell KVM we're entering idle */
- li r4,KVM_HWTHREAD_IN_IDLE
- stb r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
-
/*
* Go to real mode to do the nap, as required by the architecture.
* Also, we need to be in real mode before setting hwthread_state,
.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ /******************************************************/
+ /* N O T E W E L L ! ! ! N O T E W E L L */
+ /* The following store to HSTATE_HWTHREAD_STATE(r13) */
+ /* MUST occur in real mode, i.e. with the MMU off, */
+ /* and the MMU must stay off until we clear this flag */
+ /* and test HSTATE_HWTHREAD_REQ(r13) in the system */
+ /* reset interrupt vector in exceptions-64s.S. */
+ /* The reason is that another thread can switch the */
+ /* MMU to a guest context whenever this flag is set */
+ /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
+ /* that would potentially cause this thread to start */
+ /* executing instructions from guest memory in */
+ /* hypervisor mode, leading to a host crash or data */
+ /* corruption, or worse. */
+ /******************************************************/
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
stb r3,PACA_THREAD_IDLE_STATE(r13)
cmpwi cr3,r3,PNV_THREAD_SLEEP
bge cr3,2f
* r3 - requested stop state
*/
power_enter_stop:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ /* DO THIS IN REAL MODE! See comment above. */
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
/*
* Check if the requested state is a deep idle state.
*/
/* Ensure that restore_math() will restore */
if (msr_diff & MSR_FP)
current->thread.load_fp = 1;
-#ifdef CONFIG_ALIVEC
+#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
current->thread.load_vec = 1;
#endif
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
+#include <asm/smp.h>
#include "book3s_xics.h"
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
pid = mm ? mm->context.id : 0;
if (unlikely(pid == MMU_NO_CONTEXT))
goto bail;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
{
unsigned long pid;
unsigned long addr;
- int local = mm_is_core_local(mm);
+ int local = mm_is_thread_local(mm);
unsigned long ap = mmu_get_ap(psize);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
cxl_ctx_get();
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+ put_pid(ctx->glpid);
put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
goto out;
ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
+ /*
+ * Increment the mapped context count for adapter. This also checks
+ * if adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc) {
+ afu_release_irqs(ctx, ctx);
+ goto out;
+ }
+
/*
* We grab the PID here and not in the file open to allow for the case
* where a process (master, some daemon, etc) has opened the chardev on
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
- /*
- * Increment the mapped context count for adapter. This also checks
- * if adapter_context_lock is taken.
- */
- rc = cxl_adapter_context_get(ctx->afu->adapter);
- if (rc) {
- afu_release_irqs(ctx, ctx);
- goto out;
- }
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
 if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
 amr))) {
afu_release_irqs(ctx, ctx);
cxl_adapter_context_put(ctx->afu->adapter);
+ put_pid(ctx->glpid);
+ put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
goto out;
}