patch-2.1.124 linux/include/asm-sparc64/mmu_context.h
- Lines: 114
- Date: Sun Oct 4 10:22:44 1998
- Orig file: v2.1.123/linux/include/asm-sparc64/mmu_context.h
- Orig date: Thu Aug 6 14:06:34 1998
diff -u --recursive --new-file v2.1.123/linux/include/asm-sparc64/mmu_context.h linux/include/asm-sparc64/mmu_context.h
@@ -1,4 +1,4 @@
-/* $Id: mmu_context.h,v 1.26 1998/07/31 10:42:38 jj Exp $ */
+/* $Id: mmu_context.h,v 1.31 1998/09/24 03:22:01 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H
@@ -25,25 +25,47 @@
/* Initialize/destroy the context related info for a new mm_struct
* instance.
*/
-#define init_new_context(mm) ((mm)->context = NO_CONTEXT)
-#define destroy_context(mm) do { \
- if ((mm)->context != NO_CONTEXT) { \
+#define init_new_context(__mm) ((__mm)->context = NO_CONTEXT)
+
+/* Kernel threads like rpciod and nfsd drop their mm and then use
+ * init_mm; when this happens we must make sure that tsk->tss.ctx is
+ * updated as well.  Otherwise we have disasters relating to
+ * set_fs/get_fs usage later on.
+ *
+ * Also we can only clear the mmu_context_bmap bit when this is
+ * the final reference to the address space.
+ */
+#define destroy_context(__mm) do { \
+ if ((__mm)->context != NO_CONTEXT && \
+ atomic_read(&(__mm)->count) == 1) { \
spin_lock(&scheduler_lock); \
- if (!(((mm)->context ^ tlb_context_cache) & CTX_VERSION_MASK)) \
- clear_bit((mm)->context & ~(CTX_VERSION_MASK), \
+ if (!(((__mm)->context ^ tlb_context_cache) & CTX_VERSION_MASK))\
+ clear_bit((__mm)->context & ~(CTX_VERSION_MASK), \
mmu_context_bmap); \
spin_unlock(&scheduler_lock); \
- (mm)->context = NO_CONTEXT; \
+ (__mm)->context = NO_CONTEXT; \
+ if(current->mm == (__mm)) { \
+ current->tss.ctx = 0; \
+ spitfire_set_secondary_context(0); \
+ __asm__ __volatile__("flush %g6"); \
+ } \
} \
} while (0)
-extern __inline__ void get_mmu_context(struct task_struct *tsk)
+/* This routine must be called with interrupts off;
+ * this is necessary to guarantee that the relationship between
+ * current->tss.ctx and the CPU secondary context register is
+ * maintained when traps can happen.
+ *
+ * Also the caller must flush the current set of user windows
+ * to the stack (if necessary) before we get here.
+ */
+extern __inline__ void __get_mmu_context(struct task_struct *tsk)
{
register unsigned long paddr asm("o5");
register unsigned long pgd_cache asm("o4");
struct mm_struct *mm = tsk->mm;
- flushw_user();
if(!(tsk->tss.flags & SPARC_FLAG_KTHREAD) &&
!(tsk->flags & PF_EXITING)) {
unsigned long ctx = tlb_context_cache;
@@ -65,28 +87,43 @@
spitfire_set_secondary_context(tsk->tss.ctx);
__asm__ __volatile__("flush %g6");
paddr = __pa(mm->pgd);
- if(tsk->tss.flags & SPARC_FLAG_32BIT)
+ if((tsk->tss.flags & (SPARC_FLAG_32BIT|SPARC_FLAG_KTHREAD)) ==
+ (SPARC_FLAG_32BIT))
pgd_cache = (unsigned long) mm->pgd[0];
else
pgd_cache = 0;
__asm__ __volatile__("
- rdpr %%pstate, %%o3
- wrpr %%o3, %2, %%pstate
+ rdpr %%pstate, %%o2
+ andn %%o2, %2, %%o3
+ wrpr %%o3, %5, %%pstate
mov %4, %%g4
mov %0, %%g7
stxa %1, [%%g4] %3
- wrpr %%o3, 0x0, %%pstate
+ wrpr %%o2, 0x0, %%pstate
" : /* no outputs */
- : "r" (paddr), "r" (pgd_cache), "i" (PSTATE_MG|PSTATE_IE),
- "i" (ASI_DMMU), "i" (TSB_REG)
- : "o3");
+ : "r" (paddr), "r" (pgd_cache), "i" (PSTATE_IE),
+ "i" (ASI_DMMU), "i" (TSB_REG), "i" (PSTATE_MG)
+ : "o2", "o3");
}
+/* Now we define this as a do-nothing macro, because the only
+ * generic user right now is the scheduler, and we handle all
+ * the atomicity issues by having switch_to() call the above
+ * function itself.
+ */
+#define get_mmu_context(x) do { } while(0)
+
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
-#define activate_context(tsk) get_mmu_context(tsk)
+#define activate_context(__tsk) \
+do { unsigned long __flags; \
+ __save_and_cli(__flags); \
+ flushw_user(); \
+ __get_mmu_context(__tsk); \
+ __restore_flags(__flags); \
+} while(0)
#endif /* !(__ASSEMBLY__) */
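
The atomic_read() test added to destroy_context() above is the heart of the fix: the mmu_context_bmap bit may only be cleared on the final reference to the address space. The following is a minimal user-space model of that rule; struct mm, drop_context() and ctx_bitmap are illustrative stand-ins, not the kernel's own definitions.

/* Model of the reference-count rule in destroy_context() above:
 * the context bitmap bit may only be cleared on the final
 * reference to the address space.  All names here are
 * illustrative, not the kernel's. */
#include <stdio.h>

#define NO_CONTEXT 0UL

struct mm { unsigned long context; int count; };

static unsigned long ctx_bitmap;	/* stands in for mmu_context_bmap */

static void drop_context(struct mm *mm)
{
	if (mm->context != NO_CONTEXT && mm->count == 1) {
		ctx_bitmap &= ~(1UL << mm->context);	/* clear_bit() */
		mm->context = NO_CONTEXT;
	}
	mm->count--;
}

int main(void)
{
	struct mm m = { 3UL, 2 };

	ctx_bitmap = 1UL << 3;
	drop_context(&m);	/* count == 2: the bit must stay set */
	printf("after first drop: bitmap=%#lx\n", ctx_bitmap);
	drop_context(&m);	/* final reference: the bit is cleared */
	printf("after final drop: bitmap=%#lx\n", ctx_bitmap);
	return 0;
}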
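The interrupts-off requirement on __get_mmu_context() and the __save_and_cli()/__restore_flags() pair in activate_context() both protect a single invariant: a trap handler must never observe current->tss.ctx disagreeing with the CPU's secondary context register. Below is a toy model of that discipline, again with made-up names (irqs_on, switch_context()) rather than real kernel APIs.

/* Toy model of the invariant activate_context() above preserves:
 * the saved context (tss.ctx) and the "hardware" secondary
 * context register must agree whenever interrupts are enabled.
 * All names are illustrative. */
#include <assert.h>
#include <stdio.h>

static int irqs_on = 1;
static unsigned long tss_ctx, secondary_ctx;

static void trap(void)		/* a trap may fire any time irqs are on */
{
	if (irqs_on)
		assert(tss_ctx == secondary_ctx);
}

static void switch_context(unsigned long new_ctx)
{
	irqs_on = 0;			/* __save_and_cli(flags)          */
	tss_ctx = new_ctx;		/* update the saved copy first... */
	trap();				/* ...a trap here is masked       */
	secondary_ctx = new_ctx;	/* ...then the context register   */
	irqs_on = 1;			/* __restore_flags(flags)         */
	trap();				/* both views agree again         */
}

int main(void)
{
	switch_context(42);
	printf("ctx now %lu\n", tss_ctx);
	return 0;
}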