patch-2.0.21-2.1.0 linux/include/asm-m68k/pgtable.h
- Lines: 272
- Date: Wed Sep 25 10:47:41 1996
- Orig file: lx2.0/v2.0.21/linux/include/asm-m68k/pgtable.h
- Orig date: Thu May 16 09:05:11 1996
diff -u --recursive --new-file lx2.0/v2.0.21/linux/include/asm-m68k/pgtable.h linux/include/asm-m68k/pgtable.h
@@ -1,6 +1,8 @@
#ifndef _M68K_PGTABLE_H
#define _M68K_PGTABLE_H
+#include <asm/setup.h>
+
#ifndef __ASSEMBLY__
/*
@@ -8,17 +10,20 @@
* the m68k page table tree.
*/
-#define __flush_tlb() \
-do { \
- if (m68k_is040or060) \
- __asm__ __volatile__(".word 0xf510\n"::); /* pflushan */ \
- else \
- __asm__ __volatile__("pflusha\n"::); \
-} while (0)
+/*
+ * flush all atc entries (user-space entries only for the 680[46]0).
+ */
+static inline void __flush_tlb(void)
+{
+ if (CPU_IS_040_OR_060)
+ __asm__ __volatile__(".word 0xf510\n"::); /* pflushan */
+ else
+ __asm__ __volatile__("pflusha\n"::);
+}
static inline void __flush_tlb_one(unsigned long addr)
{
- if (m68k_is040or060) {
+ if (CPU_IS_040_OR_060) {
register unsigned long a0 __asm__ ("a0") = addr;
__asm__ __volatile__(".word 0xf508" /* pflush (%a0) */
: : "a" (a0));
@@ -27,7 +32,17 @@
}
#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() flush_tlb()
+
+/*
+ * flush all atc entries (both kernel and user-space entries).
+ */
+static inline void flush_tlb_all(void)
+{
+ if (CPU_IS_040_OR_060)
+ __asm__ __volatile__(".word 0xf518\n"::); /* pflusha */
+ else
+ __asm__ __volatile__("pflusha\n"::);
+}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
@@ -53,7 +68,12 @@
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
-#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pte(pteptr, pteval) do{ \
+ ((*(pteptr)) = (pteval)); \
+ if (CPU_IS_060) \
+ __asm__ __volatile__(".word 0xf518\n"::); /* pflusha */ \
+ } while(0)
+
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT 22
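
[Editor's note] The set_pte() change above supplies the hook the comment at the top of this hunk asks for: on the 68060, every direct PTE store is followed by a pflusha. A minimal sketch of that store-then-fixup shape, with a counter standing in for the flush (these names are illustrative, not kernel API):

#include <stdio.h>

typedef unsigned long pte_t;

static int cpu_needs_flush = 1;     /* stands in for CPU_IS_060 */
static unsigned long flush_count;   /* stands in for the pflusha */

static void my_set_pte(pte_t *pteptr, pte_t pteval)
{
    *pteptr = pteval;               /* the actual page-table store */
    if (cpu_needs_flush)
        flush_count++;              /* model-specific fixup after the store */
}

int main(void)
{
    pte_t pte = 0;
    my_set_pte(&pte, 0x12345007UL);
    printf("pte=%#lx flushes=%lu\n", pte, flush_count);
    return 0;
}
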
@@ -120,40 +140,49 @@
#define _DESCTYPE_MASK 0x003
#define _CACHEMASK040 (~0x060)
-#define _TABLE_MASK (0xfffffff0)
+#define _TABLE_MASK (0xfffffe00)
#define _PAGE_TABLE (_PAGE_SHORT)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
#ifndef __ASSEMBLY__
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE040)
-#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | _PAGE_CACHE040)
-#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_CACHE040)
+extern unsigned long mm_cachebits;
+
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | mm_cachebits)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | mm_cachebits)
+
+/* Alternate definitions that are compile time constants, for
+ initializing protection_map. The cachebits are fixed later. */
+#define PAGE_NONE_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
+#define PAGE_SHARED_C __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+#define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
+#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
/*
* The m68k can't do page protection for execute, and considers that the same are read.
* Also, write permissions imply read permissions. This is the closest we can get..
*/
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
+#define __P000 PAGE_NONE_C
+#define __P001 PAGE_READONLY_C
+#define __P010 PAGE_COPY_C
+#define __P011 PAGE_COPY_C
+#define __P100 PAGE_READONLY_C
+#define __P101 PAGE_READONLY_C
+#define __P110 PAGE_COPY_C
+#define __P111 PAGE_COPY_C
+
+#define __S000 PAGE_NONE_C
+#define __S001 PAGE_READONLY_C
+#define __S010 PAGE_SHARED_C
+#define __S011 PAGE_SHARED_C
+#define __S100 PAGE_READONLY_C
+#define __S101 PAGE_READONLY_C
+#define __S110 PAGE_SHARED_C
+#define __S111 PAGE_SHARED_C
/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page;
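
[Editor's note] The PAGE_* definitions above now pull in the run-time variable mm_cachebits, so they can no longer serve as static initializers for protection_map; the _C variants exist for that, with the cache bits OR-ed in later, as the comment says. A small sketch of that initialize-then-fix-up pattern (constants and names here are illustrative, not the kernel's):

#include <stdio.h>

#define P_PRESENT   0x001
#define P_RONLY     0x004
#define P_ACCESSED  0x008

/* Compile-time constants, usable as static initializers. */
#define PG_READONLY_C  (P_PRESENT | P_RONLY | P_ACCESSED)
#define PG_SHARED_C    (P_PRESENT | P_ACCESSED)

static unsigned long protection_map[2] = { PG_READONLY_C, PG_SHARED_C };
static unsigned long mm_cachebits;                 /* known only at boot */

static void fix_protection_map(unsigned long cachebits)
{
    int i;
    mm_cachebits = cachebits;
    for (i = 0; i < 2; i++)
        protection_map[i] |= mm_cachebits;         /* the "fixed later" step */
}

int main(void)
{
    fix_protection_map(0x020);                     /* e.g. a copyback bit */
    printf("%#lx %#lx\n", protection_map[0], protection_map[1]);
    return 0;
}
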
@@ -295,9 +324,8 @@
tsk->tss.crp[0] = 0x80000000 | _PAGE_SHORT;
tsk->tss.crp[1] = tsk->tss.pagedir_p;
if (tsk == current) {
- if (m68k_is040or060)
- __asm__ __volatile__ (".word 0xf510\n\t" /* pflushan */
- "movel %0@,%/d0\n\t"
+ if (CPU_IS_040_OR_060)
+ __asm__ __volatile__ ("movel %0@,%/d0\n\t"
".long 0x4e7b0806\n\t"
/* movec d0,urp */
: : "a" (&tsk->tss.crp[1])
@@ -349,11 +377,17 @@
extern inline void nocache_page (unsigned long vaddr)
{
- if (m68k_is040or060) {
+ if (CPU_IS_040_OR_060) {
pgd_t *dir;
pmd_t *pmdp;
pte_t *ptep;
+ if(CPU_IS_060)
+ __asm__ __volatile__ ("movel %0,%/a0\n\t"
+ ".word 0xf470"
+ : : "g" (VTOP(vaddr))
+ : "a0");
+
dir = pgd_offset_k(vaddr);
pmdp = pmd_offset(dir,vaddr);
ptep = pte_offset(pmdp,vaddr);
@@ -363,7 +397,7 @@
static inline void cache_page (unsigned long vaddr)
{
- if (m68k_is040or060) {
+ if (CPU_IS_040_OR_060) {
pgd_t *dir;
pmd_t *pmdp;
pte_t *ptep;
@@ -514,10 +548,10 @@
#define flush_icache() \
do { \
- if (m68k_is040or060) \
- asm ("nop; .word 0xf498 /* cinva %%ic */"); \
+ if (CPU_IS_040_OR_060) \
+ asm __volatile__ ("nop; .word 0xf498 /* cinva %%ic */"); \
else \
- asm ("movec %/cacr,%/d0;" \
+ asm __volatile__ ("movec %/cacr,%/d0;" \
"oriw %0,%/d0;" \
"movec %/d0,%/cacr" \
: /* no outputs */ \
@@ -552,7 +586,7 @@
process changes. */
#define __flush_cache_all() \
do { \
- if (m68k_is040or060) \
+ if (CPU_IS_040_OR_060) \
__asm__ __volatile__ ("nop; .word 0xf478\n" ::); \
else \
__asm__ __volatile__ ("movec %%cacr,%%d0\n\t" \
@@ -563,7 +597,7 @@
#define __flush_cache_030() \
do { \
- if (m68k_is040or060 == 0) \
+ if (CPU_IS_020_OR_030) \
__asm__ __volatile__ ("movec %%cacr,%%d0\n\t" \
"orw %0,%%d0\n\t" \
"movec %%d0,%%cacr" \
@@ -574,7 +608,11 @@
extern inline void flush_cache_mm(struct mm_struct *mm)
{
+#if FLUSH_VIRTUAL_CACHE_040
if (mm == current->mm) __flush_cache_all();
+#else
+ if (mm == current->mm) __flush_cache_030();
+#endif
}
extern inline void flush_cache_range(struct mm_struct *mm,
@@ -582,9 +620,11 @@
unsigned long end)
{
if (mm == current->mm){
- if (m68k_is040or060)
+#if FLUSH_VIRTUAL_CACHE_040
+ if (CPU_IS_040_OR_060)
cache_push_v(start, end-start);
else
+#endif
__flush_cache_030();
}
}
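
[Editor's note] flush_cache_mm() above and flush_cache_range()/flush_cache_page() below are now gated on FLUSH_VIRTUAL_CACHE_040: unless it is set, the 040/060 physically-indexed caches are left alone and only the 020/030 path runs. A compile-and-run sketch of that preprocessor gate (the flush bodies are stubs and the default value chosen here is an assumption):

#include <stdio.h>

#define FLUSH_VIRTUAL_CACHE_040 0   /* illustrative; set to 1 to flush 040/060 too */

static void flush_cache_all_stub(void) { puts("push + invalidate everything"); }
static void flush_cache_030_stub(void) { puts("clear the 020/030 data cache only"); }

static void flush_cache_mm_stub(void)
{
#if FLUSH_VIRTUAL_CACHE_040
    flush_cache_all_stub();
#else
    flush_cache_030_stub();         /* 040/060 caches are not touched */
#endif
}

int main(void)
{
    flush_cache_mm_stub();
    return 0;
}
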
@@ -593,9 +633,11 @@
unsigned long vmaddr)
{
if (vma->vm_mm == current->mm){
- if (m68k_is040or060)
+#if FLUSH_VIRTUAL_CACHE_040
+ if (CPU_IS_040_OR_060)
cache_push_v(vmaddr, PAGE_SIZE);
else
+#endif
__flush_cache_030();
}
}
@@ -603,7 +645,7 @@
/* Push the page at kernel virtual address and clear the icache */
extern inline void flush_page_to_ram (unsigned long address)
{
- if (m68k_is040or060) {
+ if (CPU_IS_040_OR_060) {
register unsigned long tmp __asm ("a0") = VTOP(address);
__asm__ __volatile__ ("nop\n\t"
".word 0xf470 /* cpushp %%dc,(%0) */\n\t"
@@ -620,7 +662,7 @@
/* Push n pages at kernel virtual address and clear the icache */
extern inline void flush_pages_to_ram (unsigned long address, int n)
{
- if (m68k_is040or060) {
+ if (CPU_IS_040_OR_060) {
while (n--) {
register unsigned long tmp __asm ("a0") = VTOP(address);
__asm__ __volatile__ ("nop\n\t"