patch-2.1.101 linux/arch/ppc/mm/init.c
- Lines: 1205
- Date: Fri May 8 00:18:23 1998
- Orig file: v2.1.100/linux/arch/ppc/mm/init.c
- Orig date: Thu May 7 22:51:47 1998
diff -u --recursive --new-file v2.1.100/linux/arch/ppc/mm/init.c linux/arch/ppc/mm/init.c
@@ -1,5 +1,5 @@
/*
- * arch/ppc/mm/init.c
+ * $Id: init.c,v 1.94 1998/05/06 02:07:36 paulus Exp $
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -33,30 +33,25 @@
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
+#include <linux/init.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h> /* for initrd_* */
+#endif
+
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/residual.h>
-#ifdef CONFIG_BLK_DEV_INITRD
-#include <linux/blk.h> /* for initrd_* */
-#endif
+#include <asm/uaccess.h>
#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
#endif
#ifdef CONFIG_MBX
#include <asm/mbx.h>
#endif
-
-#ifndef CONFIG_8xx
-unsigned long _SDR1;
-PTE *Hash, *Hash_end;
-unsigned long Hash_size, Hash_mask;
-#endif /* CONFIG_8xx */
-
-/* ifdef APUS specific stuff until the merge is completed. -jskov */
-#ifdef CONFIG_APUS
+#ifdef CONFIG_APUS /* ifdef APUS specific stuff until the merge is completed. -jskov */
#include <asm/setup.h>
#include <asm/amigahw.h>
#endif
@@ -69,67 +64,60 @@
extern char _start[], _end[];
extern char etext[], _stext[];
extern char __init_begin, __init_end;
+extern char __prep_begin, __prep_end;
+extern char __pmac_begin, __pmac_end;
+extern char __openfirmware_begin, __openfirmware_end;
extern RESIDUAL res;
char *klimit = _end;
struct device_node *memory_node;
unsigned long ioremap_base;
unsigned long ioremap_bot;
+unsigned long avail_start;
#ifndef __SMP__
struct pgtable_cache_struct quicklists;
#endif
-#ifndef CONFIG_8xx
-static void hash_init(void);
-#endif /* CONFIG_8xx */
-static void mapin_ram(void);
+void MMU_init(void);
static void *MMU_get_page(void);
+unsigned long *prep_find_end_of_memory(void);
+unsigned long *pmac_find_end_of_memory(void);
+extern unsigned long *find_end_of_memory(void);
+static void mapin_ram(void);
void map_page(struct task_struct *, unsigned long va,
unsigned long pa, int flags);
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
-extern unsigned long *find_end_of_memory(void);
extern struct task_struct *current_set[NR_CPUS];
+#ifndef CONFIG_8xx
+unsigned long _SDR1;
+PTE *Hash, *Hash_end;
+unsigned long Hash_size, Hash_mask;
+static void hash_init(void);
+union ubat { /* BAT register values to be loaded */
+ BAT bat;
+ P601_BAT bat_601;
+ u32 word[2];
+} BATS[4][2]; /* 4 pairs of IBAT, DBAT */
+
+struct batrange { /* stores address ranges mapped by BATs */
+ unsigned long start;
+ unsigned long limit;
+ unsigned long phys;
+} bat_addrs[4];
+#endif /* CONFIG_8xx */
#ifdef CONFIG_MBX
-/* This is a big hack that may not yet work correctly.
- * The MBX8xx boards have a single DIMM socket for additional memory.
- * Although it appears you can set magical locations in the serial
- * EEPROM to get EPPC-Bug to configure this memory, there are no tools
- * (i.e. commands) to make this easy. If you screw up, you will most
- * likely end up with a board that will not boot until you find a
- * way to program the EEPROM correctly. I decided to simply program
- * the memory controller here to add the additional memory.
- * The reason this may not work correctly is that depending upon the
- * on-board and DIMM memory size, there may be holes in the physical
- * address space. This is the case for me, I have a 4 MB local memory
- * and a 32 MB DIMM.
- * The DIMM is 64 bits wide, and we see it as two banks of 32 bit
- * memory. The holes are caused by the requirement to map the
- * memory on a natural alignment, that is a 16 MB bank must begin on
- * a 16 MB boundary. The DIMM_SIZE below represents the size of the
- * bank, which is the total size divided by two.
- * Although I may not have all of this working, the intention is to
- * mark all of the page maps in the "hole" as reserved, and adjust
- * num_physpages accordingly. In the current implementation, this
- * seems to work, but there are some assumptions about contiguous
- * memory. The correct solution is to modify the memory allocators
- * to know about holes, but that will have to wait for another day.
- *
- * define DIMM_8xx to enable this feature.
- * define DIMM_SIZE to reflect the bank size (DIMM size divided by two).
- */
-/*#define DIMM_8xx 1 */
-#define DIMM_SIZE (16 * 1024 * 1024)
+void set_mbx_memory(void);
#endif /* CONFIG_MBX */
/*
* this tells the system to map all of ram with the segregs
* (i.e. page tables) instead of the bats.
+ * -- Cort
*/
#undef MAP_RAM_WITH_SEGREGS 1
-
-/* optimization for 603 to load the tlb directly from the linux table */
+/* optimization for 603 to load the tlb directly from the linux table -- Cort */
#define NO_RELOAD_HTAB 1 /* change in kernel/head.S too! */
void __bad_pte(pmd_t *pmd)
@@ -177,7 +165,8 @@
pte_t * __bad_pagetable(void)
{
- memset((void *)empty_bad_page_table, 0, PAGE_SIZE);
+ /*memset((void *)empty_bad_page_table, 0, PAGE_SIZE);*/
+ __clear_user((void *)empty_bad_page_table, PAGE_SIZE);
return (pte_t *) empty_bad_page_table;
}
@@ -185,10 +174,355 @@
pte_t __bad_page(void)
{
- memset((void *)empty_bad_page, 0, PAGE_SIZE);
+ /*memset((void *)empty_bad_page, 0, PAGE_SIZE);*/
+ __clear_user((void *)empty_bad_page, PAGE_SIZE);
return pte_mkdirty(mk_pte(empty_bad_page, PAGE_SHARED));
}
+void show_mem(void)
+{
+ int i,free = 0,total = 0,reserved = 0;
+ int shared = 0, cached = 0;
+ struct task_struct *p;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+ i = max_mapnr;
+ while (i-- > 0) {
+ total++;
+ if (PageReserved(mem_map+i))
+ reserved++;
+ else if (PageSwapCache(mem_map+i))
+ cached++;
+ else if (!atomic_read(&mem_map[i].count))
+ free++;
+ else
+ shared += atomic_read(&mem_map[i].count) - 1;
+ }
+ printk("%d pages of RAM\n",total);
+ printk("%d free pages\n",free);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
+ printk("%d pages in page table cache\n",(int)pgtable_cache_size);
+ show_buffers();
+#ifdef CONFIG_NET
+ show_net_buffers();
+#endif
+ printk("%-8s %3s %3s %8s %8s %8s %9s %8s", "Process", "Pid", "Cnt",
+ "Ctx", "Ctx<<4", "Last Sys", "pc", "task");
+#ifdef __SMP__
+ printk(" %3s", "CPU");
+#endif /* __SMP__ */
+ printk("\n");
+ for_each_task(p)
+ {
+ printk("%-8.8s %3d %3d %8ld %8ld %8ld %c%08lx %08lx ",
+ p->comm,p->pid,
+ p->mm->count,p->mm->context,
+ p->mm->context<<4, p->tss.last_syscall,
+ user_mode(p->tss.regs) ? 'u' : 'k', p->tss.regs->nip,
+ (ulong)p);
+ {
+ int iscur = 0;
+#ifdef __SMP__
+ printk("%3d ", p->processor);
+ if ( (p->processor != NO_PROC_ID) &&
+ (p == current_set[p->processor]) )
+
+#else
+ if ( p == current )
+#endif /* __SMP__ */
+ {
+ iscur = 1;
+ printk("current");
+ }
+ if ( p == last_task_used_math )
+ {
+ if ( iscur )
+ printk(",");
+ printk("last math");
+ }
+ printk("\n");
+ }
+ }
+}
+
+void si_meminfo(struct sysinfo *val)
+{
+ int i;
+
+ i = max_mapnr;
+ val->totalram = 0;
+ val->sharedram = 0;
+ val->freeram = nr_free_pages << PAGE_SHIFT;
+ val->bufferram = buffermem;
+ while (i-- > 0) {
+ if (PageReserved(mem_map+i))
+ continue;
+ val->totalram++;
+ if (!atomic_read(&mem_map[i].count))
+ continue;
+ val->sharedram += atomic_read(&mem_map[i].count) - 1;
+ }
+ val->totalram <<= PAGE_SHIFT;
+ val->sharedram <<= PAGE_SHIFT;
+ return;
+}
+
+void *
+ioremap(unsigned long addr, unsigned long size)
+{
+ return __ioremap(addr, size, _PAGE_NO_CACHE);
+}
+
+void *
+__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
+{
+ unsigned long p, v, i;
+
+ /*
+ * Choose an address to map it to.
+ * Once the vmalloc system is running, we use it.
+ * Before then, we map addresses >= ioremap_base
+ * virt == phys; for addresses below this we use
+ * space going down from ioremap_base (ioremap_bot
+ * records where we're up to).
+ *
+ * We should also look out for a frame buffer and
+ * map it with a free BAT register, if there is one.
+ */
+ p = addr & PAGE_MASK;
+ size = PAGE_ALIGN(addr + size) - p;
+ if (size == 0)
+ return NULL;
+
+ if (mem_init_done) {
+ struct vm_struct *area;
+ area = get_vm_area(size);
+ if (area == 0)
+ return NULL;
+ v = VMALLOC_VMADDR(area->addr);
+ } else {
+ if (p >= ioremap_base)
+ v = p;
+ else
+ v = (ioremap_bot -= size);
+ }
+
+ flags |= pgprot_val(PAGE_KERNEL);
+ if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
+ flags |= _PAGE_GUARDED;
+ for (i = 0; i < size; i += PAGE_SIZE)
+ map_page(&init_task, v+i, p+i, flags);
+
+ return (void *) (v + (addr & ~PAGE_MASK));
+}
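
A hedged usage sketch of the ioremap()/__ioremap() pair added above; the
device base address, size, and helper name are invented for illustration and
are not part of this patch:

	#include <asm/io.h>

	#define BOARD_REGS_PHYS	0xf3000000UL	/* hypothetical device base */
	#define BOARD_REGS_SIZE	0x1000UL

	static volatile unsigned int *board_regs;

	static int board_map(void)
	{
		/* ioremap() applies _PAGE_NO_CACHE, and __ioremap() then
		 * adds _PAGE_GUARDED, so accesses go straight to the bus. */
		board_regs = ioremap(BOARD_REGS_PHYS, BOARD_REGS_SIZE);
		if (board_regs == NULL)
			return -1;
		return 0;
	}

Before mem_init_done is set, the returned virtual address may simply equal
the physical address (for addresses at or above ioremap_base); afterwards it
comes from get_vm_area(), as the code above shows.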
+
+void iounmap(void *addr)
+{
+ /* XXX todo */
+}
+
+unsigned long iopa(unsigned long addr)
+{
+ unsigned long idx;
+ pmd_t *pd;
+ pte_t *pg;
+#ifndef CONFIG_8xx
+ int b;
+#endif
+ idx = addr & ~PAGE_MASK;
+ addr = addr & PAGE_MASK;
+
+#ifndef CONFIG_8xx
+ /* Check the BATs */
+ for (b = 0; b < 4; ++b)
+ if (addr >= bat_addrs[b].start && addr <= bat_addrs[b].limit)
+#ifndef CONFIG_APUS
+ return bat_addrs[b].phys | idx;
+#else
+ /* Do a more precise remapping of virtual address */
+ /* --Carsten */
+ return (bat_addrs[b].phys - bat_addrs[b].start + addr) | idx;
+#endif /* CONFIG_APUS */
+#endif /* CONFIG_8xx */
+ /* Do we have a page table? */
+ if (init_task.mm->pgd == NULL)
+ return 0;
+
+ /* Use upper 10 bits of addr to index the first level map */
+ pd = (pmd_t *) (init_task.mm->pgd + (addr >> PGDIR_SHIFT));
+ if (pmd_none(*pd))
+ return 0;
+
+ /* Use middle 10 bits of addr to index the second-level map */
+ pg = pte_offset(pd, addr);
+ return (pte_val(*pg) & PAGE_MASK) | idx;
+}
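
The "upper 10 bits / middle 10 bits" comments in iopa() (and in map_page()
below) describe the 32-bit two-level split used throughout this file.  A
standalone sketch of the arithmetic, assuming PAGE_SHIFT = 12 and
PGDIR_SHIFT = 22 (4kB pages, 1024-entry tables):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PGDIR_SHIFT	22
	#define PTRS_PER_PTE	1024

	int main(void)
	{
		unsigned long va = 0xc0123abcUL;	/* example address */
		unsigned long pgd = va >> PGDIR_SHIFT;	/* upper 10 bits */
		unsigned long pte = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		unsigned long off = va & ((1UL << PAGE_SHIFT) - 1);

		printf("va %08lx -> pgd %lu, pte %lu, offset 0x%lx\n",
		       va, pgd, pte, off);
		return 0;
	}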
+
+void
+map_page(struct task_struct *tsk, unsigned long va,
+ unsigned long pa, int flags)
+{
+ pmd_t *pd;
+ pte_t *pg;
+#ifndef CONFIG_8xx
+ int b;
+#endif
+ if (tsk->mm->pgd == NULL) {
+ /* Allocate upper level page map */
+ tsk->mm->pgd = (pgd_t *) MMU_get_page();
+ }
+ /* Use upper 10 bits of VA to index the first level map */
+ pd = (pmd_t *) (tsk->mm->pgd + (va >> PGDIR_SHIFT));
+ if (pmd_none(*pd)) {
+#ifndef CONFIG_8xx
+ /*
+ * Need to allocate second-level table, but first
+ * check whether this address is already mapped by
+ * the BATs; if so, don't bother allocating the page.
+ */
+ for (b = 0; b < 4; ++b) {
+ if (va >= bat_addrs[b].start
+ && va <= bat_addrs[b].limit) {
+ /* XXX should check the phys address matches */
+ return;
+ }
+ }
+#endif /* CONFIG_8xx */
+ pg = (pte_t *) MMU_get_page();
+ pmd_val(*pd) = (unsigned long) pg;
+ }
+ /* Use middle 10 bits of VA to index the second-level map */
+ pg = pte_offset(pd, va);
+ set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
+#ifndef CONFIG_8xx
+ flush_hash_page(0, va);
+#endif
+}
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(mm, start, end) flushes a range of pages
+ *
+ * since the hardware hash table functions as an extension of the
+ * tlb as far as the linux tables are concerned, flush it too.
+ * -- Cort
+ */
+
+/*
+ * Flush all tlb/hash table entries (except perhaps for those
+ * mapping RAM starting at PAGE_OFFSET, since they never change).
+ */
+void
+local_flush_tlb_all(void)
+{
+#ifndef CONFIG_8xx
+ __clear_user(Hash, Hash_size);
+ /*memset(Hash, 0, Hash_size);*/
+ _tlbia();
+#else
+ asm volatile ("tlbia" : : );
+#endif
+}
+
+/*
+ * Flush all the (user) entries for the address space described
+ * by mm. We can't rely on mm->mmap describing all the entries
+ * that might be in the hash table.
+ */
+void
+local_flush_tlb_mm(struct mm_struct *mm)
+{
+#ifndef CONFIG_8xx
+ mm->context = NO_CONTEXT;
+ if (mm == current->mm)
+ activate_context(current);
+#else
+ asm volatile ("tlbia" : : );
+#endif
+}
+
+void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+#ifndef CONFIG_8xx
+ if (vmaddr < TASK_SIZE)
+ flush_hash_page(vma->vm_mm->context, vmaddr);
+ else
+ flush_hash_page(0, vmaddr);
+#else
+ asm volatile ("tlbia" : : );
+#endif
+}
+
+
+/*
+ * for each page addr in the range, call MMU_invalidate_page()
+ * if the range is very large and the hash table is small it might be
+ * faster to do a search of the hash table and just invalidate pages
+ * that are in the range but that's for study later.
+ * -- Cort
+ */
+void
+local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+#ifndef CONFIG_8xx
+ start &= PAGE_MASK;
+
+ if (end - start > 20 * PAGE_SIZE)
+ {
+ flush_tlb_mm(mm);
+ return;
+ }
+
+ for (; start < end && start < TASK_SIZE; start += PAGE_SIZE)
+ {
+ flush_hash_page(mm->context, start);
+ }
+#else
+ asm volatile ("tlbia" : : );
+#endif
+}
+
+/*
+ * The context counter has overflowed.
+ * We set mm->context to NO_CONTEXT for all mm's in the system.
+ * We assume we can get to all mm's by looking at tsk->mm for
+ * all tasks in the system.
+ */
+void
+mmu_context_overflow(void)
+{
+#ifndef CONFIG_8xx
+ struct task_struct *tsk;
+
+ printk(KERN_DEBUG "mmu_context_overflow\n");
+ read_lock(&tasklist_lock);
+ for_each_task(tsk) {
+ if (tsk->mm)
+ tsk->mm->context = NO_CONTEXT;
+ }
+ read_unlock(&tasklist_lock);
+ flush_hash_segments(0x10, 0xffffff);
+ next_mmu_context = 0;
+ /* make sure current always has a context */
+ current->mm->context = MUNGE_CONTEXT(++next_mmu_context);
+ set_context(current->mm->context);
+#else
+ /* We set the value to -1 because it is pre-incremented
+ * before use.
+ */
+ next_mmu_context = -1;
+#endif
+}
+
/*
* The following stuff defines a data structure for representing
* areas of memory as an array of (address, length) pairs, and
@@ -211,8 +545,8 @@
/*
* Scan a region for a piece of a given size with the required alignment.
*/
-void *
-find_mem_piece(unsigned size, unsigned align)
+__initfunc(void *
+find_mem_piece(unsigned size, unsigned align))
{
int i;
unsigned a, e;
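
The __initfunc() wrappers added in this and the following hunks move
boot-time-only routines into the .text.init section, whose pages
free_initmem() (rewritten later in this patch) releases between
__init_begin and __init_end once booting is done.  From memory, the
2.1.x <linux/init.h> definition is approximately:

	#define __initfunc(__arginit) \
		__arginit __attribute__ ((__section__ (".text.init")))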
@@ -235,9 +569,9 @@
/*
* Remove some memory from an array of pieces
*/
-static void
+__initfunc(static void
remove_mem_piece(struct mem_pieces *mp, unsigned start, unsigned size,
- int must_exist)
+ int must_exist))
{
int i, j;
unsigned end, rs, re;
@@ -291,8 +625,7 @@
}
}
-static void
-print_mem_pieces(struct mem_pieces *mp)
+__initfunc(static void print_mem_pieces(struct mem_pieces *mp))
{
int i;
@@ -311,8 +644,7 @@
static void coalesce_mem_pieces(struct mem_pieces *);
static void append_mem_piece(struct mem_pieces *, unsigned, unsigned);
-static void
-sort_mem_pieces(struct mem_pieces *mp)
+__initfunc(static void sort_mem_pieces(struct mem_pieces *mp))
{
unsigned long a, s;
int i, j;
@@ -330,8 +662,7 @@
}
}
-static void
-coalesce_mem_pieces(struct mem_pieces *mp)
+__initfunc(static void coalesce_mem_pieces(struct mem_pieces *mp))
{
unsigned long a, e;
int i, j, d;
@@ -353,8 +684,8 @@
/*
* Add some memory to an array of pieces
*/
-static void
-append_mem_piece(struct mem_pieces *mp, unsigned start, unsigned size)
+__initfunc(static void
+ append_mem_piece(struct mem_pieces *mp, unsigned start, unsigned size))
{
struct reg_property *rp;
@@ -369,8 +700,7 @@
* Read in a property describing some pieces of memory.
*/
-static void
-get_mem_prop(char *name, struct mem_pieces *mp)
+__initfunc(static void get_mem_prop(char *name, struct mem_pieces *mp))
{
struct reg_property *rp;
int s;
@@ -389,423 +719,16 @@
coalesce_mem_pieces(mp);
}
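
For readers following the mem_pieces routines above: a minimal user-space
sketch of the idea, an array of (address, size) regions with append and
coalesce steps.  The struct layout and sizes are illustrative only, not the
kernel's definitions:

	#include <stdio.h>

	struct piece { unsigned long address, size; };
	struct pieces { int n; struct piece r[8]; };

	static void append(struct pieces *mp, unsigned long a, unsigned long s)
	{
		mp->r[mp->n].address = a;
		mp->r[mp->n].size = s;
		mp->n++;
	}

	/* Merge adjacent or overlapping regions; assumes sorted order,
	 * as sort_mem_pieces() guarantees in the kernel code. */
	static void coalesce(struct pieces *mp)
	{
		int i, j;

		for (i = 0; i + 1 < mp->n; ) {
			struct piece *p = &mp->r[i], *q = &mp->r[i+1];
			if (p->address + p->size >= q->address) {
				unsigned long end = q->address + q->size;
				if (end > p->address + p->size)
					p->size = end - p->address;
				for (j = i + 1; j + 1 < mp->n; ++j)
					mp->r[j] = mp->r[j+1];
				mp->n--;
			} else
				++i;
		}
	}

	int main(void)
	{
		struct pieces mem = { 0 };

		append(&mem, 0x000000, 0x400000);	/* 4MB */
		append(&mem, 0x400000, 0x400000);	/* adjacent 4MB */
		coalesce(&mem);
		printf("%d region(s): 0x%lx + 0x%lx\n",
		       mem.n, mem.r[0].address, mem.r[0].size);
		return 0;
	}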
-/*
- * On systems with Open Firmware, collect information about
- * physical RAM and which pieces are already in use.
- * At this point, we have (at least) the first 8MB mapped with a BAT.
- * Our text, data, bss use something over 1MB, starting at 0.
- * Open Firmware may be using 1MB at the 4MB point.
- */
-unsigned long *pmac_find_end_of_memory(void)
-{
- unsigned long a, total;
- unsigned long kstart, ksize;
- int i;
-
- memory_node = find_devices("memory");
- if (memory_node == NULL) {
- printk(KERN_ERR "can't find memory node\n");
- abort();
- }
-
- /*
- * Find out where physical memory is, and check that it
- * starts at 0 and is contiguous. It seems that RAM is
- * always physically contiguous on Power Macintoshes,
- * because MacOS can't cope if it isn't.
- *
- * Supporting discontiguous physical memory isn't hard,
- * it just makes the virtual <-> physical mapping functions
- * more complicated (or else you end up wasting space
- * in mem_map).
- */
- get_mem_prop("reg", &phys_mem);
- if (phys_mem.n_regions == 0)
- panic("No RAM??");
- a = phys_mem.regions[0].address;
- if (a != 0)
- panic("RAM doesn't start at physical address 0");
- total = phys_mem.regions[0].size;
- if (phys_mem.n_regions > 1) {
- printk("RAM starting at 0x%x is not contiguous\n",
- phys_mem.regions[1].address);
- printk("Using RAM from 0 to 0x%lx\n", total-1);
- phys_mem.n_regions = 1;
- }
-
- /* record which bits the prom is using */
- get_mem_prop("available", &phys_avail);
- prom_mem = phys_mem;
- for (i = 0; i < phys_avail.n_regions; ++i)
- remove_mem_piece(&prom_mem, phys_avail.regions[i].address,
- phys_avail.regions[i].size, 1);
-
- /*
- * phys_avail records memory we can use now.
- * prom_mem records memory allocated by the prom that we
- * don't want to use now, but we'll reclaim later.
- * Make sure the kernel text/data/bss is in neither.
- */
- kstart = __pa(_stext); /* should be 0 */
- ksize = PAGE_ALIGN(klimit - _stext);
- remove_mem_piece(&phys_avail, kstart, ksize, 0);
- remove_mem_piece(&prom_mem, kstart, ksize, 0);
- remove_mem_piece(&phys_avail, 0, 0x4000, 0);
- remove_mem_piece(&prom_mem, 0, 0x4000, 0);
-
- return __va(total);
-}
-#endif /* CONFIG_8xx */
-
-#ifdef CONFIG_APUS
-#define HARDWARE_MAPPED_SIZE (512*1024)
-unsigned long *apus_find_end_of_memory(void)
-{
- unsigned long kstart, ksize;
-
- /* Add the chunk that ADOS does not see. Removed again below. */
- m68k_memory[0].size += HARDWARE_MAPPED_SIZE;
-
- append_mem_piece(&phys_mem, m68k_memory[0].addr, m68k_memory[0].size);
-
- phys_avail = phys_mem;
- kstart = __pa(_stext);
- ksize = PAGE_ALIGN(klimit - _stext);
- remove_mem_piece(&phys_avail, kstart, ksize, 1);
-
- /* Remove the upper HARDWARE_MAPPED_SIZE bytes where the address
- * range 0xfff00000-0xfffx0000 is mapped to.
- * We do it this way to ensure that the memory registered in the
- * system has a power-of-two size.
- */
- remove_mem_piece(&phys_avail,
- (m68k_memory[0].addr + m68k_memory[0].size
- - HARDWARE_MAPPED_SIZE),
- HARDWARE_MAPPED_SIZE, 1);
-
- /* FIXME:APUS: Only handles one block of memory! Problem is
- * that the VTOP/PTOV code in head.S would be a mess if it had
- * to handle more than one block.
- */
- return __va(m68k_memory[0].addr + m68k_memory[0].size);
-}
-#endif
-
-/*
- * Find some memory for setup_arch to return.
- * We use the last chunk of available memory as the area
- * that setup_arch returns, making sure that there are at
- * least 32 pages unused before this for MMU_get_page to use.
- */
-unsigned long avail_start;
-
-unsigned long find_available_memory(void)
-{
- int i;
- unsigned long a, free;
- unsigned long start, end;
-
- free = 0;
- if (_machine == _MACH_mbx) {
- /* Return the first, not the last region, because we
- * may not yet have properly initialized the additonal
- * memory DIMM.
- */
- a = PAGE_ALIGN(phys_avail.regions[0].address);
- avail_start = (unsigned long) __va(a);
- return avail_start;
- }
-
- for (i = 0; i < phys_avail.n_regions - 1; ++i) {
- start = phys_avail.regions[i].address;
- end = start + phys_avail.regions[i].size;
- free += (end & PAGE_MASK) - PAGE_ALIGN(start);
- }
- a = PAGE_ALIGN(phys_avail.regions[i].address);
- if (free < 32 * PAGE_SIZE)
- a += 32 * PAGE_SIZE - free;
- avail_start = (unsigned long) __va(a);
- return avail_start;
-}
-
-void show_mem(void)
-{
- int i,free = 0,total = 0,reserved = 0;
- int shared = 0, cached = 0;
- struct task_struct *p;
-
- printk("Mem-info:\n");
- show_free_areas();
- printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- while (i-- > 0) {
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (!atomic_read(&mem_map[i].count))
- free++;
- else
- shared += atomic_read(&mem_map[i].count) - 1;
- }
- printk("%d pages of RAM\n",total);
- printk("%d free pages\n",free);
- printk("%d reserved pages\n",reserved);
- printk("%d pages shared\n",shared);
- printk("%d pages swap cached\n",cached);
- printk("%d pages in page table cache\n",(int)pgtable_cache_size);
- show_buffers();
-#ifdef CONFIG_NET
- show_net_buffers();
-#endif
- printk("%-8s %3s %3s %8s %8s %8s %9s %8s", "Process", "Pid", "Cnt",
- "Ctx", "Ctx<<4", "Last Sys", "pc", "task");
-#ifdef __SMP__
- printk(" %3s", "CPU");
-#endif /* __SMP__ */
- printk("\n");
- for_each_task(p)
- {
- printk("%-8.8s %3d %3d %8ld %8ld %8ld %c%08lx %08lx ",
- p->comm,p->pid,
- p->mm->count,p->mm->context,
- p->mm->context<<4, p->tss.last_syscall,
- user_mode(p->tss.regs) ? 'u' : 'k', p->tss.regs->nip,
- (ulong)p);
- {
- int iscur = 0;
-#ifdef __SMP__
- printk("%3d ", p->processor);
- if ( (p->processor != NO_PROC_ID) &&
- (p == current_set[p->processor]) )
-
-#else
- if ( p == current )
-#endif /* __SMP__ */
- {
- iscur = 1;
- printk("current");
- }
- if ( p == last_task_used_math )
- {
- if ( iscur )
- printk(",");
- printk("last math");
- }
- printk("\n");
- }
- }
-}
-
-extern unsigned long free_area_init(unsigned long, unsigned long);
-
-/*
- * paging_init() sets up the page tables - in fact we've already done this.
- */
-unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
-{
- /*
- * Grab some memory for bad_page and bad_pagetable to use.
- */
- empty_bad_page = PAGE_ALIGN(start_mem);
- empty_bad_page_table = empty_bad_page + PAGE_SIZE;
- start_mem = empty_bad_page + 2 * PAGE_SIZE;
-
- /* note: free_area_init uses its second argument
- to size the mem_map array. */
- start_mem = free_area_init(start_mem, end_mem);
- return start_mem;
-}
-
-void mem_init(unsigned long start_mem, unsigned long end_mem)
-{
- unsigned long addr;
- int i;
- unsigned long a, lim;
- int codepages = 0;
- int datapages = 0;
- int initpages = 0;
- extern unsigned int rtas_data, rtas_size;
-
- end_mem &= PAGE_MASK;
- high_memory = (void *) end_mem;
- max_mapnr = MAP_NR(high_memory);
- num_physpages = max_mapnr; /* RAM is assumed contiguous */
-
- /* mark usable pages in the mem_map[] */
- start_mem = PAGE_ALIGN(start_mem);
-
-#ifndef CONFIG_8xx
- remove_mem_piece(&phys_avail, __pa(avail_start),
- start_mem - avail_start, 1);
-
- for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE)
- set_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags);
-
- for (i = 0; i < phys_avail.n_regions; ++i) {
- a = (unsigned long) __va(phys_avail.regions[i].address);
- lim = a + phys_avail.regions[i].size;
- a = PAGE_ALIGN(a);
- for (; a < lim; a += PAGE_SIZE)
- clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags);
- }
- phys_avail.n_regions = 0;
-
- /* free the prom's memory - no-op on prep */
- for (i = 0; i < prom_mem.n_regions; ++i) {
- a = (unsigned long) __va(prom_mem.regions[i].address);
- lim = a + prom_mem.regions[i].size;
- a = PAGE_ALIGN(a);
- for (; a < lim; a += PAGE_SIZE)
- clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags);
- }
- prom_trashed = 1;
-#else /* CONFIG_8xx */
- /* When we get here, all of the page maps have been set up and
- * Linux thinks we have contiguous memory. Since the MBX can
- * have memory holes, we need to compensate for that here.
- * The memory holes are currently pages marked reserved (all
- * pages right now are marked reserved).
- * All of the memory allocated by the kernel up to this point
- * had to come from region 0.
- */
-
- /* First, unreserve all memory from the page following start_mem
- * to the end of region 0.
- */
- for (addr = start_mem + PAGE_SIZE ;
- addr < (ulong) __va(phys_mem.regions[0].size);
- addr += PAGE_SIZE) {
- clear_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags);
- }
-
- /* Now add any additional regions to the system.
- */
- for (i = 1; i < phys_avail.n_regions; ++i) {
- a = (unsigned long) __va(phys_avail.regions[i].address);
- lim = a + phys_avail.regions[i].size;
- a = PAGE_ALIGN(a);
- for (; a < lim; a += PAGE_SIZE)
- clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags);
- }
- phys_avail.n_regions = 0; /* Nothing available, kernel owns */
- /* Count up the size of the holes. We look for the space
- * between the end of one region and the start of the next.
- */
- lim = 0;
- for (i = 0; i < phys_mem.n_regions-1; ++i) {
- a = (unsigned long) phys_mem.regions[i].address;
- a += phys_mem.regions[i].size;
- lim += phys_mem.regions[i+1].address - a;
- }
-
- /* It appears that num_physpages is only used for quota checking,
- * when pages are locked down. We subtract the size of the holes
- * from it now.
- */
- num_physpages -= lim/PAGE_SIZE;
-#endif /* CONFIG_8xx */
-
- for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
- if (PageReserved(mem_map + MAP_NR(addr))) {
- if (addr < (ulong) etext)
- codepages++;
- else if (addr >= (unsigned long)&__init_begin
- && addr < (unsigned long)&__init_end)
- initpages++;
- else if (addr < (ulong) start_mem)
- datapages++;
- continue;
- }
- atomic_set(&mem_map[MAP_NR(addr)].count, 1);
-#ifdef CONFIG_BLK_DEV_INITRD
- if (!initrd_start ||
- addr < (initrd_start & PAGE_MASK) || addr >= initrd_end)
-#endif /* CONFIG_BLK_DEV_INITRD */
-#ifndef CONFIG_8xx
- if ( !rtas_data ||
- addr < (rtas_data & PAGE_MASK) ||
- addr >= (rtas_data+rtas_size))
#endif /* CONFIG_8xx */
- free_page(addr);
- }
-
- printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08x,%08lx]\n",
- (unsigned long) nr_free_pages << (PAGE_SHIFT-10),
- codepages << (PAGE_SHIFT-10),
- datapages << (PAGE_SHIFT-10),
- initpages << (PAGE_SHIFT-10),
- PAGE_OFFSET, end_mem);
- mem_init_done = 1;
-}
-
-/*
- * Unfortunately, we can't put initialization functions in their
- * own section and free that at this point, because gas gets some
- * relocations wrong if we do. :-( But this code is here for when
- * gas gets fixed.
- */
-void free_initmem(void)
-{
- unsigned long a;
- unsigned long num_freed_pages = 0;
-
- a = (unsigned long)(&__init_begin);
- for (; a < (unsigned long)(&__init_end); a += PAGE_SIZE) {
- clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags);
- atomic_set(&mem_map[MAP_NR(a)].count, 1);
- free_page(a);
- num_freed_pages++;
- }
-
- printk ("Freeing unused kernel memory: %ldk freed\n",
- (num_freed_pages * PAGE_SIZE) >> 10);
-}
-
-void si_meminfo(struct sysinfo *val)
-{
- int i;
-
- i = max_mapnr;
- val->totalram = 0;
- val->sharedram = 0;
- val->freeram = nr_free_pages << PAGE_SHIFT;
- val->bufferram = buffermem;
- while (i-- > 0) {
- if (PageReserved(mem_map+i))
- continue;
- val->totalram++;
- if (!atomic_read(&mem_map[i].count))
- continue;
- val->sharedram += atomic_read(&mem_map[i].count) - 1;
- }
- val->totalram <<= PAGE_SHIFT;
- val->sharedram <<= PAGE_SHIFT;
- return;
-}
#ifndef CONFIG_8xx
-union ubat { /* BAT register values to be loaded */
- BAT bat;
- P601_BAT bat_601;
- u32 word[2];
-} BATS[4][2]; /* 4 pairs of IBAT, DBAT */
-
-struct batrange { /* stores address ranges mapped by BATs */
- unsigned long start;
- unsigned long limit;
- unsigned long phys;
-} bat_addrs[4];
-
/*
* Set up one of the I/D BAT (block address translation) register pairs.
* The parameters are not checked; in particular size must be a power
* of 2 between 128k and 256M.
*/
-void
-setbat(int index, unsigned long virt, unsigned long phys,
- unsigned int size, int flags)
+__initfunc(void setbat(int index, unsigned long virt, unsigned long phys,
+ unsigned int size, int flags))
{
unsigned int bl;
int wimgxpp;
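
The setbat() comment above requires size to be a power of 2 between 128k
and 256M; in the BAT upper word this becomes the 11-bit BL mask, which
works out to (size >> 17) - 1.  That encoding comes from the PowerPC
architecture books rather than this hunk, so a quick sanity check:

	#include <stdio.h>

	int main(void)
	{
		unsigned long sizes[] = { 128 << 10, 8 << 20, 256 << 20 };
		int i;

		for (i = 0; i < 3; ++i)	/* 128KB -> 0x000 ... 256MB -> 0x7ff */
			printf("size %8luKB -> bl 0x%03lx\n",
			       sizes[i] >> 10, (sizes[i] >> 17) - 1);
		return 0;
	}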
@@ -853,37 +776,6 @@
#define RAM_PAGE (_PAGE_RW)
#endif
-/*
- * This finds the amount of physical ram and does necessary
- * setup for prep. This is pretty architecture specific so
- * this will likely stay seperate from the pmac.
- * -- Cort
- */
-unsigned long *prep_find_end_of_memory(void)
-{
- unsigned long kstart, ksize;
- unsigned long total;
- total = res.TotalMemory;
-
- if (total == 0 )
- {
- /*
- * I need a way to probe the amount of memory if the residual
- * data doesn't contain it. -- Cort
- */
- printk("Ramsize from residual data was 0 -- Probing for value\n");
- total = 0x02000000;
- printk("Ramsize default to be %ldM\n", total>>20);
- }
- append_mem_piece(&phys_mem, 0, total);
- phys_avail = phys_mem;
- kstart = __pa(_stext); /* should be 0 */
- ksize = PAGE_ALIGN(klimit - _stext);
- remove_mem_piece(&phys_avail, kstart, ksize, 0);
- remove_mem_piece(&phys_avail, 0, 0x4000, 0);
-
- return (__va(total));
-}
#endif /* CONFIG_8xx */
/*
@@ -891,7 +783,7 @@
*/
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
-static void mapin_ram()
+__initfunc(static void mapin_ram(void))
{
int i;
unsigned long v, p, s, f;
@@ -957,87 +849,64 @@
}
}
-#ifndef CONFIG_8xx
-/*
- * Initialize the hash table and patch the instructions in head.S.
- */
-static void hash_init(void)
+__initfunc(static void *MMU_get_page(void))
{
- int Hash_bits;
- unsigned long h, ramsize;
+ void *p;
- extern unsigned int hash_page_patch_A[], hash_page_patch_B[],
- hash_page_patch_C[], hash_page_patch_D[];
+ if (mem_init_done) {
+ p = (void *) __get_free_page(GFP_KERNEL);
+ if (p == 0)
+ panic("couldn't get a page in MMU_get_page");
+ } else {
+ p = find_mem_piece(PAGE_SIZE, PAGE_SIZE);
+ }
+ /*memset(p, 0, PAGE_SIZE);*/
+ __clear_user(p, PAGE_SIZE);
+ return p;
+}
- /*
- * Allow 64k of hash table for every 16MB of memory,
- * up to a maximum of 2MB.
- */
- ramsize = (ulong)end_of_DRAM - KERNELBASE;
- for (h = 64<<10; h < ramsize / 256 && h < 2<<20; h *= 2)
- ;
- Hash_size = h;
- Hash_mask = (h >> 6) - 1;
-
-#ifdef NO_RELOAD_HTAB
- /* shrink the htab since we don't use it on 603's -- Cort */
- switch (_get_PVR()>>16) {
- case 3: /* 603 */
- case 6: /* 603e */
- case 7: /* 603ev */
- Hash_size = 0;
- Hash_mask = 0;
- break;
- default:
- /* on 601/4 let things be */
- break;
- }
-#endif /* NO_RELOAD_HTAB */
-
- /* Find some memory for the hash table. */
- if ( Hash_size )
- Hash = find_mem_piece(Hash_size, Hash_size);
- else
- Hash = 0;
+__initfunc(void free_initmem(void))
+{
+ unsigned long a;
+ unsigned long num_freed_pages = 0, num_prep_pages = 0,
+ num_pmac_pages = 0;
- printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
- ramsize >> 20, Hash_size >> 10, Hash);
- if ( Hash_size )
- {
- memset(Hash, 0, Hash_size);
- Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
+#define FREESEC(START,END,CNT) do { \
+ a = (unsigned long)(&START); \
+ for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
+ clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
+ atomic_set(&mem_map[MAP_NR(a)].count, 1); \
+ free_page(a); \
+ CNT++; \
+ } \
+} while (0)
- /*
- * Patch up the instructions in head.S:hash_page
- */
- Hash_bits = ffz(~Hash_size) - 6;
- hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
- | (__pa(Hash) >> 16);
- hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0)
- | ((26 - Hash_bits) << 6);
- if (Hash_bits > 16)
- Hash_bits = 16;
- hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0)
- | ((26 - Hash_bits) << 6);
- hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff)
- | (Hash_mask >> 10);
- hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff)
- | (Hash_mask >> 10);
- hash_page_patch_D[0] = (hash_page_patch_D[0] & ~0xffff)
- | (Hash_mask >> 10);
- /*
- * Ensure that the locations we've patched have been written
- * out from the data cache and invalidated in the instruction
- * cache, on those machines with split caches.
- */
- flush_icache_range((unsigned long) hash_page_patch_A,
- (unsigned long) (hash_page_patch_D + 1));
+ FREESEC(__init_begin,__init_end,num_freed_pages);
+ switch (_machine)
+ {
+ case _MACH_Pmac:
+ case _MACH_chrp:
+ FREESEC(__prep_begin,__prep_end,num_prep_pages);
+ break;
+ case _MACH_prep:
+ FREESEC(__pmac_begin,__pmac_end,num_pmac_pages);
+ FREESEC(__openfirmware_begin,__openfirmware_end,num_pmac_pages);
+ break;
+ case _MACH_mbx:
+ FREESEC(__pmac_begin,__pmac_end,num_pmac_pages);
+ FREESEC(__openfirmware_begin,__openfirmware_end,num_pmac_pages);
+ FREESEC(__prep_begin,__prep_end,num_prep_pages);
+ break;
}
- else
- Hash_end = 0;
-
+
+ printk ("Freeing unused kernel memory: %ldk init",
+ (num_freed_pages * PAGE_SIZE) >> 10);
+ if ( num_prep_pages )
+ printk(" %ldk prep",(num_prep_pages*PAGE_SIZE)>>10);
+ if ( num_pmac_pages )
+ printk(" %ldk pmac",(num_pmac_pages*PAGE_SIZE)>>10);
+ printk("\n");
}
-#endif /* CONFIG_8xx */
/*
* Do very early mm setup such as finding the size of memory
@@ -1046,8 +915,7 @@
* still be merged.
* -- Cort
*/
-void
-MMU_init(void)
+__initfunc(void MMU_init(void))
{
#ifndef CONFIG_8xx
if (have_of)