MNN (Re: powerpc dir)
> Once I've tested it a little more, I'll post it here for the time being.
It still has some debug code left in it, though.
* At first I was wrapping everything in #if, but I gave up on that partway through.
* I'd like to do something better about the handling of pages that aren't main memory (I/O).
# It should get a bit faster when the banks are actually split up,
# but the PowerMac probably has VM_PHYSSEG_MAX == 1...
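
For reference: the pa_to_pv()/pa_to_attr() helpers in the patch below rely on
vm_physseg_find() turning a physical page number into a (bank, offset) pair.
Just as a rough sketch of that idea (this is not code from the patch or from
the MI VM sources; seg, nsegs, and seg_find are made-up names here), a
VM_PSTRAT_BSEARCH-style lookup boils down to a binary search over the loaded
banks, which degenerates to a single range check when VM_PHYSSEG_MAX == 1:

#include <stddef.h>

struct seg {
	unsigned long start;	/* first page frame number in the bank */
	unsigned long end;	/* one past the last page frame number */
};

/* Return the bank index holding pfn, and its offset within that bank. */
static int
seg_find(struct seg *segs, int nsegs, unsigned long pfn, int *offp)
{
	int lo = 0, hi = nsegs - 1;

	while (lo <= hi) {
		int mid = (lo + hi) / 2;

		if (pfn < segs[mid].start)
			hi = mid - 1;
		else if (pfn >= segs[mid].end)
			lo = mid + 1;
		else {
			if (offp != NULL)
				*offp = (int)(pfn - segs[mid].start);
			return mid;
		}
	}
	return -1;	/* not managed RAM, e.g. I/O space */
}

In the actual patch the bank index and offset come back from vm_physseg_find(),
and the per-bank pvent/attrs arrays set up in pmap_init() are indexed with that
offset.
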
*** macppc/include/vmparam.h.orig Wed Feb 18 21:13:38 1998
--- macppc/include/vmparam.h Thu May 28 15:54:46 1998
***************
*** 89,95 ****
#define VM_MBUF_SIZE (NMBCLUSTERS * CLBYTES)
#define VM_PHYS_SIZE (USRIOSIZE * CLBYTES)
! /*
! * MACHINE_NONCONTIG is necessary for NetBSD/powerpc.
! */
! #define MACHINE_NONCONTIG
--- 89,101 ----
#define VM_MBUF_SIZE (NMBCLUSTERS * CLBYTES)
#define VM_PHYS_SIZE (USRIOSIZE * CLBYTES)
! #define MACHINE_NEW_NONCONTIG
!
! struct pmap_physseg {
! struct pv_entry *pvent;
! char *attrs;
! };
!
! #define VM_PHYSSEG_MAX 32
! #define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
! #define VM_PHYSSEG_NOADD /* can't add RAM after vm_mem_init */
*** powerpc/powerpc/pmap.c.orig Thu May 28 15:51:04 1998
--- powerpc/powerpc/pmap.c Thu May 28 15:52:38 1998
***************
*** 177,182 ****
--- 177,218 ----
| which);
}
+ #if defined(MACHINE_NEW_NONCONTIG)
+ static __inline struct pv_entry *
+ pa_to_pv(pa)
+ vm_offset_t pa;
+ {
+ int bank, pg;
+
+ bank = vm_physseg_find(atop(pa), &pg);
+ if (bank == -1) {
+ #ifdef DIAGNOSTIC
+ // panic("pa_to_pv (pa = 0x%x)", pa);
+ printf("pa_to_pv (pa = 0x%x)", pa);
+ #endif
+ return NULL;
+ }
+ return &vm_physmem[bank].pmseg.pvent[pg];
+ }
+
+ static __inline char *
+ pa_to_attr(pa)
+ vm_offset_t pa;
+ {
+ int bank, pg;
+
+ bank = vm_physseg_find(atop(pa), &pg);
+ if (bank == -1) {
+ #ifdef DIAGNOSTIC
+ // panic("pa_to_attr (pa = 0x%x)", pa);
+ printf("pa_to_attr (pa = 0x%x)", pa);
+ #endif
+ return NULL;
+ }
+ return &vm_physmem[bank].pmseg.attrs[pg];
+ }
+ #endif
+
/*
* Try to insert page table entry *pt into the ptable at idx.
*
***************
*** 420,426 ****
for (i = 0; i < ptab_cnt; i++)
LIST_INIT(potable + i);
LIST_INIT(&pv_page_freelist);
!
/*
* Initialize kernel pmap and hardware.
*/
--- 456,467 ----
for (i = 0; i < ptab_cnt; i++)
LIST_INIT(potable + i);
LIST_INIT(&pv_page_freelist);
! #if defined(MACHINE_NEW_NONCONTIG)
! for (mp = avail; mp->size; mp++)
! vm_page_physload(atop(mp->start), atop(mp->start + mp->size),
! atop(mp->start), atop(mp->start + mp->size));
! #endif
!
/*
* Initialize kernel pmap and hardware.
*/
***************
*** 478,484 ****
vm_size_t sz;
vm_offset_t addr;
int i, s;
!
sz = (vm_size_t)((sizeof(struct pv_entry) + 1) * npgs);
sz = round_page(sz);
addr = (vm_offset_t)kmem_alloc(kernel_map, sz);
--- 519,529 ----
vm_size_t sz;
vm_offset_t addr;
int i, s;
! #if defined(MACHINE_NEW_NONCONTIG)
! int bank;
! char *attr;
! #endif
!
sz = (vm_size_t)((sizeof(struct pv_entry) + 1) * npgs);
sz = round_page(sz);
addr = (vm_offset_t)kmem_alloc(kernel_map, sz);
***************
*** 489,494 ****
--- 534,552 ----
LIST_INIT(&pv_page_freelist);
pmap_attrib = (char *)pv;
bzero(pv, npgs);
+
+ #if defined(MACHINE_NEW_NONCONTIG)
+ pv = pv_table;
+ attr = pmap_attrib;
+ for (bank = 0; bank < vm_nphysseg; bank++) {
+ sz = vm_physmem[bank].end - vm_physmem[bank].start;
+ vm_physmem[bank].pmseg.pvent = pv;
+ vm_physmem[bank].pmseg.attrs = attr;
+ pv += sz;
+ attr += sz;
+ }
+ #endif
+
pmap_initialized = 1;
splx(s);
}
***************
*** 496,501 ****
--- 554,560 ----
/*
* Return the index of the given page in terms of pmap_next_page() calls.
*/
+ #if !defined(MACHINE_NEW_NONCONTIG)
int
pmap_page_index(pa)
vm_offset_t pa;
***************
*** 512,517 ****
--- 571,577 ----
}
return -1;
}
+ #endif
/*
* How much virtual space is available to the kernel?
***************
*** 527,532 ****
--- 587,593 ----
*end = *start + SEGMENT_LENGTH;
}
+ #if !defined(MACHINE_NEW_NONCONTIG)
/*
* Return the number of possible page indices returned
* from pmap_page_index for any page provided by pmap_next_page.
***************
*** 558,563 ****
--- 619,625 ----
nextavail += NBPG;
return TRUE;
}
+ #endif
/*
* Create and return a physical map.
***************
*** 836,845 ****
* This returns whether this is the first mapping of a page.
*/
static inline int
! pmap_enter_pv(pteidx, va, pind)
int pteidx;
! vm_offset_t va;
! u_int pind;
{
struct pv_entry *pv, *npv;
int s, first;
--- 898,906 ----
* This returns whether this is the first mapping of a page.
*/
static inline int
! pmap_enter_pv(pteidx, va, pa)
int pteidx;
! vm_offset_t va, pa;
{
struct pv_entry *pv, *npv;
int s, first;
***************
*** 849,855 ****
s = splimp();
! pv = &pv_table[pind];
if (first = pv->pv_idx == -1) {
/*
* No entries yet, use header as the first entry.
--- 910,916 ----
s = splimp();
! pv = pa_to_pv(pa);
if (first = pv->pv_idx == -1) {
/*
* No entries yet, use header as the first entry.
***************
*** 873,898 ****
}
static void
! pmap_remove_pv(pteidx, va, pind, pte)
int pteidx;
! vm_offset_t va;
! int pind;
struct pte *pte;
{
struct pv_entry *pv, *npv;
!
! if (pind < 0)
! return;
/*
* First transfer reference/change bits to cache.
*/
! pmap_attrib[pind] |= (pte->pte_lo & (PTE_REF | PTE_CHG)) >> ATTRSHFT;
/*
* Remove from the PV table.
*/
! pv = &pv_table[pind];
/*
* If it is the first entry on the list, it is actually
--- 934,959 ----
}
static void
! pmap_remove_pv(pteidx, va, pa, pte)
int pteidx;
! vm_offset_t va, pa;
struct pte *pte;
{
struct pv_entry *pv, *npv;
! char *attr;
/*
* First transfer reference/change bits to cache.
*/
! attr = pa_to_attr(pa);
! if (attr == NULL)
! return;
! *attr |= (pte->pte_lo & (PTE_REF | PTE_CHG)) >> ATTRSHFT;
/*
* Remove from the PV table.
*/
! pv = pa_to_pv(pa);
/*
* If it is the first entry on the list, it is actually
***************
*** 936,947 ****
int idx, i, s;
pte_t pte;
struct pte_ovfl *po;
! struct mem_region *mp;
/*
* Have to remove any existing mapping first.
*/
! pmap_remove(pm, va, va + NBPG - 1);
/*
* Compute the HTAB index.
--- 997,1008 ----
int idx, i, s;
pte_t pte;
struct pte_ovfl *po;
! int bank;
/*
* Have to remove any existing mapping first.
*/
! pmap_remove(pm, va, va + NBPG);
/*
* Compute the HTAB index.
***************
*** 955,966 ****
pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
| ((va & ADDR_PIDX) >> ADDR_API_SHFT);
pte.pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_I | PTE_G;
! for (mp = mem; mp->size; mp++) {
! if (pa >= mp->start && pa < mp->start + mp->size) {
! pte.pte_lo &= ~(PTE_I | PTE_G);
! break;
! }
! }
if (prot & VM_PROT_WRITE)
pte.pte_lo |= PTE_RW;
else
--- 1016,1026 ----
pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
| ((va & ADDR_PIDX) >> ADDR_API_SHFT);
pte.pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_I | PTE_G;
!
! bank = vm_physseg_find(atop(pa), NULL);
! if (bank != -1)
! pte.pte_lo &= ~(PTE_I | PTE_G);
!
if (prot & VM_PROT_WRITE)
pte.pte_lo |= PTE_RW;
else
***************
*** 969,976 ****
/*
* Now record mapping for later back-translation.
*/
! if (pmap_initialized && (i = pmap_page_index(pa)) != -1)
! if (pmap_enter_pv(idx, va, i)) {
/*
* Flush the real memory from the cache.
*/
--- 1029,1036 ----
/*
* Now record mapping for later back-translation.
*/
! if (pmap_initialized && bank != -1)
! if (pmap_enter_pv(idx, va, pa)) {
/*
* Flush the real memory from the cache.
*/
***************
*** 1015,1021 ****
idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++)
if (ptematch(ptp, sr, va, PTE_VALID)) {
! pmap_remove_pv(idx, va, pmap_page_index(ptp->pte_lo), ptp);
ptp->pte_hi &= ~PTE_VALID;
asm volatile ("sync");
tlbie(va);
--- 1075,1081 ----
idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++)
if (ptematch(ptp, sr, va, PTE_VALID)) {
! pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
ptp->pte_hi &= ~PTE_VALID;
asm volatile ("sync");
tlbie(va);
***************
*** 1023,1029 ****
}
for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++)
if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
! pmap_remove_pv(idx, va, pmap_page_index(ptp->pte_lo), ptp);
ptp->pte_hi &= ~PTE_VALID;
asm volatile ("sync");
tlbie(va);
--- 1083,1089 ----
}
for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++)
if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
! pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
ptp->pte_hi &= ~PTE_VALID;
asm volatile ("sync");
tlbie(va);
***************
*** 1032,1038 ****
for (po = potable[idx].lh_first; po; po = npo) {
npo = po->po_list.le_next;
if (ptematch(&po->po_pte, sr, va, 0)) {
! pmap_remove_pv(idx, va, pmap_page_index(po->po_pte.pte_lo),
&po->po_pte);
LIST_REMOVE(po, po_list);
pofree(po, 1);
--- 1092,1098 ----
for (po = potable[idx].lh_first; po; po = npo) {
npo = po->po_list.le_next;
if (ptematch(&po->po_pte, sr, va, 0)) {
! pmap_remove_pv(idx, va, po->po_pte.pte_lo,
&po->po_pte);
LIST_REMOVE(po, po_list);
pofree(po, 1);
***************
*** 1134,1151 ****
pte_t *ptp;
struct pte_ovfl *po;
int i, s;
!
! i = pmap_page_index(pa);
! if (i < 0)
! return;
/*
* First modify bits in cache.
*/
! pmap_attrib[i] &= ~mask >> ATTRSHFT;
! pmap_attrib[i] |= val >> ATTRSHFT;
! pv = pv_table + i;
if (pv->pv_idx < 0)
return;
--- 1194,1212 ----
pte_t *ptp;
struct pte_ovfl *po;
int i, s;
! char *attr;
/*
* First modify bits in cache.
*/
! attr = pa_to_attr(pa);
! if (attr == NULL)
! return;
!
! *attr &= ~mask >> ATTRSHFT;
! *attr |= val >> ATTRSHFT;
! pv = pa_to_pv(pa);
if (pv->pv_idx < 0)
return;
***************
*** 1193,1211 ****
pte_t *ptp;
struct pte_ovfl *po;
int i, s, bits = 0;
!
! i = pmap_page_index(pa);
! if (i < 0)
! return 0;
/*
* First try the cache.
*/
! bits |= (pmap_attrib[i] << ATTRSHFT) & bit;
if (bits == bit)
return bits;
! pv = pv_table + i;
if (pv->pv_idx < 0)
return 0;
--- 1254,1272 ----
pte_t *ptp;
struct pte_ovfl *po;
int i, s, bits = 0;
! char *attr;
/*
* First try the cache.
*/
! attr = pa_to_attr(pa);
! if (attr == NULL)
! return 0;
! bits |= (*attr << ATTRSHFT) & bit;
if (bits == bit)
return bits;
! pv = pa_to_pv(pa);
if (pv->pv_idx < 0)
return 0;
***************
*** 1256,1281 ****
vm_offset_t va;
pte_t *ptp;
struct pte_ovfl *po, *npo;
! int i, s, pind, idx;
!
pa &= ~ADDR_POFF;
if (prot & VM_PROT_READ) {
ptemodify(pa, PTE_PP, PTE_RO);
return;
}
! pind = pmap_page_index(pa);
! if (pind < 0)
return;
s = splimp();
! while (pv_table[pind].pv_idx >= 0) {
! idx = pv_table[pind].pv_idx;
! va = pv_table[pind].pv_va;
for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++)
if ((ptp->pte_hi & PTE_VALID)
&& (ptp->pte_lo & PTE_RPGN) == pa) {
! pmap_remove_pv(idx, va, pind, ptp);
ptp->pte_hi &= ~PTE_VALID;
asm volatile ("sync");
tlbie(va);
--- 1317,1343 ----
vm_offset_t va;
pte_t *ptp;
struct pte_ovfl *po, *npo;
! int i, s, idx;
! struct pv_entry *pv;
!
pa &= ~ADDR_POFF;
if (prot & VM_PROT_READ) {
ptemodify(pa, PTE_PP, PTE_RO);
return;
}
! pv = pa_to_pv(pa);
! if (pv == NULL)
return;
s = splimp();
! while (pv->pv_idx >= 0) {
! idx = pv->pv_idx;
! va = pv->pv_va;
for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++)
if ((ptp->pte_hi & PTE_VALID)
&& (ptp->pte_lo & PTE_RPGN) == pa) {
! pmap_remove_pv(idx, va, pa, ptp);
ptp->pte_hi &= ~PTE_VALID;
asm volatile ("sync");
tlbie(va);
***************
*** 1285,1291 ****
for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++)
if ((ptp->pte_hi & PTE_VALID)
&& (ptp->pte_lo & PTE_RPGN) == pa) {
! pmap_remove_pv(idx, va, pind, ptp);
ptp->pte_hi &= ~PTE_VALID;
asm volatile ("sync");
tlbie(va);
--- 1347,1353 ----
for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++)
if ((ptp->pte_hi & PTE_VALID)
&& (ptp->pte_lo & PTE_RPGN) == pa) {
! pmap_remove_pv(idx, va, pa, ptp);
ptp->pte_hi &= ~PTE_VALID;
asm volatile ("sync");
tlbie(va);
***************
*** 1295,1301 ****
for (po = potable[idx].lh_first; po; po = npo) {
npo = po->po_list.le_next;
if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
! pmap_remove_pv(idx, va, pind, &po->po_pte);
LIST_REMOVE(po, po_list);
pofree(po, 1);
goto next;
--- 1357,1363 ----
for (po = potable[idx].lh_first; po; po = npo) {
npo = po->po_list.le_next;
if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
! pmap_remove_pv(idx, va, pa, &po->po_pte);
LIST_REMOVE(po, po_list);
pofree(po, 1);
goto next;