/*
 * Crude memory management for the PowerPC bootloader's early boot
 * phase: physical and virtual memory are tracked as sorted linked
 * lists of "map" areas, page translation goes through a classic
 * 32-bit PowerPC hash table filled lazily by a fault handler, and a
 * small family of allocators (__palloc/pfree, salloc/sfree,
 * valloc/vfree/vmap/vunmap) is built on top.
 */

#include <rtems/bspIo.h>

#include <sys/param.h>
#include <sys/types.h>
#include <libcpu/spr.h>
#include "bootldr.h"
#include <libcpu/mmu.h>
#include <limits.h>

/* Round up to the next page boundary (PAGE_MASK == PAGE_SIZE-1 here). */
#define PAGE_ALIGN(addr) (((addr) + PAGE_MASK) & ~PAGE_MASK)
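/* For example, with the usual PAGE_SIZE of 0x1000 and PAGE_MASK of
 * 0xfff, PAGE_ALIGN(0x1234) == 0x2000 and PAGE_ALIGN(0x2000) == 0x2000.
 */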

/* Assembly entry points provided by the bootloader's exception glue. */
extern void (tlb_handlers)(void);
extern void (_handler_glue)(void);

/* A contiguous area of memory, kept on singly linked lists sorted by
 * base address.  For actual mappings, firstpte holds the physical base
 * and attribute bits of the first page; on the bookkeeping lists it
 * holds one of the MAP_* tags below.
 */
typedef struct _map {
    struct _map *next;
    u_long base;
    u_long end;
    u_long firstpte;
} map;

/* Tags stored in "firstpte" to identify which list a map belongs to. */
#define MAP_FREE_SUBS 6
#define MAP_USED_SUBS 7

#define MAP_FREE 4
#define MAP_FREE_PHYS 12
#define MAP_USED_PHYS 13
#define MAP_FREE_VIRT 20
#define MAP_USED_VIRT 21
#define MAP_SUBS_PHYS 28
#define MAP_PERM_PHYS 29
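/* For the used/free pairs above, the low bit is set on the "used"
 * variant.  A hypothetical helper (not part of the original code)
 * could test it like this:
 *
 *   static inline int map_is_used(const map *m) {
 *       return (int)(m->firstpte & 1);
 *   }
 */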

/* Generate inline accessors for the special-purpose registers used below. */
SPR_RW(SDR1);
SPR_RO(DSISR);
SPR_RO(PPC_DAR);

/* A small static pool of maps so that allocation works before the
 * first page is stolen for the free-map list.
 */
static map free_maps[4] = {{free_maps+1, 0, 0, MAP_FREE},
                           {free_maps+2, 0, 0, MAP_FREE},
                           {free_maps+3, 0, 0, MAP_FREE},
                           {NULL, 0, 0, MAP_FREE}};

struct _mm_private {
    void *sdr1;          /* base of the page hash table              */
    u_long hashmask;     /* mask for hash -> table byte offset       */
    map *freemaps;       /* free list of unused map structures       */
    map *mappings;       /* areas the fault handler may map          */
    map *physavail;      /* available physical memory                */
    map *physused;       /* physical memory allocated with __palloc  */
    map *physperm;       /* permanently allocated physical memory    */
    map *virtavail;      /* available virtual address space          */
    map *virtused;       /* virtual space reserved with valloc       */
    map *sallocfree;     /* free pieces inside salloc's arenas       */
    map *sallocused;     /* pieces handed out by salloc              */
    map *sallocphys;     /* physical arenas backing salloc           */
    u_int hashcnt;       /* round-robin PTE victim counter           */
} mm_private = {.hashmask = 0xffc0,
                .freemaps = free_maps+0};

/* One 8-byte PTE slot in the hash table. */
typedef struct _hash_entry {
    int key;             /* word 0 of the PTE, negative when valid     */
    u_long rpn;          /* word 1: physical page number + attributes  */
} hash_entry;
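/* Each hash_entry mirrors a classic 32-bit PowerPC PTE:
 *   key: V (0x80000000) | VSID<<7 | H (0x40) | API ((vaddr>>22)&0x3f)
 *   rpn: physical page number together with the attribute bits
 * so "key < 0" tests the valid bit.  The fault handler below builds
 * keys exactly this way.
 */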

void print_maps(map *, const char *);

/* Fault handler shared by the DSI (0x300), ISI (0x400), alignment
 * (0x600) and program (0x700) vectors: install missing hash-table
 * entries for known mappings, panic on everything else.
 */
void _handler(int vec, ctxt *p) {
    map *area;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    u_long vaddr, cause;
    /* ISI and program exceptions report through srr0/srr1 ... */
    if (vec==4 || vec==7) {
        vaddr = p->nip;
        cause = p->msr;
    } else {    /* ... DSI and alignment exceptions through DAR/DSISR. */
        vaddr = _read_PPC_DAR();
        cause = _read_DSISR();
    }

    if (vec==3 || vec==4) {    /* DSI or ISI: possibly a hash-table miss */
        /* Bit 0x40000000 of DSISR (srr1 for an ISI) is set when no PTE
         * was found; anything else is a protection violation.
         */
        if (!(cause & 0x40000000)) {
            MMUon();
            printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
            hang("Memory protection violation at ", vaddr, p);
        }

        /* Find the mapping, if any, that covers the faulting address. */
        for(area=mm->mappings; area; area=area->next) {
            if(area->base<=vaddr && vaddr<=area->end) break;
        }

        if (area) {
            u_long hash, vsid, rpn;
            hash_entry volatile *hte, *_hte1;
            u_int i, alt=0, flushva;

            vsid = _read_SR((void *)vaddr);
            /* Physical page (plus attribute bits) for the faulting
             * page: offset of the page into the area, added to the
             * area's first PTE.
             */
            rpn = (vaddr&~PAGE_MASK)-area->base+area->firstpte;
            /* Classic 32-bit PowerPC primary hash: low VSID bits XORed
             * with the page index, scaled to a 64-byte PTEG offset.
             */
            hash = vsid<<6;
            hash ^= (vaddr>>(PAGE_SHIFT-6))&0x3fffc0;
            hash &= mm->hashmask;

            hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
            /* Look for a free slot (valid bit clear, i.e. key >= 0)
             * in the primary PTEG, then in the secondary one.
             */
            for (i=0; i<8; i++) {
                if (hte[i].key>=0) goto found;
            }
            hash ^= mm->hashmask;
            alt = 0x40; _hte1 = hte;
            hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);

            for (i=0; i<8; i++) {
                if (hte[i].key>=0) goto found;
            }
            alt = 0;
            hte = _hte1;

            /* Both PTEGs are full: evict a victim from the primary
             * PTEG, chosen round-robin through hashcnt, and flush the
             * TLB entry it may still have.
             */
            i = mm->hashcnt;
            mm->hashcnt = (mm->hashcnt+1)%8;

            /* Reconstruct the victim's virtual address from its PTEG
             * and its key.  "hash" still holds the secondary PTEG
             * offset at this point, hence the complement; shifting the
             * byte offset by 6 yields virtual-address bits 12..21,
             * matching the recovery done in vflush() below.
             */
            flushva = (~(hash<<6)^((hte[i].key)<<5)) &0x3ff000;
            if (hte[i].key&0x40) flushva^=0x3ff000;
            flushva |= ((hte[i].key<<21)&0xf0000000)
                | ((hte[i].key<<22)&0x0fc00000);
            hte[i].key=0;
            asm volatile("sync; tlbie %0, 0; sync" : : "r" (flushva));
        found:
            /* Fill the PTE: rpn first, then the key with the valid
             * bit, the VSID, the hash-function bit and the API.
             */
            hte[i].rpn = rpn;
            asm volatile("eieio": : );
            hte[i].key = 0x80000000|(vsid<<7)|alt|
                ((vaddr>>22)&0x3f);
            return;
        } else {
            MMUon();
            printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
            hang("\nInvalid memory access attempt at ", vaddr, p);
        }
    } else {
        MMUon();
        printk(
            "\nPanic: vector=%d, dsisr=%lx, faultaddr =%lx, "
            "msr=%lx opcode=%x\n", vec,
            cause, p->nip, p->msr, * ((unsigned int*) p->nip) );
        if (vec == 7) {
            /* Dump the ten instructions around the failing one
             * (pointer arithmetic on unsigned int* is in words).
             */
            unsigned int* ptr = ((unsigned int*) p->nip) - 10;
            for (; ptr <= (((unsigned int*) p->nip) + 10); ptr ++)
                printk("Hex code at address %p = %x\n", ptr, *ptr);
        }
        hang("Program or alignment exception at ", vaddr, p);
    }
}
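
/* A worked example of the hashing above (illustrative values only):
 * with VSID 0 (segment 0 after mm_init) and vaddr 0x00123000,
 *   hash = (0<<6) ^ ((0x00123000>>6) & 0x3fffc0) = 0x48c0,
 *   hash &= 0xffc0                               -> 0x48c0,
 * i.e. the PTEG at byte offset 0x48c0 in the 64 KB table; the
 * secondary PTEG lives at 0x48c0 ^ 0xffc0 = 0xb700.
 */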

/* Return a map structure to the free-map pool. */
static inline
void free_map(map *p) {
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    if (!p) return;
    p->next=mm->freemaps;
    mm->freemaps=p;
    p->firstpte=MAP_FREE;
}

/* Insert p into a list kept sorted by base address; overlapping
 * areas are rejected (p is freed and 1 returned).
 */
static
int insert_map(map **head, map *p) {
    map *q = *head;
    if (!p) return 0;
    if (q && (q->base < p->base)) {
        for(;q->next && q->next->base<p->base; q = q->next);
        if ((q->end >= p->base) ||
            (q->next && p->end>=q->next->base)) {
            free_map(p);
            printk("Overlapping areas!\n");
            return 1;
        }
        p->next = q->next;
        q->next = p;
    } else {
        if (q && (p->end >= q->base)) {
            free_map(p);
            printk("Overlapping areas!\n");
            return 1;
        }
        p->next = q;
        *head = p;
    }
    return 0;
}

/* Unlink p from a list; returns p, or NULL if it was not found. */
static
map *remove_map(map **head, map *p) {
    map *q = *head;

    if (!p || !q) return NULL;
    if (q==p) {
        *head = q->next;
        return p;
    }
    for(;q && q->next!=p; q=q->next);
    if (q) {
        q->next=p->next;
        return p;
    } else {
        return NULL;
    }
}

/* Unlink and return the map whose base is exactly vaddr, if any. */
static
map *remove_map_at(map **head, void * vaddr) {
    map *p, *q = *head;

    if (!vaddr || !q) return NULL;
    if (q->base==(u_long)vaddr) {
        *head = q->next;
        return q;
    }
    while (q->next && q->next->base != (u_long)vaddr) q=q->next;
    p=q->next;
    if (p) q->next=p->next;
    return p;
}

/* Refill the free-map pool: steal one page from the end of the last
 * (highest) free physical area and carve it into map structures.
 */
static inline
map * alloc_map_page(void) {
    map *from, *p;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    for (from=mm->physavail; from && from->next; from=from->next);
    if (!from) return NULL;

    from->end -= PAGE_SIZE;

    mm->freemaps = (map *) (from->end+1);

    /* Link all entries but the last onto the free list. */
    for(p=mm->freemaps; p<mm->freemaps+PAGE_SIZE/sizeof(map)-1; p++) {
        p->next = p+1;
        p->firstpte = MAP_FREE;
    }
    (p-1)->next=0;

    /* The last entry of the page describes the page itself, so that it
     * can be entered into the permanently-allocated list.
     */
    p->firstpte = MAP_PERM_PHYS;
    p->base=(u_long) mm->freemaps;
    p->end = p->base+PAGE_SIZE-1;

    insert_map(&mm->physperm, p);

    if (from->end+1 == from->base)
        free_map(remove_map(&mm->physavail, from));

    return mm->freemaps;
}

/* Get a free map structure, refilling the pool if necessary; returns
 * NULL only when physical memory is exhausted.
 */
static
map * alloc_map(void) {
    map *p;
    struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

    p = mm->freemaps;
    if (!p) {
        p=alloc_map_page();
    }

    if(p) mm->freemaps=p->next;

    return p;
}

/* Merge adjacent areas on a sorted list. */
static
void coalesce_maps(map *p) {
    while(p) {
        if (p->next && (p->end+1 == p->next->base)) {
            map *q=p->next;
            p->end=q->end;
            p->next=q->next;
            free_map(q);
        } else {
            p = p->next;
        }
    }
}
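
/* For instance, freeing the middle one of three physical areas
 * 0x1000-0x1fff, 0x2000-0x2fff and 0x3000-0x3fff leaves three adjacent
 * list entries that coalesce_maps() merges back into a single
 * 0x1000-0x3fff area.
 */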

#define STACK_PAGES 2

/* Return the end page of the highest memory zone, below lowpage, whose
 * residual-data Usage matches flags (0 when none is left).
 */
static inline u_long
find_next_zone(RESIDUAL *res, u_long lowpage, u_long flags) {
    u_long i, newmin=0, size=0;
    for(i=0; i<res->ActualNumMemSegs; i++) {
        if (res->Segs[i].Usage & flags
            && res->Segs[i].BasePage<lowpage
            && res->Segs[i].BasePage>newmin) {
            newmin=res->Segs[i].BasePage;
            size=res->Segs[i].PageCount;
        }
    }
    return newmin+size;
}

/* Extend a zone downward across adjacent segments whose Usage matches
 * flags, returning its start page.
 */
static inline u_long
find_zone_start(RESIDUAL *res, u_long highpage, u_long flags) {
    u_long i;
    int progress;
    do {
        progress=0;
        for (i=0; i<res->ActualNumMemSegs; i++) {
            if ( (res->Segs[i].BasePage+res->Segs[i].PageCount
                  == highpage)
                 && res->Segs[i].Usage & flags) {
                highpage=res->Segs[i].BasePage;
                progress=1;
            }
        }
    } while(progress);
    return highpage;
}
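
/* Together these helpers walk the residual memory map from the top
 * down.  An illustrative example (segment values made up): with a Free
 * segment covering pages 0x000-0x7ff and a BootImage segment covering
 * 0x800-0x967, find_next_zone(res, ULONG_MAX, BootImage|Free) returns
 * 0x968 (the end of the highest matching zone), and
 * find_zone_start(res, 0x968, BootImage|Free) walks back across both
 * segments and returns 0.
 */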

/* Replacement memory-segment table used to repair the residual data
 * handed over by some firmware (see fix_residual below); the values
 * are Usage bits, base page numbers and page counts.
 */
MEM_MAP seg_fix[] = {
    { 0x2000, 0xFFF00, 0x00100 },
    { 0x0020, 0x02000, 0x7E000 },
    { 0x0008, 0x00800, 0x00168 },
    { 0x0004, 0x00000, 0x00005 },
    { 0x0001, 0x006F1, 0x0010F },
    { 0x0002, 0x006AD, 0x00044 },
    { 0x0010, 0x00005, 0x006A8 },
    { 0x0010, 0x00968, 0x00698 },
    { 0x0800, 0xC0000, 0x3F000 },
    { 0x0600, 0xBF800, 0x00800 },
    { 0x0500, 0x81000, 0x3E800 },
    { 0x0480, 0x80800, 0x00800 },
    { 0x0440, 0x80000, 0x00800 } };

/* Overwrite the firmware-supplied memory segments with the fixed
 * table above.
 */
static void
fix_residual( RESIDUAL *res )
{
#if 0
    PPC_DEVICE *hostbridge;
#endif
    int i;

    res->ActualNumMemSegs = sizeof(seg_fix)/sizeof(MEM_MAP);
    for (i=0; i<res->ActualNumMemSegs; i++) {
        res->Segs[i].Usage = seg_fix[i].Usage;
        res->Segs[i].BasePage = seg_fix[i].BasePage;
        res->Segs[i].PageCount = seg_fix[i].PageCount;
    }

#if 0
    res->VitalProductData.CacheLineSize = 0;

    if ( res->VitalProductData.TimeBaseDivisor == 0 ) {
        res->VitalProductData.TimeBaseDivisor = 4000;
    }

    /* Fix the host bridge device entry. */
    hostbridge=residual_find_device(PCIDEVICE, NULL,
                                    BridgeController,
                                    PCIBridge, -1, 0);
    if (hostbridge) {
        hostbridge->DeviceId.BusId = PROCESSORDEVICE;
        hostbridge->DeviceId.Interface = PCIBridgeIndirect;
    }
#endif
}

/* Runs before the MMU is enabled: choose where to put the image, the
 * stack and the code mover, using only the residual data.
 */
int early_setup(u_long image_size) {
    register RESIDUAL *res = bd->residual;
    u_long minpages = PAGE_ALIGN(image_size)>>PAGE_SHIFT;

    if ( residual_fw_is_qemu( res ) ) {
        /* QEMU passes the kernel command line in r6/r7. */
        int len = bd->r7 - bd->r6;
        if ( len > 0 ) {
            if ( len > sizeof(bd->cmd_line) - 1 )
                len = sizeof(bd->cmd_line) - 1;
            codemove(bd->cmd_line, bd->r6, len, bd->cache_lsize);
            bd->cmd_line[len] = 0;
        }
    }

    /* Repair broken residual data (apparently from Motorola firmware). */
    if ( res && res->VitalProductData.FirmwareSupplier == 0x10000 )
        fix_residual( res );

    /* Only trust the residual data when no OpenFirmware entry point
     * exists and the structure looks sane.
     */
    if( !bd->of_entry && res &&
        res->ResidualLength <= sizeof(RESIDUAL) && res->Version == 0 ) {
        u_long lowpage=ULONG_MAX, highpage;
        u_long imghigh=0, stkhigh=0;

        /* Walk the free/boot-image zones from the top of memory down
         * and reserve space for the image and the stack as high as
         * possible.
         */
        while((highpage =
               find_next_zone(res, lowpage, BootImage|Free))) {
            lowpage=find_zone_start(res, highpage, BootImage|Free);
            if ((highpage-lowpage)>minpages &&
                highpage>imghigh) {
                imghigh=highpage;
                highpage -=minpages;
            }
            if ((highpage-lowpage)>STACK_PAGES &&
                highpage>stkhigh) {
                stkhigh=highpage;
                highpage-=STACK_PAGES;
            }
        }

        bd->image = (void *)((imghigh-minpages)<<PAGE_SHIFT);
        bd->stack=(void *) (stkhigh<<PAGE_SHIFT);

        /* The code mover goes into the lowest zone examined. */
        bd->mover=(void *) (lowpage<<PAGE_SHIFT);

        /* Safe default for all supported processors. */
        bd->cache_lsize = 32;
    }

    return 0;
}

/* Reserve size bytes of virtual address space (rounded up to whole
 * pages); the pages become usable once attached with vmap().
 */
void * valloc(u_long size) {
    map *p, *q;
    struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

    if (size==0) return NULL;
    size=PAGE_ALIGN(size)-1;    /* size now holds length-1 */
    for (p=mm->virtavail; p; p=p->next) {
        if (p->base+size <= p->end) break;
    }
    if(!p) return NULL;
    q=alloc_map();
    q->base=p->base;
    q->end=q->base+size;
    q->firstpte=MAP_USED_VIRT;
    insert_map(&mm->virtused, q);
    if (q->end==p->end) free_map(remove_map(&mm->virtavail, p));
    else p->base += size+1;
    return (void *)q->base;
}

/* Invalidate every hash-table entry whose virtual address falls inside
 * virtmap, flushing the corresponding TLB entries.
 */
static
void vflush(map *virtmap) {
    struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
    u_long i, limit=(mm->hashmask>>3)+8;    /* number of PTE slots */
    hash_entry volatile *p=(hash_entry *) mm->sdr1;

    /* Reconstruct the virtual address of each valid entry (key < 0)
     * from its index and its key, the inverse of what the fault
     * handler does when inserting one.
     */
    for (i=0; i<limit; i++) {
        if (p[i].key<0) {
            u_long va;
            va = ((i<<9)^((p[i].key)<<5)) &0x3ff000;
            if (p[i].key&0x40) va^=0x3ff000;
            va |= ((p[i].key<<21)&0xf0000000)
                | ((p[i].key<<22)&0x0fc00000);
            if (va>=virtmap->base && va<=virtmap->end) {
                p[i].key=0;
                asm volatile("sync; tlbie %0, 0; sync" : :
                             "r" (va));
            }
        }
    }
}

/* Release a virtual area obtained from valloc(), dropping any mappings
 * that were attached to it.
 */
void vfree(void *vaddr) {
    map *physmap, *virtmap;
    struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

    /* Make sure all pending stores complete before unmapping. */
    asm volatile("sync": : : "memory");

    virtmap = remove_map_at(&mm->virtused, vaddr);
    if (!virtmap) return;

    /* Remove all mappings that start inside the freed area. */
    for (physmap=mm->mappings; physmap; ) {
        map *nextmap=physmap->next;
        if (physmap->base>=virtmap->base
            && physmap->base<virtmap->end) {
            free_map(remove_map(&mm->mappings, physmap));
        }
        physmap=nextmap;
    }

    vflush(virtmap);

    virtmap->firstpte= MAP_FREE_VIRT;
    insert_map(&mm->virtavail, virtmap);
    coalesce_maps(mm->virtavail);
}

/* Detach the mapping that starts at vaddr without releasing the
 * virtual area itself.
 */
void vunmap(void *vaddr) {
    map *physmap, *virtmap;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    /* Make sure all pending stores complete before unmapping. */
    asm volatile("sync": : : "memory");

    /* The address must lie in an area allocated by valloc(). */
    for (virtmap=mm->virtused; virtmap; virtmap=virtmap->next) {
        if (virtmap->base<=(u_long)vaddr &&
            virtmap->end>=(u_long)vaddr) break;
    }
    if (!virtmap) return;

    physmap = remove_map_at(&mm->mappings, vaddr);
    if(!physmap) return;
    vflush(physmap);
    free_map(physmap);
}

/* Attach size bytes of physical memory, with the PTE attribute bits
 * included in p, at virtual address vaddr; the range must lie within
 * an area reserved with valloc().  Returns 0 on success.
 */
int vmap(void *vaddr, u_long p, u_long size) {
    map *q;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    size=PAGE_ALIGN(size);
    if(!size) return 1;

    for (q=mm->virtused; q; q=q->next) {
        if ((q->base <= (u_long)vaddr) &&
            (q->end>=(u_long)vaddr+size -1)) break;
    }
    if (!q) return 1;
    q= alloc_map();
    if (!q) return 1;
    q->base = (u_long)vaddr;
    q->end = (u_long)vaddr+size-1;
    q->firstpte = p;
    return insert_map(&mm->mappings, q);
}
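
/* Typical use of the virtual allocator (a sketch; the physical address
 * 0x80000000 and the size are illustrative, only PTE_IO comes from
 * this code): reserve virtual space, then attach physical pages to it
 * with vmap() so the fault handler can install the PTEs on demand.
 *
 *   void *v = valloc(2*PAGE_SIZE);
 *   if (v && vmap(v, 0x80000000|PTE_IO, 2*PAGE_SIZE) == 0) {
 *       ... access the device through v ...
 *       vunmap(v);
 *       vfree(v);
 *   }
 */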

/* Enter 1:1 mappings, with the given PTE attributes, for every
 * residual-data zone matching type.
 */
static
void create_identity_mappings(int type, int attr) {
    u_long lowpage=ULONG_MAX, highpage;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    RESIDUAL * res=bd->residual;

    while((highpage = find_next_zone(res, lowpage, type))) {
        map *p;
        lowpage=find_zone_start(res, highpage, type);
        p=alloc_map();
        /* Leave page 0 unmapped so that NULL dereferences fault. */
        lowpage = lowpage ? lowpage : 1;
        p->base=lowpage<<PAGE_SHIFT;
        p->end=(highpage<<PAGE_SHIFT)-1;
        p->firstpte = (lowpage<<PAGE_SHIFT)|attr;
        insert_map(&mm->mappings, p);
    }
}

static inline
void add_free_map(u_long base, u_long end) {
    map *q=NULL;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    /* end is exclusive; 0 stands for the top of the address space
     * (end-1 wraps to 0xffffffff).
     */
    if (base<end || !end) q=alloc_map();
    if (!q) return;
    q->base=base;
    q->end=end-1;
    q->firstpte=MAP_FREE_VIRT;
    insert_map(&mm->virtavail, q);
}

/* Whatever virtual address space is not claimed by the identity
 * mappings becomes available to valloc().
 */
static inline
void create_free_vm(void) {
    map *p;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    u_long vaddr=PAGE_SIZE;    /* page 0 stays reserved */
    for(p=mm->mappings; p; p=p->next) {
        add_free_map(vaddr, p->base);
        vaddr=p->end+1;
    }
    /* Add the tail, unless the last mapping reached the top of the
     * address space and vaddr wrapped to 0.
     */
    if (vaddr) add_free_map(vaddr,0);
}

/* Mark [start, start+size) as permanently allocated physical memory. */
static inline
void add_perm_map(u_long start, u_long size) {
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    map *p=alloc_map();
    p->base = start;
    p->end = start + size - 1;
    p->firstpte = MAP_PERM_PHYS;
    insert_map(&mm->physperm, p);
}

/* Set up the hash table, the segment registers, the identity mappings
 * and the exception handlers, and build the physical free list.
 */
void mm_init(u_long image_size)
{
    u_long lowpage=ULONG_MAX, highpage;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    RESIDUAL * res=bd->residual;
    int i;
    map *p;

    /* Rebuild the list of free physical zones, carving out the areas
     * already claimed by early_setup() for the image and the stack.
     */
    while((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
        lowpage=find_zone_start(res, highpage, BootImage|Free);
        if ( ( ((u_long)bd->image+PAGE_ALIGN(image_size))>>PAGE_SHIFT)
             == highpage) {
            highpage=(u_long)(bd->image)>>PAGE_SHIFT;
            add_perm_map((u_long)bd->image, image_size);
        }
        if ( (( u_long)bd->stack>>PAGE_SHIFT) == highpage) {
            highpage -= STACK_PAGES;
            add_perm_map(highpage<<PAGE_SHIFT,
                         STACK_PAGES*PAGE_SIZE);
        }
        /* Pages 0 and 1 are never handed out. */
        if (lowpage<2) lowpage=2;

        if (highpage>lowpage) {
            p = alloc_map();
            p->base = lowpage<<PAGE_SHIFT;
            p->end = (highpage<<PAGE_SHIFT)-1;
            p->firstpte=MAP_FREE_PHYS;
            insert_map(&mm->physavail, p);
        }
    }

    /* Allocate the minimal 64 KB page hash table, aligned on a 64 KB
     * boundary as the architecture requires, and point SDR1 at it.
     */
    mm->sdr1=__palloc(0x10000, PA_PERM|16);
    _write_SDR1((u_long)mm->sdr1);
    memset(mm->sdr1, 0, 0x10000);
    mm->hashmask = 0xffc0;

    /* Plain 1:1 VSIDs: segment i gets VSID i. */
    for (i=0; i<16; i++) _write_SR(i, (void *)(i<<28));

    /* Identity-map RAM, ROM and I/O with the appropriate PTE
     * attributes; the fault handler fills the hash table on demand.
     */
    create_identity_mappings(BootImage|Free|FirmwareCode|FirmwareHeap|
                             FirmwareStack, PTE_RAM);
    create_identity_mappings(SystemROM, PTE_ROM);
    create_identity_mappings(IOMemory|SystemIO|SystemRegs|
                             PCIAddr|PCIConfig|ISAAddr, PTE_IO);

    create_free_vm();

    /* Install the glue code at the DSI, ISI, alignment and program
     * exception vectors.
     */
    codemove((void *) 0x300, _handler_glue, 0x100, bd->cache_lsize);
    codemove((void *) 0x400, _handler_glue, 0x100, bd->cache_lsize);
    codemove((void *) 0x600, _handler_glue, 0x100, bd->cache_lsize);
    codemove((void *) 0x700, _handler_glue, 0x100, bd->cache_lsize);
}
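
/* Boot-time ordering, as implied by the code above: early_setup() runs
 * first, with the MMU still off, to pick safe places for the image,
 * the stack and the mover; mm_init() then builds the hash table, the
 * segment registers and the identity mappings, and installs the fault
 * handlers at the 0x300/0x400/0x600/0x700 vectors.
 */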

/* Allocate size bytes (rounded up to a multiple of 8) of zeroed memory
 * from the sub-allocation arenas, growing them through __palloc() when
 * needed.
 */
void * salloc(u_long size) {
    map *p, *q;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    if (size==0) return NULL;

    size = (size+7)&~7;

    for (p=mm->sallocfree; p; p=p->next) {
        if (p->base+size <= p->end) break;
    }
    if(!p) {
        void *m;
        m = __palloc(size, PA_SUBALLOC);
        p = alloc_map();
        if (!m || !p) return NULL;    /* need both to proceed */
        p->base = (u_long) m;
        p->firstpte = MAP_FREE_SUBS;
        p->end = (u_long)m+PAGE_ALIGN(size)-1;
        insert_map(&mm->sallocfree, p);
        coalesce_maps(mm->sallocfree);
        coalesce_maps(mm->sallocphys);
    }
    q=alloc_map();
    q->base=p->base;
    q->end=q->base+size-1;
    q->firstpte=MAP_USED_SUBS;
    insert_map(&mm->sallocused, q);
    if (q->end==p->end) free_map(remove_map(&mm->sallocfree, p));
    else p->base += size;
    memset((void *)q->base, 0, size);
    return (void *)q->base;
}

/* Return a salloc()ed block to the free pool. */
void sfree(void *p) {
    map *q;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    q=remove_map_at(&mm->sallocused, p);
    if (!q) return;
    q->firstpte=MAP_FREE_SUBS;
    insert_map(&mm->sallocfree, q);
    coalesce_maps(mm->sallocfree);
}
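
/* salloc()/sfree() form a small zeroing sub-allocator on top of
 * __palloc().  A sketch of typical use (the type and size are
 * illustrative):
 *
 *   struct foo *f = salloc(sizeof(struct foo));  // zero-filled,
 *   ...                                          // 8-byte granular
 *   sfree(f);
 */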

/* Allocate physical memory.  The low bits of flags give the log2 of
 * the required alignment; PA_SUBALLOC, PA_PERM and PA_LOW select the
 * bookkeeping queue and the allocation direction.
 */
void * __palloc(u_long size, int flags)
{
    u_long mask = ((1<<(flags&PA_ALIGN_MASK))-1);
    map *newmap, *frommap, *p, *splitmap=0;
    map **queue;
    u_long qflags;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    /* Asking for a size which is not a multiple of the alignment is
     * almost certainly an error.
     */
    if (size & mask) return NULL;
    size = PAGE_ALIGN(size);
    if(!size) return NULL;

    if (flags&PA_SUBALLOC) {
        queue = &mm->sallocphys;
        qflags = MAP_SUBS_PHYS;
    } else if (flags&PA_PERM) {
        queue = &mm->physperm;
        qflags = MAP_PERM_PHYS;
    } else {
        queue = &mm->physused;
        qflags = MAP_USED_PHYS;
    }

    /* An aligned allocation may split a free area into two pieces, so
     * reserve a spare map up front.
     */
    if (mask&PAGE_MASK) {
        splitmap=alloc_map();
        if (!splitmap) return NULL;
    }

    /* Find a suitable free area; the second condition guards against
     * overflow of the alignment computation.
     */
    for (p=mm->physavail, frommap=NULL; p; p=p->next) {
        u_long high = p->end;
        u_long limit = ((p->base+mask)&~mask) + size-1;
        if (high>=limit && ((p->base+mask)&~mask)+size>p->base) {
            frommap = p;
            if (flags&PA_LOW) break;
        }
    }

    if (!frommap) {
        if (splitmap) free_map(splitmap);
        return NULL;
    }

    newmap=alloc_map();

    /* PA_LOW allocates from the bottom of the area, the default from
     * the top.
     */
    if (flags&PA_LOW) {
        newmap->base = (frommap->base+mask)&~mask;
    } else {
        newmap->base = (frommap->end +1 - size) & ~mask;
    }

    newmap->end = newmap->base+size-1;
    newmap->firstpte = qflags;

    /* Trim the free area, splitting it in two if the allocation fell
     * in the middle.
     */
    if (splitmap) {
        splitmap->base=newmap->base+size;
        splitmap->end=frommap->end;
        splitmap->firstpte= MAP_FREE_PHYS;
        frommap->end=newmap->base-1;
    } else if (flags & PA_LOW) {
        frommap->base=newmap->base+size;
    } else {
        frommap->end=newmap->base-1;
    }

    /* Drop any free area that became empty. */
    if (frommap->base == frommap->end+1) {
        free_map(remove_map(&mm->physavail, frommap));
    }

    if (splitmap) {
        if (splitmap->base == splitmap->end+1) {
            free_map(remove_map(&mm->physavail, splitmap));
        } else {
            insert_map(&mm->physavail, splitmap);
        }
    }

    insert_map(queue, newmap);
    return (void *) newmap->base;
}
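
/* The low bits of "flags" (PA_ALIGN_MASK) hold the log2 of the
 * requested alignment.  mm_init() above, for example, calls
 * __palloc(0x10000, PA_PERM|16) to obtain a permanent 64 KB area
 * aligned on a 64 KB (1<<16) boundary for the page hash table.
 */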

/* Release memory obtained from __palloc() without PA_PERM or
 * PA_SUBALLOC.
 */
void pfree(void * p) {
    map *q;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    q=remove_map_at(&mm->physused, p);
    if (!q) return;
    q->firstpte=MAP_FREE_PHYS;
    insert_map(&mm->physavail, q);
    coalesce_maps(mm->physavail);
}

#ifdef DEBUG

void print_maps(map *chain, const char *s) {
    map *p;
    printk("%s",s);
    for(p=chain; p; p=p->next) {
        printk(" %08lx-%08lx: %08lx\n",
               p->base, p->end, p->firstpte);
    }
}

void print_all_maps(const char * s) {
    u_long freemaps;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    map *free;
    printk("%s",s);
    print_maps(mm->mappings, " Currently defined mappings:\n");
    print_maps(mm->physavail, " Currently available physical areas:\n");
    print_maps(mm->physused, " Currently used physical areas:\n");
    print_maps(mm->virtavail, " Currently available virtual areas:\n");
    print_maps(mm->virtused, " Currently used virtual areas:\n");
    print_maps(mm->physperm, " Permanently used physical areas:\n");
    print_maps(mm->sallocphys, " Physical memory used for salloc:\n");
    print_maps(mm->sallocfree, " Memory available for salloc:\n");
    print_maps(mm->sallocused, " Memory allocated through salloc:\n");
    for (freemaps=0, free=mm->freemaps; free; freemaps++, free=free->next);
    printk(" %ld free maps.\n", freemaps);
}

/* Dump the valid hash-table entries; the count is taken twice, so a
 * difference between the passes indicates the table changed while it
 * was being dumped.
 */
void print_hash_table(void) {
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    hash_entry *p=(hash_entry *) mm->sdr1;
    u_int i, valid=0;
    for (i=0; i<((mm->hashmask)>>3)+8; i++) {
        if (p[i].key<0) valid++;
    }
    printk("%u valid hash entries on pass 1.\n", valid);
    valid = 0;
    for (i=0; i<((mm->hashmask)>>3)+8; i++) {
        if (p[i].key<0) valid++;
    }
    printk("%u valid hash entries on pass 2.\n"
           " vpn:rpn_attr, p/s, pteg.i\n", valid);
    for (i=0; i<((mm->hashmask)>>3)+8; i++) {
        if (p[i].key<0) {
            u_int pteg=(i>>3);
            u_long vpn;
            vpn = (pteg^((p[i].key)>>7)) &0x3ff;
            if (p[i].key&0x40) vpn^=0x3ff;
            vpn |= ((p[i].key<<9)&0xffff0000)
                | ((p[i].key<<10)&0xfc00);
            printk("%08lx:%08lx, %s, %5d.%d\n",
                   vpn, p[i].rpn, p[i].key&0x40 ? "sec" : "pri",
                   pteg, i%8);
        }
    }
}

#endif