Index: /usr/src/sys/powerpc/aim/mmu_oea64.c
===================================================================
--- /usr/src/sys/powerpc/aim/mmu_oea64.c	(revision 347549)
+++ /usr/src/sys/powerpc/aim/mmu_oea64.c	(working copy)
@@ -503,7 +503,7 @@
 	register_t	msr;
 	vm_offset_t	off;
 	vm_paddr_t	pa_base;
-	int		i, j;
+	int		i, istep, j;
 
 	bzero(translations, sz);
 	OF_getencprop(OF_finddevice("/"), "#address-cells", &acells,
@@ -513,7 +513,8 @@
 
 	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
 	sz /= sizeof(cell_t);
-	for (i = 0, j = 0; i < sz; j++) {
+	istep = (acells == 2) ? 5 : 4;
+	for (i = 0, j = 0; i+istep-1 < sz; j++) {
 		translations[j].om_va = trans_cells[i++];
 		translations[j].om_len = trans_cells[i++];
 		translations[j].om_pa = trans_cells[i++];
@@ -523,7 +524,7 @@
 		}
 		translations[j].om_mode = trans_cells[i++];
 	}
-	KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
+	KASSERT(i+sz%istep == sz, ("Translations map has incorrect cell count (%d/%zd)",
 	    i, sz));
 
 	sz = j;
@@ -956,7 +957,9 @@
 	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
 
 	/*
-	 * Map the entire KVA range into the SLB. We must not fault there.
+	 * Try to map the entire KVA range into the SLB. We must not
+	 * fault there, but later entries may "randomly" replace
+	 * earlier ones when the SLB cannot span the whole range.
 	 */
 #ifdef __powerpc64__
 	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
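
For reference, here is a stand-alone sketch of the cell-stepping logic the patched
loop relies on: each entry in the OFW "translations" property is istep cells wide
(va, len, a one- or two-cell pa depending on "#address-cells", and mode), so the
loop stops before a truncated trailing entry instead of reading past the buffer.
The names used below (parse_translations, ofw_map_sketch) are illustrative only
and are not part of the kernel sources.

#include <stdint.h>
#include <stddef.h>

typedef uint32_t cell_t;

struct ofw_map_sketch {
	uint64_t om_va;
	uint64_t om_len;
	uint64_t om_pa;
	uint32_t om_mode;
};

static size_t
parse_translations(const cell_t *cells, size_t sz, int acells,
    struct ofw_map_sketch *out)
{
	size_t i, j, istep;

	/* 5 cells per entry when physical addresses take two cells. */
	istep = (acells == 2) ? 5 : 4;

	/* Only consume entries for which all istep cells are present. */
	for (i = 0, j = 0; i + istep - 1 < sz; j++) {
		out[j].om_va  = cells[i++];
		out[j].om_len = cells[i++];
		out[j].om_pa  = cells[i++];
		if (acells == 2) {
			out[j].om_pa <<= 32;
			out[j].om_pa |= cells[i++];
		}
		out[j].om_mode = cells[i++];
	}
	return (j);	/* number of complete entries parsed */
}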