Lines 302-314
exclusion_bounce(bus_dma_tag_t dmat)

 }
 
 /*
- * Return true if the given address does not fall on the alignment boundary.
+ * Return true if the given physical address does not fall on the alignment
+ * boundary.
  */
 static __inline int
-alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
+alignment_bounce(bus_dma_tag_t dmat, bus_addr_t paddr)
 {
 
-	return (addr & (dmat->alignment - 1));
+	return (paddr & (dmat->alignment - 1));
 }
 
 /*
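For context, the mask test above relies on busdma alignment values being powers of two, so paddr & (alignment - 1) isolates the offset of paddr within an alignment-sized block and is nonzero exactly when the address is misaligned. A minimal userland sketch of the same check; is_misaligned is a made-up name, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the mask test used by alignment_bounce(). */
static int
is_misaligned(uint64_t paddr, uint64_t alignment)
{
	/* Assumes alignment is a power of two, as busdma alignments are. */
	return ((paddr & (alignment - 1)) != 0);
}

int
main(void)
{
	printf("%d\n", is_misaligned(0x20000000UL, 0x10000UL));	/* 0: aligned */
	printf("%d\n", is_misaligned(0x20000800UL, 0x10000UL));	/* 1: would bounce */
	return (0);
}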
Lines 337-354
cacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size)

  * This is used to quick-check whether we need to do the more expensive work of
  * checking the DMA page-by-page looking for alignment and exclusion bounces.
  *
- * Note that the addr argument might be either virtual or physical. It doesn't
- * matter because we only look at the low-order bits, which are the same in both
- * address spaces.
+ * Note that the paddr argument must be a physical address because the requested
+ * alignment can be bigger than the page size.
  */
 static __inline int
-might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr,
+might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
     bus_size_t size)
 {
 
 	return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) ||
-	    alignment_bounce(dmat, addr) ||
-	    cacheline_bounce(map, addr, size));
+	    alignment_bounce(dmat, paddr) ||
+	    cacheline_bounce(map, paddr, size));
 }
 
 /*
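The rationale in the rewritten comment can be shown with concrete numbers: a kernel virtual address and the physical address backing it agree only in the page-offset bits (the low 12 bits for 4 KB pages), so once the tag's alignment exceeds the page size the virtual address can look aligned while the physical address is not. A small sketch with made-up addresses; nothing here comes from the patch:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * Hypothetical mapping: virtual page 0xc0010000 is backed by
	 * physical page 0x20001000.  The low 12 bits match; the bits
	 * above the page offset generally do not.
	 */
	uint64_t va = 0xc0010000UL;
	uint64_t pa = 0x20001000UL;
	uint64_t align = 64 * 1024UL;	/* 64 KB, larger than a 4 KB page */

	printf("va misaligned: %d\n", (int)((va & (align - 1)) != 0));	/* 0 */
	printf("pa misaligned: %d\n", (int)((pa & (align - 1)) != 0));	/* 1 */
	return (0);
}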
Lines 1028-1034
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,

 	counter_u64_add(maploads_total, 1);
 	counter_u64_add(maploads_physmem, 1);
 
-	if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
+	if (might_bounce(dmat, map, pmap_kextract((vm_offset_t)buf), buflen)) {
 		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
 		if (map->pagesneeded != 0) {
 			counter_u64_add(maploads_bounced, 1);
Lines 1124-1130
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,

 		map->flags |= DMAMAP_MBUF;
 	}
 
-	if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
+	if (might_bounce(dmat, map, pmap_kextract((vm_offset_t)buf), buflen)) {
 		_bus_dmamap_count_pages(dmat, pmap, map, buf, buflen, flags);
 		if (map->pagesneeded != 0) {
 			counter_u64_add(maploads_bounced, 1);
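The patched call sites pass the result of pmap_kextract(9), which resolves a kernel virtual address to its physical address, into might_bounce(). The userland sketch below mimics that translate-then-check order with a toy translation table; toy_kextract and its addresses are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)

/*
 * Toy stand-in for pmap_kextract(9): map two "virtual" pages onto fixed
 * "physical" pages.  The real function walks the kernel pmap.
 */
static uint64_t
toy_kextract(uint64_t va)
{
	static const uint64_t va_base = 0xc0000000UL;
	static const uint64_t pa_pages[] = { 0x20001000UL, 0x2000a000UL };

	return (pa_pages[(va - va_base) / PAGE_SIZE] | (va & PAGE_MASK));
}

static int
misaligned(uint64_t paddr, uint64_t alignment)
{
	return ((paddr & (alignment - 1)) != 0);
}

int
main(void)
{
	uint64_t va = 0xc0000000UL;	/* page-aligned virtual buffer */
	uint64_t align = 64 * 1024UL;	/* tag demands 64 KB alignment */

	/* Translate first, then check, as the patched call sites do. */
	printf("check on va: %d\n", misaligned(va, align));			/* 0 */
	printf("check on pa: %d\n", misaligned(toy_kextract(va), align));	/* 1 */
	return (0);
}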