dect/linux-2.6

swiotlb: Make internal bookkeeping functions have 'swiotlb_tbl' prefix.

The functions that operate on io_tlb_list/io_tlb_start/io_tlb_orig_addr
have the prefix 'swiotlb_tbl' now.

See "swiotlb: swiotlb: add swiotlb_tbl_map_single library function" for
full description of patchset.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Tested-by: Albert Herranz <albert_herranz@yahoo.es>
Konrad Rzeszutek Wilk 2010-05-10 15:54:20 -04:00
parent abbceff7d7
commit bfc5501f6d
1 changed file with 13 additions and 11 deletions

lib/swiotlb.c

@@ -61,8 +61,8 @@ enum dma_sync_target {
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in unmap_single and
- * sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ -492,7 +492,8 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
+                        int dir)
 {
         unsigned long flags;
         int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -532,7 +533,7 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 }
 
 static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size,
+swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
             int dir, int target)
 {
         int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
@@ -580,8 +581,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
         }
         if (!ret) {
                 /*
-                 * We are either out of memory or the device can't DMA
-                 * to GFP_DMA memory; fall back on map_single(), which
+                 * We are either out of memory or the device can't DMA to
+                 * GFP_DMA memory; fall back on map_single(), which
                  * will grab memory from the lowest available address range.
                  */
                 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ -599,7 +600,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        (unsigned long long)dev_addr);
 
                 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+                swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
                 return NULL;
         }
         *dma_handle = dev_addr;
@@ -617,8 +618,8 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
         if (!is_swiotlb_buffer(paddr))
                 free_pages((unsigned long)vaddr, get_order(size));
         else
-                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+                /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
+                swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
@@ -708,7 +709,7 @@ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
         BUG_ON(dir == DMA_NONE);
 
         if (is_swiotlb_buffer(paddr)) {
-                do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+                swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
                 return;
         }
 
@@ -751,7 +752,8 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
         BUG_ON(dir == DMA_NONE);
 
         if (is_swiotlb_buffer(paddr)) {
-                sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
+                swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+                                        target);
                 return;
         }
 
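
For readers outside the kernel tree: the comment in the first hunk refers to the quick range check (is_swiotlb_buffer()) that decides whether an address belongs to the bounce-buffer pool before the renamed swiotlb_tbl_* bookkeeping helpers touch it. The sketch below is a minimal userspace illustration of that layering only: a pool range check, an internal "tbl"-style helper, and a thin public wrapper that delegates to it. The pool, slot sizes, names (pool_tbl_unmap_single, is_pool_buffer) and bitmap bookkeeping are illustrative assumptions, not the kernel's implementation.

/* Minimal userspace sketch (not kernel code): a bounce-buffer pool with an
 * internal "tbl" helper behind a public wrapper, mirroring the naming split
 * this commit introduces.  Pool size, names and the bookkeeping are
 * illustrative assumptions only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SLOT_SIZE 2048          /* stand-in for 1 << IO_TLB_SHIFT */
#define NSLOTS    16

static char *pool_start, *pool_end;     /* like io_tlb_start/io_tlb_end */
static bool  slot_used[NSLOTS];         /* crude stand-in for io_tlb_list */

/* Quick range check, in the spirit of is_swiotlb_buffer(). */
static bool is_pool_buffer(const char *addr)
{
        return addr >= pool_start && addr < pool_end;
}

/* Internal bookkeeping helper: only ever called with pool addresses,
 * analogous to swiotlb_tbl_unmap_single() operating on io_tlb_list. */
static void pool_tbl_unmap_single(char *addr, size_t size)
{
        size_t first  = (size_t)(addr - pool_start) / SLOT_SIZE;
        size_t nslots = (size + SLOT_SIZE - 1) / SLOT_SIZE;

        for (size_t i = first; i < first + nslots && i < NSLOTS; i++)
                slot_used[i] = false;
}

/* Public-facing wrapper: range-check first, then delegate to the tbl
 * helper, analogous to the unmap_single() wrapper in the diff above. */
static void unmap_single(char *addr, size_t size)
{
        if (is_pool_buffer(addr)) {
                pool_tbl_unmap_single(addr, size);
                return;
        }
        /* Not a bounce buffer: nothing to undo in the pool's bookkeeping. */
}

int main(void)
{
        pool_start = malloc((size_t)SLOT_SIZE * NSLOTS);
        pool_end   = pool_start + (size_t)SLOT_SIZE * NSLOTS;

        /* Pretend slots 2..3 were handed out by a map_single()-style call. */
        slot_used[2] = slot_used[3] = true;

        unmap_single(pool_start + 2 * SLOT_SIZE, 2 * SLOT_SIZE);
        printf("slot 2 used: %d, slot 3 used: %d\n", slot_used[2], slot_used[3]);

        free(pool_start);
        return 0;
}

Built with any C99 compiler, the sketch prints both slots as free after the wrapper range-checks the address and delegates to the tbl helper; it is only meant to show why the internal bookkeeping functions form a distinct layer worth the swiotlb_tbl prefix.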