dect/linux-2.6

swiotlb: Do not export swiotlb_bounce since there are no external consumers

Currently swiotlb is the only consumer of swiotlb_bounce.  Since that is the
case, it doesn't make much sense to export it, so make it a static function
only.

In addition, we can save a few more lines of code by having it accept the DMA
address as a physical address instead of a virtual one.  This is the last
piece in pushing essentially all of the DMA address values in swiotlb over to
physical addresses.
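From a caller's point of view the change looks like this (the map path in the
diff below is representative); the phys_to_virt() conversion moves into
swiotlb_bounce() itself:

	/* before: the caller converts the bounce slot to a virtual address */
	swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr), size, DMA_TO_DEVICE);

	/* after: the caller passes the slot's physical address directly */
	swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);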

In order to clarify things, since we now have two physical addresses in use
inside of swiotlb_bounce, I am renaming phys to orig_addr, and dma_addr to
tlb_addr.  This way it should be clear that orig_addr is contained within
io_tlb_orig_addr and tlb_addr is an address within the io_tlb buffer.
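As a rough sketch of how the two addresses relate (illustrative only, not part
of this patch, and assuming io_tlb_start already holds a physical address, as
arranged earlier in this series):

	/* tlb_addr points at a slot inside the bounce buffer ... */
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	/* ... and io_tlb_orig_addr[] remembers the original buffer it shadows */
	phys_addr_t orig_addr = io_tlb_orig_addr[index];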

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
commit af51a9f184 (parent fbfda893eb)
Alexander Duyck, 2012-10-15 10:19:55 -07:00; committed by Konrad Rzeszutek Wilk
2 changed files with 16 additions and 22 deletions

include/linux/swiotlb.h
@@ -53,9 +53,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
 				    enum dma_sync_target target);
 
 /* Accessory functions. */
-extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-			   enum dma_data_direction dir);
-
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			dma_addr_t *dma_handle, gfp_t flags);

lib/swiotlb.c
@@ -355,14 +355,15 @@ static int is_swiotlb_buffer(phys_addr_t paddr)
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-		    enum dma_data_direction dir)
+static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
+			   size_t size, enum dma_data_direction dir)
 {
-	unsigned long pfn = PFN_DOWN(phys);
+	unsigned long pfn = PFN_DOWN(orig_addr);
+	unsigned char *vaddr = phys_to_virt(tlb_addr);
 
 	if (PageHighMem(pfn_to_page(pfn))) {
 		/* The buffer does not have a mapping.  Map it in and copy */
-		unsigned int offset = phys & ~PAGE_MASK;
+		unsigned int offset = orig_addr & ~PAGE_MASK;
 		char *buffer;
 		unsigned int sz = 0;
 		unsigned long flags;
@@ -373,25 +374,23 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
 			local_irq_save(flags);
 			buffer = kmap_atomic(pfn_to_page(pfn));
 			if (dir == DMA_TO_DEVICE)
-				memcpy(dma_addr, buffer + offset, sz);
+				memcpy(vaddr, buffer + offset, sz);
 			else
-				memcpy(buffer + offset, dma_addr, sz);
+				memcpy(buffer + offset, vaddr, sz);
 			kunmap_atomic(buffer);
 			local_irq_restore(flags);
 
 			size -= sz;
 			pfn++;
-			dma_addr += sz;
+			vaddr += sz;
 			offset = 0;
 		}
+	} else if (dir == DMA_TO_DEVICE) {
+		memcpy(vaddr, phys_to_virt(orig_addr), size);
 	} else {
-		if (dir == DMA_TO_DEVICE)
-			memcpy(dma_addr, phys_to_virt(phys), size);
-		else
-			memcpy(phys_to_virt(phys), dma_addr, size);
+		memcpy(phys_to_virt(orig_addr), vaddr, size);
 	}
 }
-EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 				   dma_addr_t tbl_dma_addr,
@@ -493,8 +492,7 @@ found:
 	for (i = 0; i < nslots; i++)
 		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr), size,
-			       DMA_TO_DEVICE);
+		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
 
 	return tlb_addr;
 }
@@ -526,9 +524,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-		swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr),
-			       size, DMA_FROM_DEVICE);
+	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -569,14 +566,14 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr),
+			swiotlb_bounce(orig_addr, tlb_addr,
 				       size, DMA_FROM_DEVICE);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(orig_addr, phys_to_virt(tlb_addr),
+			swiotlb_bounce(orig_addr, tlb_addr,
 				       size, DMA_TO_DEVICE);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);