
PCI: iova RB tree setup tweak

The following patch merges two functions into one, allowing for a 3%
reduction in the overhead of locating, allocating and inserting pages
for use in IOMMU operations.

It's a bit of an eye-crosser, so I welcome any RB-tree / MM experts to
take a look.  It works by reusing some of the information gathered in
the search for the pages to use in setting up the IOTLBs when inserting
the iova structure into the RB tree.  A sketch of the idea follows.
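
Roughly, and only as a sketch (insert_from_prev is a made-up helper
name, and the NULL-prev fallback to the root is my assumption, not
something the patch relies on): the backwards search for a free PFN
range leaves prev pointing at the last node it visited, which is
exactly the neighbourhood where the new iova belongs, so the insertion
can descend from prev instead of from iovad->rbroot:

	/* Sketch only.  Context assumed: rb_* from <linux/rbtree.h>,
	 * container_of() from <linux/kernel.h>, struct iova as in
	 * drivers/pci/iova.h. */
	static void insert_from_prev(struct rb_root *root,
				     struct rb_node *prev, struct iova *new)
	{
		/* Assumption: fall back to the root if the search
		 * visited no node at all. */
		struct rb_node **entry = prev ? &prev : &root->rb_node;
		struct rb_node *parent = NULL;

		/* Descend from prev, already near the target slot. */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);

			parent = *entry;
			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* ranges never share a pfn_lo */
		}
		rb_link_node(&new->node, parent, entry); /* link new node */
		rb_insert_color(&new->node, root);       /* rebalance */
	}

iova_insert_rbtree() starts its descent at the root on every call;
reusing the walk the search has already paid for is where the saving
comes from.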

Signed-off-by: mark gross <mgross@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
mark gross 2008-03-04 15:22:04 -08:00 committed by Greg Kroah-Hartman
parent c9e9e0bfc5
commit ddf02886cb
1 changed file with 36 additions and 14 deletions

drivers/pci/iova.c

@@ -73,10 +73,11 @@ iova_get_pad_size(int size, unsigned int limit_pfn)
 	return pad_size;
 }
 
-static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
-		unsigned long limit_pfn, struct iova *new, bool size_aligned)
+static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
+		unsigned long size, unsigned long limit_pfn,
+		struct iova *new, bool size_aligned)
 {
-	struct rb_node *curr = NULL;
+	struct rb_node *prev, *curr = NULL;
 	unsigned long flags;
 	unsigned long saved_pfn;
 	unsigned int pad_size = 0;
@@ -85,8 +86,10 @@ static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	saved_pfn = limit_pfn;
 	curr = __get_cached_rbnode(iovad, &limit_pfn);
+	prev = curr;
 	while (curr) {
 		struct iova *curr_iova = container_of(curr, struct iova, node);
+
 		if (limit_pfn < curr_iova->pfn_lo)
 			goto move_left;
 		else if (limit_pfn < curr_iova->pfn_hi)
@@ -100,6 +103,7 @@ static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
 adjust_limit_pfn:
 		limit_pfn = curr_iova->pfn_lo - 1;
 move_left:
+		prev = curr;
 		curr = rb_prev(curr);
 	}
 
@@ -116,7 +120,33 @@ move_left:
 	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
 	new->pfn_hi = new->pfn_lo + size - 1;
 
+	/* Insert the new_iova into domain rbtree by holding writer lock */
+	/* Add new node and rebalance tree. */
+	{
+		struct rb_node **entry = &((prev)), *parent = NULL;
+		/* Figure out where to put new node */
+		while (*entry) {
+			struct iova *this = container_of(*entry,
+							struct iova, node);
+			parent = *entry;
+
+			if (new->pfn_lo < this->pfn_lo)
+				entry = &((*entry)->rb_left);
+			else if (new->pfn_lo > this->pfn_lo)
+				entry = &((*entry)->rb_right);
+			else
+				BUG(); /* this should not happen */
+		}
+
+		/* Add new node and rebalance tree. */
+		rb_link_node(&new->node, parent, entry);
+		rb_insert_color(&new->node, &iovad->rbroot);
+	}
+	__cached_rbnode_insert_update(iovad, saved_pfn, new);
+
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return 0;
 }
@@ -172,23 +202,15 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 		size = __roundup_pow_of_two(size);
 
 	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
-			size_aligned);
+	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
+			new_iova, size_aligned);
 
+	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 	if (ret) {
-		spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 		free_iova_mem(new_iova);
 		return NULL;
 	}
 
-	/* Insert the new_iova into domain rbtree by holding writer lock */
-	spin_lock(&iovad->iova_rbtree_lock);
-	iova_insert_rbtree(&iovad->rbroot, new_iova);
-	__cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
-	spin_unlock(&iovad->iova_rbtree_lock);
-
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 	return new_iova;
 }
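
For context, a caller now gets the range search, the allocation and the
rbtree insertion in a single step under the rbtree lock (a hypothetical
caller sketch; domain, nrpages and dma_mask are illustrative names, not
part of this patch):

	/* Reserve nrpages of IOVA space below the device's DMA mask;
	 * the returned range is already linked into the rbtree. */
	struct iova *iova = alloc_iova(&domain->iovad, nrpages,
				       dma_mask >> PAGE_SHIFT, true);
	if (!iova)
		return NULL;	/* no free range below limit_pfn */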