drm/i915: Preallocate the drm_mm_node prior to manipulating the GTT drm_mm manager

As we may reap neighbouring objects in order to free up pages for
allocations, we need to be careful not to allocate in the middle of the
drm_mm manager. To accomplish this, we can simply allocate the
drm_mm_node up front and then use the combined search & insert
drm_mm routines, reducing our code footprint in the process.
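
In sketch form, the new pattern looks like this (illustrative only: kzalloc() and drm_mm_insert_node_generic() are the real calls used in the diff below, while mm, size, alignment and the bare error returns are simplified stand-ins for the driver state):

        /* Preallocate the node: this allocation may itself recurse into the
         * shrinker and reap neighbouring objects, but at this point we hold
         * no pointer into the drm_mm free list that could go stale. */
        struct drm_mm_node *node;
        int ret;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (node == NULL)
                return -ENOMEM;

        /* Combined search & insert: fills in the preallocated node without
         * allocating memory, so nothing can modify the manager between
         * finding a hole and claiming it. */
        ret = drm_mm_insert_node_generic(mm, node, size, alignment, 0);
        if (ret) {
                kfree(node);
                return ret;     /* or evict and retry, as the patch does */
        }

By contrast, the old flow called drm_mm_search_free_color() and then drm_mm_get_block_generic(); the node allocation hidden inside the latter could trigger exactly such reaping while a pointer to the hole found by the search was still held.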

Fixes (partially) i-g-t/gem_tiled_swapping

Reported-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
[danvet: Again fixup atomic bikeshed.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Chris Wilson 2012-12-07 20:37:07 +00:00 committed by Daniel Vetter
parent b81034506f
commit dc9dd7a20f
1 changed file with 26 additions and 38 deletions


@@ -2890,7 +2890,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
         struct drm_device *dev = obj->base.dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
-        struct drm_mm_node *free_space;
+        struct drm_mm_node *node;
         u32 size, fence_size, fence_alignment, unfenced_alignment;
         bool mappable, fenceable;
         int ret;
@@ -2936,66 +2936,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
         i915_gem_object_pin_pages(obj);
 
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (node == NULL) {
+                i915_gem_object_unpin_pages(obj);
+                return -ENOMEM;
+        }
+
  search_free:
         if (map_and_fenceable)
-                free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-                                                               size, alignment, obj->cache_level,
-                                                               0, dev_priv->mm.gtt_mappable_end,
-                                                               false);
+                ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+                                                          size, alignment, obj->cache_level,
+                                                          0, dev_priv->mm.gtt_mappable_end);
         else
-                free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
-                                                      size, alignment, obj->cache_level,
-                                                      false);
-
-        if (free_space != NULL) {
-                if (map_and_fenceable)
-                        free_space =
-                                drm_mm_get_block_range_generic(free_space,
-                                                               size, alignment, obj->cache_level,
-                                                               0, dev_priv->mm.gtt_mappable_end,
-                                                               false);
-                else
-                        free_space =
-                                drm_mm_get_block_generic(free_space,
-                                                         size, alignment, obj->cache_level,
-                                                         false);
-        }
-        if (free_space == NULL) {
+                ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+                                                 size, alignment, obj->cache_level);
+        if (ret) {
                 ret = i915_gem_evict_something(dev, size, alignment,
                                                obj->cache_level,
                                                map_and_fenceable,
                                                nonblocking);
-                if (ret) {
-                        i915_gem_object_unpin_pages(obj);
-                        return ret;
-                }
+                if (ret == 0)
+                        goto search_free;
 
-                goto search_free;
-        }
-        if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-                                              free_space,
-                                              obj->cache_level))) {
                 i915_gem_object_unpin_pages(obj);
-                drm_mm_put_block(free_space);
+                kfree(node);
+                return ret;
+        }
+        if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+                i915_gem_object_unpin_pages(obj);
+                drm_mm_put_block(node);
                 return -EINVAL;
         }
 
         ret = i915_gem_gtt_prepare_object(obj);
         if (ret) {
                 i915_gem_object_unpin_pages(obj);
-                drm_mm_put_block(free_space);
+                drm_mm_put_block(node);
                 return ret;
         }
 
         list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-        obj->gtt_space = free_space;
-        obj->gtt_offset = free_space->start;
+        obj->gtt_space = node;
+        obj->gtt_offset = node->start;
 
         fenceable =
-                free_space->size == fence_size &&
-                (free_space->start & (fence_alignment - 1)) == 0;
+                node->size == fence_size &&
+                (node->start & (fence_alignment - 1)) == 0;
 
         mappable =
                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;