
drm/ttm/radeon: add dma32 support.

This adds support for using dma32 memory on GPUs that really need it.

Currently IGPs are left without DMA32 but we might need to change
that unless we can fix rs690.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Authored by Dave Airlie on 2009-07-10 22:36:26 +10:00; committed by Dave Airlie
parent 61b576dbbe
commit ad49f50186
6 changed files with 36 additions and 8 deletions
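
For orientation, here is a compressed, stand-alone user-space sketch of how the new need_dma32 flag flows from the radeon driver into TTM's page allocator. The *_sketch types and helper names below are simplified stand-ins, not the kernel definitions; only the control flow mirrors the diffs that follow.

#include <stdbool.h>
#include <stdio.h>

#define TTM_PAGE_FLAG_DMA32 (1 << 7)   /* same bit the patch adds in ttm_bo_driver.h */

struct ttm_bo_device_sketch { bool need_dma32; };
struct radeon_device_sketch { bool need_dma32; struct ttm_bo_device_sketch bdev; };

/* radeon_device_init(): pick the DMA mask width once, based on the bus type */
static int radeon_pick_dma_bits(struct radeon_device_sketch *rdev, bool is_agp, bool is_pci)
{
        rdev->need_dma32 = is_agp || is_pci;    /* PCIE and IGP keep the 40-bit mask */
        return rdev->need_dma32 ? 32 : 40;
}

/* radeon_ttm_init() -> ttm_bo_device_init(): hand the decision to TTM */
static void ttm_bo_device_init_sketch(struct ttm_bo_device_sketch *bdev, bool need_dma32)
{
        bdev->need_dma32 = need_dma32;
}

/* ttm_bo_add_ttm(): translate the device-wide flag into a per-ttm page flag */
static unsigned int ttm_page_flags_sketch(const struct ttm_bo_device_sketch *bdev)
{
        return bdev->need_dma32 ? TTM_PAGE_FLAG_DMA32 : 0;
}

int main(void)
{
        struct radeon_device_sketch rdev = { 0 };

        /* a plain PCI card: the most restrictive case in the patch */
        printf("dma mask bits: %d\n", radeon_pick_dma_bits(&rdev, false, true));
        ttm_bo_device_init_sketch(&rdev.bdev, rdev.need_dma32);
        printf("ttm page_flags: 0x%x\n", ttm_page_flags_sketch(&rdev.bdev));
        return 0;
}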

drivers/gpu/drm/radeon/radeon.h

@@ -624,6 +624,7 @@ struct radeon_device {
         bool gpu_lockup;
         bool shutdown;
         bool suspend;
+        bool need_dma32;
 };
 int radeon_device_init(struct radeon_device *rdev,

drivers/gpu/drm/radeon/radeon_device.c

@@ -450,6 +450,7 @@ int radeon_device_init(struct radeon_device *rdev,
                        uint32_t flags)
 {
         int r, ret;
+        int dma_bits;
         DRM_INFO("radeon: Initializing kernel modesetting.\n");
         rdev->shutdown = false;
@@ -492,8 +493,20 @@ int radeon_device_init(struct radeon_device *rdev,
                 return r;
         }
-        /* Report DMA addressing limitation */
-        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+        /* set DMA mask + need_dma32 flags.
+         * PCIE - can handle 40-bits.
+         * IGP - can handle 40-bits (in theory)
+         * AGP - generally dma32 is safest
+         * PCI - only dma32
+         */
+        rdev->need_dma32 = false;
+        if (rdev->flags & RADEON_IS_AGP)
+                rdev->need_dma32 = true;
+        if (rdev->flags & RADEON_IS_PCI)
+                rdev->need_dma32 = true;
+        dma_bits = rdev->need_dma32 ? 32 : 40;
+        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
         if (r) {
                 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
         }
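
The comment in the hunk above captures the policy; the mask arithmetic itself is simple. Assuming the standard kernel definition of DMA_BIT_MASK() (an n-bit all-ones value), the driver advertises either a 4 GiB (32-bit) or a 1 TiB (40-bit) reachable range, and on failure it only warns rather than falling back to a narrower mask. A small stand-alone check of the two masks:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror the kernel's DMA_BIT_MASK() from <linux/dma-mapping.h>. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        /* 32-bit mask: AGP/PCI parts, every DMA address must sit below 4 GiB */
        printf("DMA_BIT_MASK(32) = 0x%" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(32));
        /* 40-bit mask: PCIE (and, in theory, IGP) parts can reach up to 1 TiB */
        printf("DMA_BIT_MASK(40) = 0x%" PRIx64 "\n", (uint64_t)DMA_BIT_MASK(40));
        return 0;
}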

drivers/gpu/drm/radeon/radeon_ttm.c

@@ -442,7 +442,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
         /* No others user of address space so set it to 0 */
         r = ttm_bo_device_init(&rdev->mman.bdev,
                                rdev->mman.mem_global_ref.object,
-                               &radeon_bo_driver, DRM_FILE_PAGE_OFFSET);
+                               &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
+                               rdev->need_dma32);
         if (r) {
                 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                 return r;

drivers/gpu/drm/ttm/ttm_bo.c

@@ -224,6 +224,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
         TTM_ASSERT_LOCKED(&bo->mutex);
         bo->ttm = NULL;
+        if (bdev->need_dma32)
+                page_flags |= TTM_PAGE_FLAG_DMA32;
         switch (bo->type) {
         case ttm_bo_type_device:
                 if (zero_alloc)
@@ -1332,7 +1335,8 @@ EXPORT_SYMBOL(ttm_bo_device_release);
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
                        struct ttm_mem_global *mem_glob,
-                       struct ttm_bo_driver *driver, uint64_t file_page_offset)
+                       struct ttm_bo_driver *driver, uint64_t file_page_offset,
+                       bool need_dma32)
 {
         int ret = -EINVAL;
@@ -1369,6 +1373,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         INIT_LIST_HEAD(&bdev->ddestroy);
         INIT_LIST_HEAD(&bdev->swap_lru);
         bdev->dev_mapping = NULL;
+        bdev->need_dma32 = need_dma32;
         ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
         ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
         if (unlikely(ret != 0)) {

drivers/gpu/drm/ttm/ttm_tt.c

@@ -131,10 +131,15 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 static struct page *ttm_tt_alloc_page(unsigned page_flags)
 {
-        if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-                return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+        gfp_t gfp_flags = GFP_HIGHUSER;
 
-        return alloc_page(GFP_HIGHUSER);
+        if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+                gfp_flags |= __GFP_ZERO;
+        if (page_flags & TTM_PAGE_FLAG_DMA32)
+                gfp_flags |= __GFP_DMA32;
+        return alloc_page(gfp_flags);
 }
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
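
This hunk is where the flag finally changes behaviour: __GFP_DMA32 confines the allocation to the DMA32 zone (memory below 4 GiB on typical 64-bit systems), so the pages TTM hands to a 32-bit-only GPU stay DMA-reachable. A stand-alone sketch of the same flag-to-gfp translation, using illustrative stand-in constants rather than the kernel's gfp.h values:

#include <stdio.h>

/* Illustrative stand-in values, not the kernel's definitions. */
#define PAGE_FLAG_ZERO_ALLOC (1 << 6)
#define PAGE_FLAG_DMA32      (1 << 7)

#define GFP_HIGHUSER_STANDIN 0x1
#define GFP_ZERO_STANDIN     0x2
#define GFP_DMA32_STANDIN    0x4   /* "allocate below 4 GiB" */

/* Mirrors the control flow of the new ttm_tt_alloc_page(). */
static unsigned int gfp_for_page_flags(unsigned int page_flags)
{
        unsigned int gfp = GFP_HIGHUSER_STANDIN;

        if (page_flags & PAGE_FLAG_ZERO_ALLOC)
                gfp |= GFP_ZERO_STANDIN;
        if (page_flags & PAGE_FLAG_DMA32)
                gfp |= GFP_DMA32_STANDIN;
        return gfp;
}

int main(void)
{
        printf("default:        0x%x\n", gfp_for_page_flags(0));
        printf("dma32 + zeroed: 0x%x\n",
               gfp_for_page_flags(PAGE_FLAG_DMA32 | PAGE_FLAG_ZERO_ALLOC));
        return 0;
}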

include/drm/ttm/ttm_bo_driver.h

@@ -121,6 +121,7 @@ struct ttm_backend {
 #define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
 #define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
 #define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
+#define TTM_PAGE_FLAG_DMA32           (1 << 7)
 enum ttm_caching_state {
         tt_uncached,
@@ -429,6 +430,8 @@ struct ttm_bo_device {
          */
         struct delayed_work wq;
+        bool need_dma32;
 };
 /**
@@ -648,7 +651,7 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
 extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
                               struct ttm_mem_global *mem_glob,
                               struct ttm_bo_driver *driver,
-                              uint64_t file_page_offset);
+                              uint64_t file_page_offset, bool need_dma32);
 /**
  * ttm_bo_reserve: