
drm/nv50: implement global channel address space on new VM code

As of this commit, it's guaranteed that if an object is in VRAM, its
GPU virtual address will be constant.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit 4c13614298
parent f869ef8823
Ben Skeggs 2010-11-15 11:54:21 +10:00
13 changed files with 85 additions and 215 deletions
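
The core of the change, condensed: every buffer object now gets a
per-BO VMA in the shared channel VM when it is created, and reports
that VMA's offset as its GPU address, instead of deriving the address
from its current TTM placement plus dev_priv->vm_vram_base. A minimal
sketch of the resulting address rule (the helper name nvbo_gpu_addr is
hypothetical; the fields and checks are taken from the hunks below):

/* Hypothetical helper: a VRAM object with a VMA keeps the virtual
 * address handed out by nouveau_vm_get() for its whole lifetime, so
 * the address no longer changes when TTM migrates the buffer. */
static u64
nvbo_gpu_addr(struct nouveau_bo *nvbo)
{
	if (nvbo->vma.node && nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		return nvbo->vma.offset;          /* constant VM address */
	return nvbo->bo.mem.start << PAGE_SHIFT;  /* raw placement offset */
}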

drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -49,6 +49,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
nouveau_vm_put(&nvbo->vma);
kfree(nvbo);
}
@@ -113,6 +114,15 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
&align, &size);
align >>= PAGE_SHIFT;
if (!nvbo->no_vm && dev_priv->chan_vm) {
ret = nouveau_vm_get(dev_priv->chan_vm, size, 16,
NV_MEM_ACCESS_RW, &nvbo->vma);
if (ret) {
kfree(nvbo);
return ret;
}
}
nouveau_bo_placement_set(nvbo, flags, 0);
nvbo->channel = chan;
@@ -125,6 +135,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
nvbo->channel = NULL;
if (nvbo->vma.node) {
if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
nvbo->bo.offset = nvbo->vma.offset;
}
*pnvbo = nvbo;
return 0;
}
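
The nouveau_vm_get() above is paired with the nouveau_vm_put() in
nouveau_bo_del_ttm() at the top of this file. Condensed, with error
handling trimmed and the calls exactly as used in the hunks (16 is
presumably the 64KiB large-page shift):

/* creation (nouveau_bo_new): reserve a constant range in the VM */
ret = nouveau_vm_get(dev_priv->chan_vm, size, 16,
		     NV_MEM_ACCESS_RW, &nvbo->vma);

/* destruction (nouveau_bo_del_ttm): return the range to the VM */
nouveau_vm_put(&nvbo->vma);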
@@ -294,6 +309,11 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
if (nvbo->vma.node) {
if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
nvbo->bo.offset = nvbo->vma.offset;
}
return 0;
}
@@ -400,10 +420,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
if (dev_priv->card_type == NV_50)
man->gpu_offset = 0x40000000;
else
man->gpu_offset = 0;
man->gpu_offset = 0;
break;
case TTM_PL_TT:
man->func = &ttm_bo_manager_func;
@@ -507,12 +524,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
dst_offset = new_mem->start << PAGE_SHIFT;
if (!nvbo->no_vm) {
if (old_mem->mem_type == TTM_PL_VRAM)
src_offset += dev_priv->vm_vram_base;
src_offset = nvbo->vma.offset;
else
src_offset += dev_priv->vm_gart_base;
if (new_mem->mem_type == TTM_PL_VRAM)
dst_offset += dev_priv->vm_vram_base;
dst_offset = nvbo->vma.offset;
else
dst_offset += dev_priv->vm_gart_base;
}
@@ -756,7 +773,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
uint64_t offset;
int ret;
if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
/* Nothing to do. */
@@ -766,15 +782,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
offset = new_mem->start << PAGE_SHIFT;
if (dev_priv->card_type == NV_50) {
ret = nv50_mem_vm_bind_linear(dev,
offset + dev_priv->vm_vram_base,
new_mem->size,
nouveau_bo_tile_layout(nvbo),
offset);
if (ret)
return ret;
if (dev_priv->chan_vm) {
nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
} else if (dev_priv->card_type >= NV_10) {
*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,

drivers/gpu/drm/nouveau/nouveau_channel.c

@@ -39,7 +39,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
if (dev_priv->card_type >= NV_50) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->vm_end, NV_MEM_ACCESS_RO,
(1ULL << 40), NV_MEM_ACCESS_RO,
NV_MEM_TARGET_VM, &pushbuf);
chan->pushbuf_base = pb->bo.offset;
} else

drivers/gpu/drm/nouveau/nouveau_drv.c

@@ -339,13 +339,11 @@ nouveau_pci_resume(struct pci_dev *pdev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
nv_crtc->cursor.set_offset(nv_crtc,
nv_crtc->cursor.nvbo->bo.offset -
dev_priv->vm_vram_base);
nv_crtc->cursor.set_offset(nv_crtc, offset);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
nv_crtc->cursor_saved_y);
nv_crtc->cursor_saved_y);
}
/* Force CLUT to get re-loaded during modeset */

drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -65,10 +65,6 @@ struct nouveau_vram;
#define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15
#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
#define NV50_VM_BLOCK (512*1024*1024ULL)
#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
struct nouveau_vram {
struct drm_device *dev;
@@ -106,6 +102,7 @@ struct nouveau_bo {
struct nouveau_channel *channel;
struct nouveau_vma vma;
bool mappable;
bool no_vm;
@@ -252,7 +249,6 @@ struct nouveau_channel {
struct nouveau_vm *vm;
struct nouveau_gpuobj *vm_pd;
struct nouveau_gpuobj *vm_gart_pt;
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
/* Objects */
struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -712,13 +708,9 @@ struct drm_nouveau_private {
struct nouveau_vm *bar3_vm;
/* G8x/G9x virtual address space */
struct nouveau_vm *chan_vm;
uint64_t vm_gart_base;
uint64_t vm_gart_size;
uint64_t vm_vram_base;
uint64_t vm_vram_size;
uint64_t vm_end;
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
struct nvbios vbios;
@@ -836,11 +828,6 @@ extern struct nouveau_tile_reg *nv10_mem_set_tiling(
extern void nv10_mem_put_tile_region(struct drm_device *dev,
struct nouveau_tile_reg *tile,
struct nouveau_fence *fence);
extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
uint32_t size, uint32_t flags,
uint64_t phys);
extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
uint32_t size);
extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
/* nouveau_notifier.c */

drivers/gpu/drm/nouveau/nouveau_fbcon.c

@@ -338,8 +338,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
FBINFO_HWACCEL_IMAGEBLIT;
info->flags |= FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &nouveau_fbcon_sw_ops;
info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
dev_priv->vm_vram_base;
info->fix.smem_start = dev->mode_config.fb_base +
(nvbo->bo.mem.start << PAGE_SHIFT);
info->fix.smem_len = size;
info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);

drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -144,100 +144,6 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
return found;
}
/*
* NV50 VM helpers
*/
int
nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
uint32_t flags, uint64_t phys)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *pgt;
unsigned block;
int i;
virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
size = (size >> 16) << 1;
phys |= ((uint64_t)flags << 32);
phys |= 1;
if (dev_priv->vram_sys_base) {
phys += dev_priv->vram_sys_base;
phys |= 0x30;
}
while (size) {
unsigned offset_h = upper_32_bits(phys);
unsigned offset_l = lower_32_bits(phys);
unsigned pte, end;
for (i = 7; i >= 0; i--) {
block = 1 << (i + 1);
if (size >= block && !(virt & (block - 1)))
break;
}
offset_l |= (i << 7);
phys += block << 15;
size -= block;
while (block) {
pgt = dev_priv->vm_vram_pt[virt >> 14];
pte = virt & 0x3ffe;
end = pte + block;
if (end > 16384)
end = 16384;
block -= (end - pte);
virt += (end - pte);
while (pte < end) {
nv_wo32(pgt, (pte * 4) + 0, offset_l);
nv_wo32(pgt, (pte * 4) + 4, offset_h);
pte += 2;
}
}
}
dev_priv->engine.instmem.flush(dev);
dev_priv->engine.fifo.tlb_flush(dev);
dev_priv->engine.graph.tlb_flush(dev);
nv50_vm_flush_engine(dev, 6);
return 0;
}
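
For reference, the arithmetic in the removed helper above, worked
through for an assumed example (a single 64KiB page mapped 128KiB past
vm_vram_base):

/* virt = (0x20000 >> 16) << 1 = 4
 *   - pages are 64KiB (1 << 16) and each page takes one 8-byte PTE,
 *     i.e. two 32-bit words, hence the << 1: virt counts words.
 * pgt = vm_vram_pt[virt >> 14]
 *   - each table holds 16384 words = 8192 PTEs = 512MiB of VRAM
 *     (NV50_VM_BLOCK in nouveau_drv.h).
 * nv_wo32(pgt, pte * 4 + 0, offset_l);  low word: phys | size | valid
 * nv_wo32(pgt, pte * 4 + 4, offset_h);  high word: upper phys + flags
 */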
void
nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *pgt;
unsigned pages, pte, end;
virt -= dev_priv->vm_vram_base;
pages = (size >> 16) << 1;
while (pages) {
pgt = dev_priv->vm_vram_pt[virt >> 29];
pte = (virt & 0x1ffe0000ULL) >> 15;
end = pte + pages;
if (end > 16384)
end = 16384;
pages -= (end - pte);
virt += (end - pte) << 15;
while (pte < end) {
nv_wo32(pgt, (pte * 4), 0);
pte++;
}
}
dev_priv->engine.instmem.flush(dev);
dev_priv->engine.fifo.tlb_flush(dev);
dev_priv->engine.graph.tlb_flush(dev);
nv50_vm_flush_engine(dev, 6);
}
/*
* Cleanup everything
*/

drivers/gpu/drm/nouveau/nouveau_object.c

@@ -35,6 +35,7 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
struct nouveau_gpuobj_method {
struct list_head head;
@@ -770,9 +771,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
int ret, i;
int ret;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
@@ -783,16 +783,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
return ret;
}
/* NV50 VM
/* NV50/NVC0 VM
* - Allocate per-channel page-directory
* - Map GART and VRAM into the channel's address space at the
* locations determined during init.
* - Link with shared channel VM
*/
if (dev_priv->card_type >= NV_50) {
if (dev_priv->chan_vm) {
u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
u64 vm_vinst = chan->ramin->vinst + pgd_offs;
u32 vm_pinst = chan->ramin->pinst;
u32 pde;
if (vm_pinst != ~0)
vm_pinst += pgd_offs;
@@ -801,29 +799,9 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
0, &chan->vm_pd);
if (ret)
return ret;
for (i = 0; i < 0x4000; i += 8) {
nv_wo32(chan->vm_pd, i + 0, 0x00000000);
nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
}
nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
&chan->vm_gart_pt);
pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
&chan->vm_vram_pt[i]);
nv_wo32(chan->vm_pd, pde + 0,
chan->vm_vram_pt[i]->vinst | 0x61);
nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
pde += 8;
}
instmem->flush(dev);
nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
chan->vm->map_pgt(chan->vm_pd, 12, 1, dev_priv->gart_info.sg_ctxdma);
}
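
The net effect of this hunk: rather than hand-writing GART and VRAM
PDEs into every channel's page directory, the directory is registered
with the shared VM, which populates and maintains it. A condensed view
of the pairing, using only calls visible in this patch:

/* channel init: link the page directory into the shared VM and wire
 * up the GART page table slot */
nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
chan->vm->map_pgt(chan->vm_pd, 12, 1, dev_priv->gart_info.sg_ctxdma);

/* channel takedown (below): drop the link again */
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);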
/* RAMHT */
@@ -846,8 +824,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* VRAM ctxdma */
if (dev_priv->card_type >= NV_50) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->vm_end,
NV_MEM_ACCESS_RW,
0, (1ULL << 40), NV_MEM_ACCESS_RW,
NV_MEM_TARGET_VM, &vram);
if (ret) {
NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
@@ -874,8 +851,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
/* TT memory ctxdma */
if (dev_priv->card_type >= NV_50) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->vm_end,
NV_MEM_ACCESS_RW,
0, (1ULL << 40), NV_MEM_ACCESS_RW,
NV_MEM_TARGET_VM, &tt);
} else {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
@@ -902,9 +878,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
int i;
NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -913,10 +887,9 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
nouveau_ramht_ref(NULL, &chan->ramht, chan);
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
if (chan->ramin_heap.free_stack.next)
drm_mm_takedown(&chan->ramin_heap);

drivers/gpu/drm/nouveau/nv50_crtc.c

@@ -345,7 +345,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t buffer_handle, uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cursor = NULL;
struct drm_gem_object *gem;
@@ -374,8 +373,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nouveau_bo_unmap(cursor);
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
dev_priv->vm_vram_base);
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
nv_crtc->cursor.show(nv_crtc, true);
out:
@@ -548,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {

drivers/gpu/drm/nouveau/nv50_fbcon.c

@@ -3,6 +3,7 @@
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nouveau_fbcon.h"
#include "nouveau_mm.h"
int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -134,10 +135,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
int ret, format;
uint64_t fb;
fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
switch (info->var.bits_per_pixel) {
case 8:
@@ -224,8 +223,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
OUT_RING(chan, upper_32_bits(fb));
OUT_RING(chan, lower_32_bits(fb));
OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
BEGIN_RING(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -233,8 +232,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
OUT_RING(chan, upper_32_bits(fb));
OUT_RING(chan, lower_32_bits(fb));
OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
return 0;
}

drivers/gpu/drm/nouveau/nv50_graph.c

@@ -246,6 +246,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
dev_priv->engine.instmem.flush(dev);
atomic_inc(&chan->vm->pgraph_refs);
return 0;
}
@@ -277,6 +278,8 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
atomic_dec(&chan->vm->pgraph_refs);
}
static int

drivers/gpu/drm/nouveau/nv50_instmem.c

@@ -131,6 +131,7 @@ nv50_instmem_init(struct drm_device *dev)
struct nouveau_channel *chan;
struct nouveau_vm *vm;
int ret, i;
u64 nongart_o;
u32 tmp;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -215,37 +216,18 @@ nv50_instmem_init(struct drm_device *dev)
for (i = 0; i < 8; i++)
nv_wr32(dev, 0x1900 + (i*4), 0);
/* Determine VM layout */
dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
dev_priv->vm_gart_size = NV50_VM_BLOCK;
/* Create shared channel VM, space is reserved for GART mappings at
* the beginning of this address space; it's managed separately
* because TTM makes life painful
*/
dev_priv->vm_gart_base = 0x0020000000ULL;
dev_priv->vm_gart_size = 512 * 1024 * 1024;
nongart_o = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
dev_priv->vm_vram_size = dev_priv->vram_size;
if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
dev_priv->vm_gart_base,
dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
dev_priv->vm_vram_base,
dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
/* VRAM page table(s), mapped into VM at +1GiB */
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
0, NVOBJ_FLAG_ZERO_ALLOC,
&dev_priv->vm_vram_pt[i]);
if (ret) {
NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
dev_priv->vm_vram_pt_nr = i;
return ret;
}
}
ret = nouveau_vm_new(dev, 0, (1ULL << 40), nongart_o,
29, 12, 16, &dev_priv->chan_vm);
if (ret)
return ret;
return 0;
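
This single nouveau_vm_new() call replaces the hand-built VRAM
page-table layout above. Annotated under the assumption that the
signature matches the VM code added by parent commit f869ef8823
(offset, length, mm_offset, page-directory bits, small/large page
shifts); the comments are interpretations, not taken from the patch:

ret = nouveau_vm_new(dev,
		     0,            /* VM starts at address 0           */
		     (1ULL << 40), /* full 40-bit address space        */
		     nongart_o,    /* allocate above the GART window   */
		     29,           /* one PDE spans 1 << 29 = 512MiB   */
		     12,           /* small pages: 4KiB                */
		     16,           /* large pages: 64KiB               */
		     &dev_priv->chan_vm);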
@@ -269,9 +251,7 @@ nv50_instmem_takedown(struct drm_device *dev)
dev_priv->ramin_available = false;
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
nouveau_gpuobj_ref(NULL, &dev_priv->vm_vram_pt[i]);
dev_priv->vm_vram_pt_nr = 0;
nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
for (i = 0x1700; i <= 0x1710; i += 4)
nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);

drivers/gpu/drm/nouveau/nv50_vm.c

@@ -149,9 +149,24 @@ nv50_vm_flush(struct nouveau_vm *vm)
{
struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
pinstmem->flush(vm->dev);
nv50_vm_flush_engine(vm->dev, 6);
/* BAR */
if (vm != dev_priv->chan_vm) {
nv50_vm_flush_engine(vm->dev, 6);
return;
}
pfifo->tlb_flush(vm->dev);
if (atomic_read(&vm->pgraph_refs))
pgraph->tlb_flush(vm->dev);
if (atomic_read(&vm->pcrypt_refs))
pcrypt->tlb_flush(vm->dev);
}
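
The flush path is now selective: an engine's TLB is only flushed for a
VM that actually has contexts on that engine, tracked by the atomic
counters which nv50_graph.c (above) and nv84_crypt.c (below) adjust
around context create/destroy. Condensed from those hunks:

/* context create / destroy: */
atomic_inc(&chan->vm->pgraph_refs);   /* nv50_graph_create_context()  */
atomic_dec(&chan->vm->pgraph_refs);   /* nv50_graph_destroy_context() */

/* nv50_vm_flush(): skip engines with no contexts in this VM */
if (atomic_read(&vm->pgraph_refs))
	pgraph->tlb_flush(vm->dev);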
void

drivers/gpu/drm/nouveau/nv84_crypt.c

@@ -53,6 +53,7 @@ nv84_crypt_create_context(struct nouveau_channel *chan)
nv_wo32(ramin, 0xb4, 0);
dev_priv->engine.instmem.flush(dev);
atomic_inc(&chan->vm->pcrypt_refs);
return 0;
}
@@ -80,6 +81,7 @@ nv84_crypt_destroy_context(struct nouveau_channel *chan)
nv_wr32(dev, 0x10200c, 0x00000010);
nouveau_gpuobj_ref(NULL, &chan->crypt_ctx);
atomic_dec(&chan->vm->pcrypt_refs);
}
void