dect
/
linux-2.6
Archived
13
0
Fork 0

drm/nouveau: port all engines to new engine module format

This is a HUGE commit, but it's not nearly as bad as it looks - any problems
can be isolated to a particular chipset and engine combination.  It was
simply too difficult to port each one at a time, the compat layers are
*already* ridiculous.

Most of the changes here are simply to the glue, the process for each of the
engine modules was to start with a standard skeleton and copy+paste the old
code into the appropriate places, fixing up variable names etc as needed.

v2: Marcin Slusarz <marcin.slusarz@gmail.com>
- fix find/replace bug in license header

v3: Ben Skeggs <bskeggs@redhat.com>
- bump indirect pushbuf size to 8KiB; 4KiB was barely enough for userspace and
  left no space for the kernel's requirements during GEM pushbuf submission.
- fix duplicate assignments noticed by clang

v4: Marcin Slusarz <marcin.slusarz@gmail.com>
- add sparse annotations to nv04_fifo_pause/nv04_fifo_start
- use ioread32_native/iowrite32_native for fifo control registers

v5: Ben Skeggs <bskeggs@redhat.com>
- rebase on v3.6-rc4, modified to keep copy engine fix intact
- nv10/fence: unmap fence bo before destroying
- fixed fermi regression when using nvidia gr fuc
- fixed typo in supported dma_mask checking

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
This commit is contained in:
Ben Skeggs 2012-07-20 08:17:34 +10:00
parent ac1499d957
commit ebb945a94b
146 changed files with 14480 additions and 11360 deletions

View File

@ -4,9 +4,11 @@
ccflags-y := -Iinclude/drm -DCONFIG_NOUVEAU_DEBUG=7 -DCONFIG_NOUVEAU_DEBUG_DEFAULT=3
ccflags-y += -I$(src)/core/include
ccflags-y += -I$(src)/core
ccflags-y += -I$(src)
nouveau-y := core/core/client.o
nouveau-y += core/core/engctx.o
nouveau-y += core/core/engine.o
nouveau-y += core/core/enum.o
nouveau-y += core/core/gpuobj.o
@ -90,12 +92,20 @@ nouveau-y += core/subdev/vm/nv44.o
nouveau-y += core/subdev/vm/nv50.o
nouveau-y += core/subdev/vm/nvc0.o
nouveau-y += core/engine/dmaobj/base.o
nouveau-y += core/engine/dmaobj/nv04.o
nouveau-y += core/engine/dmaobj/nv50.o
nouveau-y += core/engine/dmaobj/nvc0.o
nouveau-y += core/engine/bsp/nv84.o
nouveau-y += core/engine/copy/nva3.o
nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
nouveau-y += core/engine/disp/nv04.o
nouveau-y += core/engine/disp/nv50.o
nouveau-y += core/engine/disp/nvd0.o
nouveau-y += core/engine/disp/vga.o
nouveau-y += core/engine/fifo/base.o
nouveau-y += core/engine/fifo/nv04.o
nouveau-y += core/engine/fifo/nv10.o
nouveau-y += core/engine/fifo/nv17.o
@ -111,41 +121,82 @@ nouveau-y += core/engine/graph/ctxnve0.o
nouveau-y += core/engine/graph/nv04.o
nouveau-y += core/engine/graph/nv10.o
nouveau-y += core/engine/graph/nv20.o
nouveau-y += core/engine/graph/nv25.o
nouveau-y += core/engine/graph/nv2a.o
nouveau-y += core/engine/graph/nv30.o
nouveau-y += core/engine/graph/nv34.o
nouveau-y += core/engine/graph/nv35.o
nouveau-y += core/engine/graph/nv40.o
nouveau-y += core/engine/graph/nv50.o
nouveau-y += core/engine/graph/nvc0.o
nouveau-y += core/engine/graph/nve0.o
nouveau-y += core/engine/mpeg/nv31.o
nouveau-y += core/engine/mpeg/nv40.o
nouveau-y += core/engine/mpeg/nv50.o
nouveau-y += core/engine/mpeg/nv84.o
nouveau-y += core/engine/ppp/nv98.o
nouveau-y += core/engine/software/nv04.o
nouveau-y += core/engine/software/nv10.o
nouveau-y += core/engine/software/nv50.o
nouveau-y += core/engine/software/nvc0.o
nouveau-y += core/engine/vp/nv84.o
nouveau-y += nouveau_drm.o nouveau_compat.o \
nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \
nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
nouveau_hdmi.o nouveau_dp.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
nouveau_mxm.o nouveau_agp.o \
nouveau_abi16.o \
nouveau_bios.o \
nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o \
nv04_software.o nv50_software.o nvc0_software.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o \
nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
nv50_cursor.o nv50_display.o \
nvd0_display.o \
nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
nouveau_prime.o
# drm/compat - will go away
nouveau-y += nouveau_compat.o nouveau_revcompat.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
# drm/core
nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
nouveau-y += nouveau_agp.o
nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
nouveau-y += nouveau_abi16.o
nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
# drm/kms/common
nouveau-y += nouveau_fbcon.o
# drm/kms/nv04:nv50
nouveau-y += nv04_fbcon.o
# drm/kms/nv50:nvd9
nouveau-y += nv50_fbcon.o nvc0_fbcon.o
# drm/kms/nvd9-
##
## unported bits below
##
# drm/core
nouveau-y += nouveau_drv.o nouveau_state.o nouveau_irq.o
nouveau-y += nouveau_prime.o
# drm/kms/bios
nouveau-y += nouveau_mxm.o nouveau_bios.o
# drm/kms/common
nouveau-y += nouveau_display.o nouveau_connector.o
nouveau-y += nouveau_hdmi.o nouveau_dp.o
# drm/kms/nv04:nv50
nouveau-y += nouveau_hw.o nouveau_calc.o
nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
# drm/kms/nv50-
nouveau-y += nv50_display.o nvd0_display.o
nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
nouveau-y += nv50_evo.o
# drm/pm
nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o
nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
nouveau-y += nouveau_mem.o
# optional stuff
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o

View File

@ -1,5 +1,5 @@
/*
* Copyright 2010 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -18,289 +18,92 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include <core/object.h>
#include <core/ramht.h>
#include <core/math.h>
#include <subdev/bar.h>
static u32
nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_ramht *ramht = chan->ramht;
u32 hash = 0;
int i;
NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
for (i = 32; i > 0; i -= ramht->bits) {
while (handle) {
hash ^= (handle & ((1 << ramht->bits) - 1));
handle >>= ramht->bits;
}
if (dev_priv->card_type < NV_50)
hash ^= chan->id << (ramht->bits - 4);
hash <<= 3;
NV_DEBUG(dev, "hash=0x%08x\n", hash);
hash ^= chid << (ramht->bits - 4);
hash = hash << 3;
return hash;
}
static int
nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
u32 offset)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
u32 ctx = nv_ro32(ramht, offset + 4);
if (dev_priv->card_type < NV_40)
return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
return (ctx != 0);
}
static int
nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
struct nouveau_gpuobj *ramht, u32 offset)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
u32 ctx = nv_ro32(ramht, offset + 4);
if (dev_priv->card_type >= NV_50)
return true;
else if (dev_priv->card_type >= NV_40)
return chan->id ==
((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
else
return chan->id ==
((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
}
int
nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
struct nouveau_gpuobj *gpuobj)
nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
u32 handle, u32 context)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_ramht_entry *entry;
struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
unsigned long flags;
u32 ctx, co, ho;
struct nouveau_bar *bar = nouveau_bar(ramht);
u32 co, ho;
if (nouveau_ramht_find(chan, handle))
return -EEXIST;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->channel = chan;
entry->gpuobj = NULL;
entry->handle = handle;
nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
if (dev_priv->card_type < NV_40) {
ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->addr >> 4) |
(chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
} else
if (dev_priv->card_type < NV_50) {
ctx = (gpuobj->addr >> 4) |
(chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
ctx = (gpuobj->node->offset << 10) |
(chan->id << 28) |
chan->id; /* HASH_TAG */
} else {
ctx = (gpuobj->node->offset >> 4) |
((gpuobj->engine <<
NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
}
}
spin_lock_irqsave(&chan->ramht->lock, flags);
list_add(&entry->head, &chan->ramht->entries);
co = ho = nouveau_ramht_hash_handle(chan, handle);
co = ho = nouveau_ramht_hash(ramht, chid, handle);
do {
if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
NV_DEBUG(dev,
"insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
chan->id, co, handle, ctx);
if (!nv_ro32(ramht, co + 4)) {
nv_wo32(ramht, co + 0, handle);
nv_wo32(ramht, co + 4, ctx);
spin_unlock_irqrestore(&chan->ramht->lock, flags);
nvimem_flush(dev);
return 0;
nv_wo32(ramht, co + 4, context);
if (bar)
bar->flush(bar);
return co;
}
NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
chan->id, co, nv_ro32(ramht, co));
co += 8;
if (co >= ramht->size)
if (co >= nv_gpuobj(ramht)->size)
co = 0;
} while (co != ho);
NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
list_del(&entry->head);
spin_unlock_irqrestore(&chan->ramht->lock, flags);
kfree(entry);
return -ENOMEM;
}
static struct nouveau_ramht_entry *
nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
{
struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
struct nouveau_ramht_entry *entry;
unsigned long flags;
if (!ramht)
return NULL;
spin_lock_irqsave(&ramht->lock, flags);
list_for_each_entry(entry, &ramht->entries, head) {
if (entry->channel == chan &&
(!handle || entry->handle == handle)) {
list_del(&entry->head);
spin_unlock_irqrestore(&ramht->lock, flags);
return entry;
}
}
spin_unlock_irqrestore(&ramht->lock, flags);
return NULL;
}
static void
nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
{
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
unsigned long flags;
u32 co, ho;
spin_lock_irqsave(&chan->ramht->lock, flags);
co = ho = nouveau_ramht_hash_handle(chan, handle);
do {
if (nouveau_ramht_entry_valid(dev, ramht, co) &&
nouveau_ramht_entry_same_channel(chan, ramht, co) &&
(handle == nv_ro32(ramht, co))) {
NV_DEBUG(dev,
"remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
chan->id, co, handle, nv_ro32(ramht, co + 4));
nv_wo32(ramht, co + 0, 0x00000000);
nv_wo32(ramht, co + 4, 0x00000000);
nvimem_flush(dev);
goto out;
}
co += 8;
if (co >= ramht->size)
co = 0;
} while (co != ho);
NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
chan->id, handle);
out:
spin_unlock_irqrestore(&chan->ramht->lock, flags);
}
int
nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
{
struct nouveau_ramht_entry *entry;
entry = nouveau_ramht_remove_entry(chan, handle);
if (!entry)
return -ENOENT;
nouveau_ramht_remove_hash(chan, entry->handle);
nouveau_gpuobj_ref(NULL, &entry->gpuobj);
kfree(entry);
return 0;
}
struct nouveau_gpuobj *
nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
{
struct nouveau_ramht *ramht = chan->ramht;
struct nouveau_ramht_entry *entry;
struct nouveau_gpuobj *gpuobj = NULL;
unsigned long flags;
if (unlikely(!chan->ramht))
return NULL;
spin_lock_irqsave(&ramht->lock, flags);
list_for_each_entry(entry, &chan->ramht->entries, head) {
if (entry->channel == chan && entry->handle == handle) {
gpuobj = entry->gpuobj;
break;
}
}
spin_unlock_irqrestore(&ramht->lock, flags);
return gpuobj;
}
int
nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
struct nouveau_ramht **pramht)
{
struct nouveau_ramht *ramht;
ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
if (!ramht)
return -ENOMEM;
ramht->dev = dev;
kref_init(&ramht->refcount);
ramht->bits = drm_order(gpuobj->size / 8);
INIT_LIST_HEAD(&ramht->entries);
spin_lock_init(&ramht->lock);
nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
*pramht = ramht;
return 0;
}
static void
nouveau_ramht_del(struct kref *ref)
{
struct nouveau_ramht *ramht =
container_of(ref, struct nouveau_ramht, refcount);
nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
kfree(ramht);
}
void
nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
struct nouveau_channel *chan)
nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
{
struct nouveau_ramht_entry *entry;
struct nouveau_ramht *ramht;
if (ref)
kref_get(&ref->refcount);
ramht = *ptr;
if (ramht) {
while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
nouveau_ramht_remove_hash(chan, entry->handle);
nouveau_gpuobj_ref(NULL, &entry->gpuobj);
kfree(entry);
}
kref_put(&ramht->refcount, nouveau_ramht_del);
}
*ptr = ref;
struct nouveau_bar *bar = nouveau_bar(ramht);
nv_wo32(ramht, cookie + 0, 0x00000000);
nv_wo32(ramht, cookie + 4, 0x00000000);
if (bar)
bar->flush(bar);
}
static struct nouveau_oclass
nouveau_ramht_oclass = {
.handle = 0x0000abcd,
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = NULL,
.dtor = _nouveau_gpuobj_dtor,
.init = _nouveau_gpuobj_init,
.fini = _nouveau_gpuobj_fini,
.rd32 = _nouveau_gpuobj_rd32,
.wr32 = _nouveau_gpuobj_wr32,
},
};
int
nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
u32 size, u32 align, struct nouveau_ramht **pramht)
{
struct nouveau_ramht *ramht;
int ret;
ret = nouveau_gpuobj_create(parent, parent->engine ?
parent->engine : parent, /* <nv50 ramht */
&nouveau_ramht_oclass, 0, pargpu, size,
align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
*pramht = ramht;
if (ret)
return ret;
ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3);
return 0;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 2011 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,61 +22,154 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <core/ramht.h>
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
* more than just an enable/disable stub this needs to be split out to
* nv98_bsp.c...
*/
#include <engine/bsp.h>
struct nv84_bsp_engine {
struct nouveau_exec_engine base;
struct nv84_bsp_priv {
struct nouveau_bsp base;
};
static int
nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
{
if (!(nv_rd32(dev, 0x000200) & 0x00008000))
return 0;
struct nv84_bsp_chan {
struct nouveau_bsp_chan base;
};
nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
return 0;
}
/*******************************************************************************
* BSP object classes
******************************************************************************/
static struct nouveau_oclass
nv84_bsp_sclass[] = {
{},
};
/*******************************************************************************
* BSP context
******************************************************************************/
static int
nv84_bsp_init(struct drm_device *dev, int engine)
nv84_bsp_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
struct nv84_bsp_chan *priv;
int ret;
ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
0, 0, 0, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
return 0;
}
static void
nv84_bsp_destroy(struct drm_device *dev, int engine)
nv84_bsp_context_dtor(struct nouveau_object *object)
{
struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
NVOBJ_ENGINE_DEL(dev, BSP);
kfree(pbsp);
struct nv84_bsp_chan *priv = (void *)object;
nouveau_bsp_context_destroy(&priv->base);
}
int
nv84_bsp_create(struct drm_device *dev)
static int
nv84_bsp_context_init(struct nouveau_object *object)
{
struct nv84_bsp_engine *pbsp;
struct nv84_bsp_chan *priv = (void *)object;
int ret;
pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
if (!pbsp)
return -ENOMEM;
ret = nouveau_bsp_context_init(&priv->base);
if (ret)
return ret;
pbsp->base.destroy = nv84_bsp_destroy;
pbsp->base.init = nv84_bsp_init;
pbsp->base.fini = nv84_bsp_fini;
NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
return 0;
}
static int
nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
{
struct nv84_bsp_chan *priv = (void *)object;
return nouveau_bsp_context_fini(&priv->base, suspend);
}
static struct nouveau_oclass
nv84_bsp_cclass = {
.handle = NV_ENGCTX(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_bsp_context_ctor,
.dtor = nv84_bsp_context_dtor,
.init = nv84_bsp_context_init,
.fini = nv84_bsp_context_fini,
.rd32 = _nouveau_bsp_context_rd32,
.wr32 = _nouveau_bsp_context_wr32,
},
};
/*******************************************************************************
* BSP engine/subdev functions
******************************************************************************/
static void
nv84_bsp_intr(struct nouveau_subdev *subdev)
{
}
static int
nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv84_bsp_priv *priv;
int ret;
ret = nouveau_bsp_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x04008000;
nv_subdev(priv)->intr = nv84_bsp_intr;
nv_engine(priv)->cclass = &nv84_bsp_cclass;
nv_engine(priv)->sclass = nv84_bsp_sclass;
return 0;
}
static void
nv84_bsp_dtor(struct nouveau_object *object)
{
struct nv84_bsp_priv *priv = (void *)object;
nouveau_bsp_destroy(&priv->base);
}
static int
nv84_bsp_init(struct nouveau_object *object)
{
struct nv84_bsp_priv *priv = (void *)object;
int ret;
ret = nouveau_bsp_init(&priv->base);
if (ret)
return ret;
return 0;
}
static int
nv84_bsp_fini(struct nouveau_object *object, bool suspend)
{
struct nv84_bsp_priv *priv = (void *)object;
return nouveau_bsp_fini(&priv->base, suspend);
}
struct nouveau_oclass
nv84_bsp_oclass = {
.handle = NV_ENGINE(BSP, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_bsp_ctor,
.dtor = nv84_bsp_dtor,
.init = nv84_bsp_init,
.fini = nv84_bsp_fini,
},
};

View File

@ -1,5 +1,5 @@
/*
* Copyright 2011 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,112 +22,75 @@
* Authors: Ben Skeggs
*/
#include <linux/firmware.h>
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <core/ramht.h>
#include <core/os.h>
#include <core/enum.h>
#include <core/class.h>
#include <core/engctx.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
#include <engine/copy.h>
#include "fuc/nva3.fuc.h"
struct nva3_copy_engine {
struct nouveau_exec_engine base;
struct nva3_copy_priv {
struct nouveau_copy base;
};
struct nva3_copy_chan {
struct nouveau_copy_chan base;
};
/*******************************************************************************
* Copy object classes
******************************************************************************/
static struct nouveau_oclass
nva3_copy_sclass[] = {
{ 0x85b5, &nouveau_object_ofuncs },
{}
};
/*******************************************************************************
* PCOPY context
******************************************************************************/
static int
nva3_copy_context_new(struct nouveau_channel *chan, int engine)
nva3_copy_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *ramin = chan->ramin;
struct nouveau_gpuobj *ctx = NULL;
struct nva3_copy_chan *priv;
int ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &ctx);
ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
NVOBJ_FLAG_ZERO_ALLOC, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_wo32(ramin, 0xc0, 0x00190000);
nv_wo32(ramin, 0xc4, ctx->addr + ctx->size - 1);
nv_wo32(ramin, 0xc8, ctx->addr);
nv_wo32(ramin, 0xcc, 0x00000000);
nv_wo32(ramin, 0xd0, 0x00000000);
nv_wo32(ramin, 0xd4, 0x00000000);
nvimem_flush(dev);
nvvm_engref(chan->vm, engine, 1);
chan->engctx[engine] = ctx;
return 0;
}
static int
nva3_copy_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
{
struct nouveau_gpuobj *ctx = chan->engctx[engine];
static struct nouveau_oclass
nva3_copy_cclass = {
.handle = NV_ENGCTX(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nva3_copy_context_ctor,
.dtor = _nouveau_copy_context_dtor,
.init = _nouveau_copy_context_init,
.fini = _nouveau_copy_context_fini,
.rd32 = _nouveau_copy_context_rd32,
.wr32 = _nouveau_copy_context_wr32,
/* fuc engine doesn't need an object, our ramht code does.. */
ctx->engine = 3;
ctx->class = class;
return nouveau_ramht_insert(chan, handle, ctx);
}
},
};
static void
nva3_copy_context_del(struct nouveau_channel *chan, int engine)
{
struct nouveau_gpuobj *ctx = chan->engctx[engine];
int i;
for (i = 0xc0; i <= 0xd4; i += 4)
nv_wo32(chan->ramin, i, 0x00000000);
nvvm_engref(chan->vm, engine, -1);
nouveau_gpuobj_ref(NULL, &ctx);
chan->engctx[engine] = ctx;
}
static void
nva3_copy_tlb_flush(struct drm_device *dev, int engine)
{
nv50_vm_flush_engine(dev, 0x0d);
}
static int
nva3_copy_init(struct drm_device *dev, int engine)
{
int i;
nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */
/* upload ucode */
nv_wr32(dev, 0x1041c0, 0x01000000);
for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);
nv_wr32(dev, 0x104180, 0x01000000);
for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
if ((i & 0x3f) == 0)
nv_wr32(dev, 0x104188, i >> 6);
nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
}
/* start it running */
nv_wr32(dev, 0x10410c, 0x00000000);
nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
return 0;
}
static int
nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
nv_wr32(dev, 0x104014, 0xffffffff);
return 0;
}
/*******************************************************************************
* PCOPY engine/subdev functions
******************************************************************************/
static struct nouveau_enum nva3_copy_isr_error_name[] = {
{ 0x0001, "ILLEGAL_MTHD" },
@ -137,65 +100,114 @@ static struct nouveau_enum nva3_copy_isr_error_name[] = {
};
static void
nva3_copy_isr(struct drm_device *dev)
nva3_copy_intr(struct nouveau_subdev *subdev)
{
u32 dispatch = nv_rd32(dev, 0x10401c);
u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
u32 addr = nv_rd32(dev, 0x104040) >> 16;
struct nva3_copy_priv *priv = (void *)subdev;
u32 dispatch = nv_rd32(priv, 0x10401c);
u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
u32 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
u32 addr = nv_rd32(priv, 0x104040) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
u32 data = nv_rd32(dev, 0x104044);
int chid = nv50_graph_isr_chid(dev, inst);
u32 data = nv_rd32(priv, 0x104044);
if (stat & 0x00000040) {
NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
nv_error(priv, "DISPATCH_ERROR [");
nouveau_enum_print(nva3_copy_isr_error_name, ssta);
printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
chid, inst, subc, mthd, data);
nv_wr32(dev, 0x104004, 0x00000040);
printk("] ch 0x%08x subc %d mthd 0x%04x data 0x%08x\n",
inst, subc, mthd, data);
nv_wr32(priv, 0x104004, 0x00000040);
stat &= ~0x00000040;
}
if (stat) {
NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
nv_wr32(dev, 0x104004, stat);
nv_error(priv, "unhandled intr 0x%08x\n", stat);
nv_wr32(priv, 0x104004, stat);
}
nv50_fb_vm_trap(dev, 1);
nv50_fb_trap(nouveau_fb(priv), 1);
}
static void
nva3_copy_destroy(struct drm_device *dev, int engine)
static int
nva3_copy_tlb_flush(struct nouveau_engine *engine)
{
struct nva3_copy_engine *pcopy = nv_engine(dev, engine);
nouveau_irq_unregister(dev, 22);
NVOBJ_ENGINE_DEL(dev, COPY0);
kfree(pcopy);
}
int
nva3_copy_create(struct drm_device *dev)
{
struct nva3_copy_engine *pcopy;
pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
if (!pcopy)
return -ENOMEM;
pcopy->base.destroy = nva3_copy_destroy;
pcopy->base.init = nva3_copy_init;
pcopy->base.fini = nva3_copy_fini;
pcopy->base.context_new = nva3_copy_context_new;
pcopy->base.context_del = nva3_copy_context_del;
pcopy->base.object_new = nva3_copy_object_new;
pcopy->base.tlb_flush = nva3_copy_tlb_flush;
nouveau_irq_register(dev, 22, nva3_copy_isr);
NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
NVOBJ_CLASS(dev, 0x85b5, COPY0);
nv50_vm_flush_engine(&engine->base, 0x0d);
return 0;
}
static int
nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
bool enable = (nv_device(parent)->chipset != 0xaf);
struct nva3_copy_priv *priv;
int ret;
ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00802000;
nv_subdev(priv)->intr = nva3_copy_intr;
nv_engine(priv)->cclass = &nva3_copy_cclass;
nv_engine(priv)->sclass = nva3_copy_sclass;
nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
return 0;
}
static int
nva3_copy_init(struct nouveau_object *object)
{
struct nva3_copy_priv *priv = (void *)object;
int ret, i;
ret = nouveau_copy_init(&priv->base);
if (ret)
return ret;
/* disable all interrupts */
nv_wr32(priv, 0x104014, 0xffffffff);
/* upload ucode */
nv_wr32(priv, 0x1041c0, 0x01000000);
for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
nv_wr32(priv, 0x104180, 0x01000000);
for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
if ((i & 0x3f) == 0)
nv_wr32(priv, 0x104188, i >> 6);
nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
}
/* start it running */
nv_wr32(priv, 0x10410c, 0x00000000);
nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
return 0;
}
static int
nva3_copy_fini(struct nouveau_object *object, bool suspend)
{
struct nva3_copy_priv *priv = (void *)object;
nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
nv_wr32(priv, 0x104014, 0xffffffff);
return nouveau_copy_fini(&priv->base, suspend);
}
struct nouveau_oclass
nva3_copy_oclass = {
.handle = NV_ENGINE(COPY0, 0xa3),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nva3_copy_ctor,
.dtor = _nouveau_copy_dtor,
.init = nva3_copy_init,
.fini = nva3_copy_fini,
},
};

View File

@ -1,5 +1,5 @@
/*
* Copyright 2011 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,138 +22,86 @@
* Authors: Ben Skeggs
*/
#include <linux/firmware.h>
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <core/ramht.h>
#include <core/os.h>
#include <core/enum.h>
#include <core/class.h>
#include <core/engctx.h>
#include <engine/copy.h>
#include "fuc/nvc0.fuc.h"
struct nvc0_copy_engine {
struct nouveau_exec_engine base;
u32 irq;
u32 pmc;
u32 fuc;
u32 ctx;
struct nvc0_copy_priv {
struct nouveau_copy base;
};
struct nvc0_copy_chan {
struct nouveau_gpuobj *mem;
struct nouveau_vma vma;
struct nouveau_copy_chan base;
};
/*******************************************************************************
* Copy object classes
******************************************************************************/
static struct nouveau_oclass
nvc0_copy0_sclass[] = {
{ 0x90b5, &nouveau_object_ofuncs },
{},
};
static struct nouveau_oclass
nvc0_copy1_sclass[] = {
{ 0x90b8, &nouveau_object_ofuncs },
{},
};
/*******************************************************************************
* PCOPY context
******************************************************************************/
static int
nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
nvc0_copy_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
struct nvc0_copy_chan *cctx;
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *ramin = chan->ramin;
struct nvc0_copy_chan *priv;
int ret;
cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
if (!cctx)
return -ENOMEM;
ret = nouveau_gpuobj_new(dev, NULL, 256, 256,
NVOBJ_FLAG_ZERO_ALLOC, &cctx->mem);
ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
ret = nouveau_gpuobj_map_vm(cctx->mem, chan->vm, NV_MEM_ACCESS_RW,
&cctx->vma);
if (ret)
return ret;
nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(cctx->vma.offset));
nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(cctx->vma.offset));
nvimem_flush(dev);
return 0;
}
static int
nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
{
return 0;
}
static struct nouveau_ofuncs
nvc0_copy_context_ofuncs = {
.ctor = nvc0_copy_context_ctor,
.dtor = _nouveau_copy_context_dtor,
.init = _nouveau_copy_context_init,
.fini = _nouveau_copy_context_fini,
.rd32 = _nouveau_copy_context_rd32,
.wr32 = _nouveau_copy_context_wr32,
};
static void
nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
{
struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
struct nvc0_copy_chan *cctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
u32 inst;
static struct nouveau_oclass
nvc0_copy0_cclass = {
.handle = NV_ENGCTX(COPY0, 0xc0),
.ofuncs = &nvc0_copy_context_ofuncs,
};
inst = (chan->ramin->addr >> 12);
inst |= 0x40000000;
static struct nouveau_oclass
nvc0_copy1_cclass = {
.handle = NV_ENGCTX(COPY1, 0xc0),
.ofuncs = &nvc0_copy_context_ofuncs,
};
/* disable fifo access */
nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
/* mark channel as unloaded if it's currently active */
if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
/* mark next channel as invalid if it's about to be loaded */
if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
/* restore fifo access */
nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);
nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
nouveau_gpuobj_unmap(&cctx->vma);
nouveau_gpuobj_ref(NULL, &cctx->mem);
kfree(cctx);
chan->engctx[engine] = NULL;
}
static int
nvc0_copy_init(struct drm_device *dev, int engine)
{
struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
int i;
nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);
nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
if ((i & 0x3f) == 0)
nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
}
nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
return 0;
}
static int
nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);
/* trigger fuc context unload */
nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);
nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
return 0;
}
/*******************************************************************************
* PCOPY engine/subdev functions
******************************************************************************/
static struct nouveau_enum nvc0_copy_isr_error_name[] = {
{ 0x0001, "ILLEGAL_MTHD" },
@ -163,93 +111,145 @@ static struct nouveau_enum nvc0_copy_isr_error_name[] = {
};
static void
nvc0_copy_isr(struct drm_device *dev, int engine)
nvc0_copy_intr(struct nouveau_subdev *subdev)
{
struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c);
u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16);
u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12;
u32 chid = nvc0_graph_isr_chid(dev, inst);
u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff;
u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16;
int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
struct nvc0_copy_priv *priv = (void *)subdev;
u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
u32 stat = intr & disp & ~(disp >> 16);
u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
u32 data = nv_rd32(dev, pcopy->fuc + 0x044);
u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
if (stat & 0x00000040) {
NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
nv_error(priv, "DISPATCH_ERROR [");
nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
chid, inst, subc, mthd, data);
nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040);
printk("] ch 0x%010llx subc %d mthd 0x%04x data 0x%08x\n",
(u64)inst << 12, subc, mthd, data);
nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
stat &= ~0x00000040;
}
if (stat) {
NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
nv_wr32(dev, pcopy->fuc + 0x004, stat);
nv_error(priv, "unhandled intr 0x%08x\n", stat);
nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
}
}
static void
nvc0_copy_isr_0(struct drm_device *dev)
static int
nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
}
struct nvc0_copy_priv *priv;
int ret;
static void
nvc0_copy_isr_1(struct drm_device *dev)
{
nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
}
if (nv_rd32(parent, 0x022500) & 0x00000100)
return -ENODEV;
static void
nvc0_copy_destroy(struct drm_device *dev, int engine)
{
struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
nouveau_irq_unregister(dev, pcopy->irq);
if (engine == NVOBJ_ENGINE_COPY0)
NVOBJ_ENGINE_DEL(dev, COPY0);
else
NVOBJ_ENGINE_DEL(dev, COPY1);
kfree(pcopy);
}
int
nvc0_copy_create(struct drm_device *dev, int engine)
{
struct nvc0_copy_engine *pcopy;
pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
if (!pcopy)
return -ENOMEM;
pcopy->base.destroy = nvc0_copy_destroy;
pcopy->base.init = nvc0_copy_init;
pcopy->base.fini = nvc0_copy_fini;
pcopy->base.context_new = nvc0_copy_context_new;
pcopy->base.context_del = nvc0_copy_context_del;
pcopy->base.object_new = nvc0_copy_object_new;
if (engine == 0) {
pcopy->irq = 5;
pcopy->pmc = 0x00000040;
pcopy->fuc = 0x104000;
pcopy->ctx = 0x0230;
nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0);
NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
NVOBJ_CLASS(dev, 0x90b5, COPY0);
} else {
pcopy->irq = 6;
pcopy->pmc = 0x00000080;
pcopy->fuc = 0x105000;
pcopy->ctx = 0x0240;
nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1);
NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base);
NVOBJ_CLASS(dev, 0x90b8, COPY1);
}
ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000040;
nv_subdev(priv)->intr = nvc0_copy_intr;
nv_engine(priv)->cclass = &nvc0_copy0_cclass;
nv_engine(priv)->sclass = nvc0_copy0_sclass;
return 0;
}
/* Constructor for the second PCOPY engine (COPY1) on nvc0.
 *
 * Bails out with -ENODEV when bit 9 of 0x022500 is set (the engine is
 * fused off / disabled — NOTE(review): register semantics inferred from
 * this check only, confirm against hardware docs).  Otherwise wires up
 * the shared interrupt handler, the COPY1 context/object classes, and
 * the PMC unit bit (0x00000080) used to enable the engine.
 */
static int
nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nvc0_copy_priv *priv;
	int ret;

	if (nv_rd32(parent, 0x022500) & 0x00000200)
		return -ENODEV;

	/* engine index 1 selects the 0x105000 register block */
	ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000080;
	nv_subdev(priv)->intr = nvc0_copy_intr;
	nv_engine(priv)->cclass = &nvc0_copy1_cclass;
	nv_engine(priv)->sclass = nvc0_copy1_sclass;
	return 0;
}
/* One-time hardware init for a PCOPY instance.
 *
 * Each engine's register block is 0x1000 apart starting at 0x104000;
 * 'idx' (0 or 1, derived from the engine index) selects the instance.
 * Uploads the nvc0_pcopy microcode data and code segments, then kicks
 * the engine's processor running.
 */
static int
nvc0_copy_init(struct nouveau_object *object)
{
	int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
	struct nvc0_copy_priv *priv = (void *)object;
	int ret, i;

	ret = nouveau_copy_init(&priv->base);
	if (ret)
		return ret;

	/* disable all interrupts */
	nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);

	/* upload ucode */
	nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
	for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
		nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);

	nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
	for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
		/* the code window is paged in 64-dword (0x100-byte) chunks */
		if ((i & 0x3f) == 0)
			nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
		nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
	}

	/* start it running */
	nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
	nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
	nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
	nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
	return 0;
}
/* Quiesce a PCOPY instance before suspend/teardown: clear the fifo
 * access enable bits at +0x048 (same register the old code labelled
 * "disable fifo access") and mask all interrupts, then chain to the
 * generic copy-engine fini.
 */
static int
nvc0_copy_fini(struct nouveau_object *object, bool suspend)
{
	int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
	struct nvc0_copy_priv *priv = (void *)object;

	nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
	nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
	return nouveau_copy_fini(&priv->base, suspend);
}
/* Engine class for COPY0 on nvc0; init/fini are shared between both
 * copy-engine instances, which distinguish themselves via nv_engidx().
 */
struct nouveau_oclass
nvc0_copy0_oclass = {
	.handle = NV_ENGINE(COPY0, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_copy0_ctor,
		.dtor = _nouveau_copy_dtor,
		.init = nvc0_copy_init,
		.fini = nvc0_copy_fini,
	},
};
/* Engine class for COPY1 on nvc0; shares init/fini with COPY0. */
struct nouveau_oclass
nvc0_copy1_oclass = {
	.handle = NV_ENGINE(COPY1, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_copy1_ctor,
		.dtor = _nouveau_copy_dtor,
		.init = nvc0_copy_init,
		.fini = nvc0_copy_fini,
	},
};

View File

@ -1,5 +1,5 @@
/*
* Copyright 2010 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,99 +22,106 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <core/ramht.h>
#include <core/os.h>
#include <core/enum.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/gpuobj.h>
struct nv84_crypt_engine {
struct nouveau_exec_engine base;
#include <subdev/fb.h>
#include <engine/crypt.h>
struct nv84_crypt_priv {
struct nouveau_crypt base;
};
struct nv84_crypt_chan {
struct nouveau_crypt_chan base;
};
/*******************************************************************************
* Crypt object classes
******************************************************************************/
static int
nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
nv84_crypt_object_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *ramin = chan->ramin;
struct nouveau_gpuobj *ctx;
struct nouveau_gpuobj *obj;
int ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &ctx);
ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
16, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
nv_wo32(ramin, 0xa0, 0x00190000);
nv_wo32(ramin, 0xa4, ctx->addr + ctx->size - 1);
nv_wo32(ramin, 0xa8, ctx->addr);
nv_wo32(ramin, 0xac, 0);
nv_wo32(ramin, 0xb0, 0);
nv_wo32(ramin, 0xb4, 0);
nvimem_flush(dev);
nvvm_engref(chan->vm, engine, 1);
chan->engctx[engine] = ctx;
nv_wo32(obj, 0x00, nv_mclass(obj));
nv_wo32(obj, 0x04, 0x00000000);
nv_wo32(obj, 0x08, 0x00000000);
nv_wo32(obj, 0x0c, 0x00000000);
return 0;
}
static void
nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
{
struct nouveau_gpuobj *ctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
u32 inst;
static struct nouveau_ofuncs
nv84_crypt_ofuncs = {
.ctor = nv84_crypt_object_ctor,
.dtor = _nouveau_gpuobj_dtor,
.init = _nouveau_gpuobj_init,
.fini = _nouveau_gpuobj_fini,
.rd32 = _nouveau_gpuobj_rd32,
.wr32 = _nouveau_gpuobj_wr32,
};
inst = (chan->ramin->addr >> 12);
inst |= 0x80000000;
/* Software classes exposed to userspace by nv84 PCRYPT: just the 0x74c1
 * crypt object.
 */
static struct nouveau_oclass
nv84_crypt_sclass[] = {
	{ 0x74c1, &nv84_crypt_ofuncs },
	{}
};
/* mark context as invalid if still on the hardware, not
* doing this causes issues the next time PCRYPT is used,
* unsurprisingly :)
*/
nv_wr32(dev, 0x10200c, 0x00000000);
if (nv_rd32(dev, 0x102188) == inst)
nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
if (nv_rd32(dev, 0x10218c) == inst)
nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
nv_wr32(dev, 0x10200c, 0x00000010);
nouveau_gpuobj_ref(NULL, &ctx);
nvvm_engref(chan->vm, engine, -1);
chan->engctx[engine] = NULL;
}
/*******************************************************************************
* PCRYPT context
******************************************************************************/
static int
nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
nv84_crypt_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *obj = NULL;
struct nv84_crypt_chan *priv;
int ret;
ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
obj->engine = 5;
obj->class = class;
nv_wo32(obj, 0x00, class);
nvimem_flush(dev);
ret = nouveau_ramht_insert(chan, handle, obj);
nouveau_gpuobj_ref(NULL, &obj);
return ret;
return 0;
}
static void
nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
{
nv50_vm_flush_engine(dev, 0x0a);
}
/* Per-channel context class for nv84 PCRYPT; everything except the
 * constructor is handled by the generic crypt-context helpers.
 */
static struct nouveau_oclass
nv84_crypt_cclass = {
	.handle = NV_ENGCTX(CRYPT, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv84_crypt_context_ctor,
		.dtor = _nouveau_crypt_context_dtor,
		.init = _nouveau_crypt_context_init,
		.fini = _nouveau_crypt_context_fini,
		.rd32 = _nouveau_crypt_context_rd32,
		.wr32 = _nouveau_crypt_context_wr32,
	},
};
static struct nouveau_bitfield nv84_crypt_intr[] = {
/*******************************************************************************
* PCRYPT engine/subdev functions
******************************************************************************/
static struct nouveau_bitfield nv84_crypt_intr_mask[] = {
{ 0x00000001, "INVALID_STATE" },
{ 0x00000002, "ILLEGAL_MTHD" },
{ 0x00000004, "ILLEGAL_CLASS" },
@ -124,79 +131,78 @@ static struct nouveau_bitfield nv84_crypt_intr[] = {
};
static void
nv84_crypt_isr(struct drm_device *dev)
nv84_crypt_intr(struct nouveau_subdev *subdev)
{
u32 stat = nv_rd32(dev, 0x102130);
u32 mthd = nv_rd32(dev, 0x102190);
u32 data = nv_rd32(dev, 0x102194);
u64 inst = (u64)(nv_rd32(dev, 0x102188) & 0x7fffffff) << 12;
int show = nouveau_ratelimit();
int chid = nv50_graph_isr_chid(dev, inst);
struct nv84_crypt_priv *priv = (void *)subdev;
u32 stat = nv_rd32(priv, 0x102130);
u32 mthd = nv_rd32(priv, 0x102190);
u32 data = nv_rd32(priv, 0x102194);
u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
if (show) {
NV_INFO(dev, "PCRYPT:");
nouveau_bitfield_print(nv84_crypt_intr, stat);
printk(KERN_CONT " ch %d (0x%010llx) mthd 0x%04x data 0x%08x\n",
chid, inst, mthd, data);
if (stat) {
nv_error(priv, "");
nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
printk(" ch 0x%010llx mthd 0x%04x data 0x%08x\n",
(u64)inst << 12, mthd, data);
}
nv_wr32(dev, 0x102130, stat);
nv_wr32(dev, 0x10200c, 0x10);
nv_wr32(priv, 0x102130, stat);
nv_wr32(priv, 0x10200c, 0x10);
nv50_fb_vm_trap(dev, show);
nv50_fb_trap(nouveau_fb(priv), 1);
}
static int
nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
nv84_crypt_tlb_flush(struct nouveau_engine *engine)
{
nv_wr32(dev, 0x102140, 0x00000000);
nv50_vm_flush_engine(&engine->base, 0x0a);
return 0;
}
static int
nv84_crypt_init(struct drm_device *dev, int engine)
nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
struct nv84_crypt_priv *priv;
int ret;
nv_wr32(dev, 0x102130, 0xffffffff);
nv_wr32(dev, 0x102140, 0xffffffbf);
ret = nouveau_crypt_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_wr32(dev, 0x10200c, 0x00000010);
nv_subdev(priv)->unit = 0x00004000;
nv_subdev(priv)->intr = nv84_crypt_intr;
nv_engine(priv)->cclass = &nv84_crypt_cclass;
nv_engine(priv)->sclass = nv84_crypt_sclass;
nv_engine(priv)->tlb_flush = nv84_crypt_tlb_flush;
return 0;
}
static void
nv84_crypt_destroy(struct drm_device *dev, int engine)
static int
nv84_crypt_init(struct nouveau_object *object)
{
struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine);
struct nv84_crypt_priv *priv = (void *)object;
int ret;
NVOBJ_ENGINE_DEL(dev, CRYPT);
ret = nouveau_crypt_init(&priv->base);
if (ret)
return ret;
nouveau_irq_unregister(dev, 14);
kfree(pcrypt);
}
int
nv84_crypt_create(struct drm_device *dev)
{
struct nv84_crypt_engine *pcrypt;
pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
if (!pcrypt)
return -ENOMEM;
pcrypt->base.destroy = nv84_crypt_destroy;
pcrypt->base.init = nv84_crypt_init;
pcrypt->base.fini = nv84_crypt_fini;
pcrypt->base.context_new = nv84_crypt_context_new;
pcrypt->base.context_del = nv84_crypt_context_del;
pcrypt->base.object_new = nv84_crypt_object_new;
pcrypt->base.tlb_flush = nv84_crypt_tlb_flush;
nouveau_irq_register(dev, 14, nv84_crypt_isr);
NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
NVOBJ_CLASS (dev, 0x74c1, CRYPT);
nv_wr32(priv, 0x102130, 0xffffffff);
nv_wr32(priv, 0x102140, 0xffffffbf);
nv_wr32(priv, 0x10200c, 0x00000010);
return 0;
}
/* Engine class for nv84 PCRYPT; only init is chip-specific, teardown and
 * fini use the generic crypt-engine helpers.
 */
struct nouveau_oclass
nv84_crypt_oclass = {
	.handle = NV_ENGINE(CRYPT, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv84_crypt_ctor,
		.dtor = _nouveau_crypt_dtor,
		.init = nv84_crypt_init,
		.fini = _nouveau_crypt_fini,
	},
};

View File

@ -1,5 +1,5 @@
/*
* Copyright 2011 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,124 +22,74 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include <core/os.h>
#include <core/enum.h>
#include <core/class.h>
#include <core/engctx.h>
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <core/ramht.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <engine/crypt.h>
#include "fuc/nv98.fuc.h"
struct nv98_crypt_priv {
struct nouveau_exec_engine base;
struct nouveau_crypt base;
};
struct nv98_crypt_chan {
struct nouveau_gpuobj *mem;
struct nouveau_crypt_chan base;
};
/*******************************************************************************
* Crypt object classes
******************************************************************************/
/* Software classes for nv98 PCRYPT: the 0x88b4 crypt object.  The fuc
 * needs no object-side state, so the generic object ofuncs suffice.
 */
static struct nouveau_oclass
nv98_crypt_sclass[] = {
	{ 0x88b4, &nouveau_object_ofuncs },
	{},
};
/*******************************************************************************
* PCRYPT context
******************************************************************************/
static int
nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
nv98_crypt_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_device *dev = chan->dev;
struct nv98_crypt_priv *priv = nv_engine(dev, engine);
struct nv98_crypt_chan *cctx;
struct nv98_crypt_chan *priv;
int ret;
cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
if (!cctx)
return -ENOMEM;
nvvm_engref(chan->vm, engine, 1);
ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
*pobject = nv_object(priv);
if (ret)
goto error;
return ret;
nv_wo32(chan->ramin, 0xa0, 0x00190000);
nv_wo32(chan->ramin, 0xa4, cctx->mem->addr + cctx->mem->size - 1);
nv_wo32(chan->ramin, 0xa8, cctx->mem->addr);
nv_wo32(chan->ramin, 0xac, 0x00000000);
nv_wo32(chan->ramin, 0xb0, 0x00000000);
nv_wo32(chan->ramin, 0xb4, 0x00000000);
nvimem_flush(dev);
error:
if (ret)
priv->base.context_del(chan, engine);
return ret;
}
static void
nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
{
struct nv98_crypt_chan *cctx = chan->engctx[engine];
int i;
for (i = 0xa0; i < 0xb4; i += 4)
nv_wo32(chan->ramin, i, 0x00000000);
nouveau_gpuobj_ref(NULL, &cctx->mem);
nvvm_engref(chan->vm, engine, -1);
chan->engctx[engine] = NULL;
kfree(cctx);
}
static int
nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
{
struct nv98_crypt_chan *cctx = chan->engctx[engine];
/* fuc engine doesn't need an object, our ramht code does.. */
cctx->mem->engine = 5;
cctx->mem->class = class;
return nouveau_ramht_insert(chan, handle, cctx->mem);
}
static void
nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
{
nv50_vm_flush_engine(dev, 0x0a);
}
static int
nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
{
nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
return 0;
}
static int
nv98_crypt_init(struct drm_device *dev, int engine)
{
int i;
/* Per-channel context class for nv98 PCRYPT; only the constructor is
 * chip-specific, the rest falls through to the generic crypt-context
 * helpers.
 */
static struct nouveau_oclass
nv98_crypt_cclass = {
	.handle = NV_ENGCTX(CRYPT, 0x98),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv98_crypt_context_ctor,
		.dtor = _nouveau_crypt_context_dtor,
		.init = _nouveau_crypt_context_init,
		.fini = _nouveau_crypt_context_fini,
		.rd32 = _nouveau_crypt_context_rd32,
		.wr32 = _nouveau_crypt_context_wr32,
	},
};
/* reset! */
nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
/* wait for exit interrupt to signal */
nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
nv_wr32(dev, 0x087004, 0x00000010);
/* upload microcode code and data segments */
nv_wr32(dev, 0x087ff8, 0x00100000);
for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
nv_wr32(dev, 0x087ff8, 0x00000000);
for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
/* start it running */
nv_wr32(dev, 0x08710c, 0x00000000);
nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
return 0;
}
/*******************************************************************************
* PCRYPT engine/subdev functions
******************************************************************************/
static struct nouveau_enum nv98_crypt_isr_error_name[] = {
{ 0x0000, "ILLEGAL_MTHD" },
@ -150,65 +100,100 @@ static struct nouveau_enum nv98_crypt_isr_error_name[] = {
};
static void
nv98_crypt_isr(struct drm_device *dev)
nv98_crypt_intr(struct nouveau_subdev *subdev)
{
u32 disp = nv_rd32(dev, 0x08701c);
u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
u32 addr = nv_rd32(dev, 0x087040) >> 16;
struct nv98_crypt_priv *priv = (void *)subdev;
u32 disp = nv_rd32(priv, 0x08701c);
u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
u32 addr = nv_rd32(priv, 0x087040) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
u32 data = nv_rd32(dev, 0x087044);
int chid = nv50_graph_isr_chid(dev, inst);
u32 data = nv_rd32(priv, 0x087044);
if (stat & 0x00000040) {
NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
nv_error(priv, "DISPATCH_ERROR [");
nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
chid, inst, subc, mthd, data);
nv_wr32(dev, 0x087004, 0x00000040);
printk("] ch 0x%08x subc %d mthd 0x%04x data 0x%08x\n",
inst, subc, mthd, data);
nv_wr32(priv, 0x087004, 0x00000040);
stat &= ~0x00000040;
}
if (stat) {
NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
nv_wr32(dev, 0x087004, stat);
nv_error(priv, "unhandled intr 0x%08x\n", stat);
nv_wr32(priv, 0x087004, stat);
}
nv50_fb_vm_trap(dev, 1);
nv50_fb_trap(nouveau_fb(priv), 1);
}
static void
nv98_crypt_destroy(struct drm_device *dev, int engine)
static int
nv98_crypt_tlb_flush(struct nouveau_engine *engine)
{
struct nv98_crypt_priv *priv = nv_engine(dev, engine);
nouveau_irq_unregister(dev, 14);
NVOBJ_ENGINE_DEL(dev, CRYPT);
kfree(priv);
}
int
nv98_crypt_create(struct drm_device *dev)
{
struct nv98_crypt_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base.destroy = nv98_crypt_destroy;
priv->base.init = nv98_crypt_init;
priv->base.fini = nv98_crypt_fini;
priv->base.context_new = nv98_crypt_context_new;
priv->base.context_del = nv98_crypt_context_del;
priv->base.object_new = nv98_crypt_object_new;
priv->base.tlb_flush = nv98_crypt_tlb_flush;
nouveau_irq_register(dev, 14, nv98_crypt_isr);
NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
NVOBJ_CLASS(dev, 0x88b4, CRYPT);
nv50_vm_flush_engine(&engine->base, 0x0a);
return 0;
}
/* Constructor for the nv98 PCRYPT engine object.
 *
 * Registers the interrupt handler, context/object classes, the engine's
 * TLB-flush hook, and the PMC unit bit (0x00004000) that the subdev
 * layer uses to enable/reset the engine.
 */
static int
nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nv98_crypt_priv *priv;
	int ret;

	ret = nouveau_crypt_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00004000;
	nv_subdev(priv)->intr = nv98_crypt_intr;
	nv_engine(priv)->cclass = &nv98_crypt_cclass;
	nv_engine(priv)->sclass = nv98_crypt_sclass;
	nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
	return 0;
}
/* Initialise nv98 PCRYPT: upload the nv98_pcrypt fuc code/data segments
 * and start the engine's processor.  The PMC reset done by the old
 * nv98_crypt_init() is gone here — presumably handled by the subdev base
 * via the 'unit' mask set in the constructor (TODO confirm).
 */
static int
nv98_crypt_init(struct nouveau_object *object)
{
	struct nv98_crypt_priv *priv = (void *)object;
	int ret, i;

	ret = nouveau_crypt_init(&priv->base);
	if (ret)
		return ret;

	/* wait for exit interrupt to signal */
	nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
	nv_wr32(priv, 0x087004, 0x00000010);

	/* upload microcode code and data segments */
	nv_wr32(priv, 0x087ff8, 0x00100000);
	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
		nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);

	nv_wr32(priv, 0x087ff8, 0x00000000);
	for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
		nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);

	/* start it running */
	nv_wr32(priv, 0x08710c, 0x00000000);
	nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
	nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
	return 0;
}
/* Engine class for nv98 PCRYPT; only init is chip-specific. */
struct nouveau_oclass
nv98_crypt_oclass = {
	.handle = NV_ENGINE(CRYPT, 0x98),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv98_crypt_ctor,
		.dtor = _nouveau_crypt_dtor,
		.init = nv98_crypt_init,
		.fini = _nouveau_crypt_fini,
	},
};

View File

@ -0,0 +1,90 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <engine/disp.h>
/* nv04 display engine: no private state beyond the generic disp base,
 * and no software classes are exposed.
 */
struct nv04_disp_priv {
	struct nouveau_disp base;
};

static struct nouveau_oclass
nv04_disp_sclass[] = {
	{},
};
/* Deliver a vblank event on the given CRTC to the registered callback,
 * if any.
 */
static void
nv04_disp_intr_vblank(struct nv04_disp_priv *priv, int crtc)
{
	struct nouveau_disp *base = &priv->base;

	if (!base->vblank.notify)
		return;
	base->vblank.notify(base->vblank.data, crtc);
}
/* Top-level interrupt handler for nv04 display.
 *
 * Latches the interrupt status of both CRTCs up front, then delivers
 * and acknowledges any pending vblank (bit 0) per head.  The two heads'
 * register blocks are 0x2000 apart (0x600100 / 0x602100).
 */
static void
nv04_disp_intr(struct nouveau_subdev *subdev)
{
	struct nv04_disp_priv *priv = (void *)subdev;
	u32 pending[2];
	int head;

	pending[0] = nv_rd32(priv, 0x600100);
	pending[1] = nv_rd32(priv, 0x602100);

	for (head = 0; head < 2; head++) {
		if (pending[head] & 0x00000001) {
			nv04_disp_intr_vblank(priv, head);
			nv_wr32(priv, 0x600100 + (head * 0x2000), 0x00000001);
		}
	}
}
/* Constructor for the nv04 display engine object: creates the generic
 * disp base ("DISPLAY") and attaches the interrupt handler and (empty)
 * software class list.
 */
static int
nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv04_disp_priv *priv;
	int ret;

	ret = nouveau_disp_create(parent, engine, oclass, "DISPLAY",
				  "display", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->sclass = nv04_disp_sclass;
	nv_subdev(priv)->intr = nv04_disp_intr;
	return 0;
}
/* Engine class for nv04 display; lifecycle is fully generic apart from
 * the constructor.
 */
struct nouveau_oclass
nv04_disp_oclass = {
	.handle = NV_ENGINE(DISP, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_disp_ctor,
		.dtor = _nouveau_disp_dtor,
		.init = _nouveau_disp_init,
		.fini = _nouveau_disp_fini,
	},
};

View File

@ -0,0 +1,125 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <engine/software.h>
#include <engine/disp.h>
/* nv50 display engine: no private state beyond the generic disp base,
 * and no software classes are exposed yet.
 */
struct nv50_disp_priv {
	struct nouveau_disp base;
};

static struct nouveau_oclass
nv50_disp_sclass[] = {
	{},
};
/* Handle a vblank on 'crtc' for nv50: walk the list of software channels
 * waiting on this CRTC's vblank and complete each one by programming the
 * per-channel offset/value registers (this looks like a semaphore-style
 * release into the channel's ctxdma — NOTE(review): confirm against the
 * software-engine vblank setup).  Register layout differs by chipset:
 * nv50 proper uses 0x001570/0x001574, later chips 0x060010/0x060014 with
 * a high-offset word at 0x06000c on nvc0+.  Finally notifies the
 * generic vblank callback.
 */
static void
nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
	struct nouveau_disp *disp = &priv->base;
	struct nouveau_software_chan *chan, *temp;
	unsigned long flags;

	/* vblank.list is shared with the software engine; guard it */
	spin_lock_irqsave(&disp->vblank.lock, flags);
	list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
		if (chan->vblank.crtc != crtc)
			continue;

		nv_wr32(priv, 0x001704, chan->vblank.channel);
		nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);

		if (nv_device(priv)->chipset == 0x50) {
			nv_wr32(priv, 0x001570, chan->vblank.offset);
			nv_wr32(priv, 0x001574, chan->vblank.value);
		} else {
			if (nv_device(priv)->chipset >= 0xc0) {
				nv_wr32(priv, 0x06000c,
					upper_32_bits(chan->vblank.offset));
			}
			nv_wr32(priv, 0x060010, chan->vblank.offset);
			nv_wr32(priv, 0x060014, chan->vblank.value);
		}

		/* request satisfied; drop the waiter and release its ref */
		list_del(&chan->vblank.head);
		if (disp->vblank.put)
			disp->vblank.put(disp->vblank.data, crtc);
	}
	spin_unlock_irqrestore(&disp->vblank.lock, flags);

	if (disp->vblank.notify)
		disp->vblank.notify(disp->vblank.data, crtc);
}
/* Top-level interrupt handler for nv50 display.
 *
 * Bits 2 and 3 of 0x610024 flag vblank on heads 0 and 1; deliver and
 * acknowledge each pending head in turn.
 */
static void
nv50_disp_intr(struct nouveau_subdev *subdev)
{
	struct nv50_disp_priv *priv = (void *)subdev;
	u32 pending = nv_rd32(priv, 0x610024);
	int head;

	for (head = 0; head < 2; head++) {
		u32 mask = 0x00000004 << head;
		if (pending & mask) {
			nv50_disp_intr_vblank(priv, head);
			nv_wr32(priv, 0x610024, mask);
			pending &= ~mask;
		}
	}
}
/* Constructor for the nv50 display engine object ("PDISP"): attaches
 * the interrupt handler and initialises the vblank waiter list/lock
 * used by nv50_disp_intr_vblank().
 */
static int
nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv50_disp_priv *priv;
	int ret;

	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
				  "display", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->sclass = nv50_disp_sclass;
	nv_subdev(priv)->intr = nv50_disp_intr;

	INIT_LIST_HEAD(&priv->base.vblank.list);
	spin_lock_init(&priv->base.vblank.lock);
	return 0;
}
/* Engine class for nv50 display; lifecycle is fully generic apart from
 * the constructor.
 */
struct nouveau_oclass
nv50_disp_oclass = {
	.handle = NV_ENGINE(DISP, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_disp_ctor,
		.dtor = _nouveau_disp_dtor,
		.init = _nouveau_disp_init,
		.fini = _nouveau_disp_fini,
	},
};

View File

@ -0,0 +1,118 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/bar.h>
#include <engine/software.h>
#include <engine/disp.h>
/* Private state for the NVD0 (Fermi-generation) display engine. */
struct nvd0_disp_priv {
	struct nouveau_disp base;
};

/* No user-visible display object classes are exposed on NVD0 yet. */
static struct nouveau_oclass
nvd0_disp_sclass[] = {
	{},
};
/* Handle a vblank event on the given crtc: walk the list of software
 * channels waiting on this crtc, write each one's completion values to
 * the hardware, and remove it from the wait list.
 */
static void
nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
{
	struct nouveau_bar *bar = nouveau_bar(priv);
	struct nouveau_disp *disp = &priv->base;
	struct nouveau_software_chan *chan, *temp;
	unsigned long flags;

	spin_lock_irqsave(&disp->vblank.lock, flags);
	list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
		if (chan->vblank.crtc != crtc)
			continue;

		/* NOTE(review): 0x001718 appears to select the target
		 * channel and 0x06000c..0x060014 to write the semaphore
		 * offset/value - confirm against hw docs.  bar->flush()
		 * orders the channel select before the following writes. */
		nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
		bar->flush(bar);
		nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
		nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
		nv_wr32(priv, 0x060014, chan->vblank.value);

		list_del(&chan->vblank.head);
		if (disp->vblank.put)
			disp->vblank.put(disp->vblank.data, crtc);
	}
	spin_unlock_irqrestore(&disp->vblank.lock, flags);

	/* notify outside the lock, once per crtc rather than per channel */
	if (disp->vblank.notify)
		disp->vblank.notify(disp->vblank.data, crtc);
}
/* Top-level PDISP interrupt handler: check the per-head interrupt bits
 * and dispatch vblank handling for each head that signalled one.
 */
static void
nvd0_disp_intr(struct nouveau_subdev *subdev)
{
	struct nvd0_disp_priv *priv = (void *)subdev;
	u32 intr = nv_rd32(priv, 0x610088);
	int i;

	for (i = 0; i < 4; i++) {	/* up to four display heads */
		u32 mask = 0x01000000 << i;
		if (mask & intr) {
			u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
			if (stat & 0x00000001)
				nvd0_disp_intr_vblank(priv, i);
			/* NOTE(review): nv_mask(..., 0, 0) rewrites the
			 * status register with its own value - presumably
			 * acks the event; the readback below flushes the
			 * posted write.  Confirm against hw docs. */
			nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
			nv_rd32(priv, 0x6100c0 + (i * 0x800));
		}
	}
}
/* Constructor for the NVD0 PDISP engine object: allocates the base
 * display engine, then hooks up the object classes and IRQ handler. */
static int
nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nvd0_disp_priv *disp;
	int ret;

	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
				  "display", &disp);
	*pobject = nv_object(disp);
	if (ret)
		return ret;

	/* vblank bookkeeping consumed by the interrupt handler */
	spin_lock_init(&disp->base.vblank.lock);
	INIT_LIST_HEAD(&disp->base.vblank.list);

	nv_engine(disp)->sclass = nvd0_disp_sclass;
	nv_subdev(disp)->intr = nvd0_disp_intr;
	return 0;
}
/* Class description for the NVD0-generation display engine; the shared
 * disp dtor/init/fini helpers are reused, only the constructor differs. */
struct nouveau_oclass
nvd0_disp_oclass = {
	.handle = NV_ENGINE(DISP, 0xd0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvd0_disp_ctor,
		.dtor = _nouveau_disp_dtor,
		.init = _nouveau_disp_init,
		.fini = _nouveau_disp_fini,
	},
};

View File

@ -0,0 +1,87 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/object.h>
#include <core/class.h>
#include <subdev/fb.h>
#include <engine/dmaobj.h>
/* Common constructor for DMA objects: validates the user-supplied
 * nv_dma_class arguments and translates the class flags into the
 * driver's internal target/access enums, recording the address range.
 *
 * Returns 0 on success, -EINVAL for a short argument buffer or an
 * unrecognised target/access flag combination, or the error from base
 * object creation.
 */
int
nouveau_dmaobj_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass,
		       void *data, u32 size, int len, void **pobject)
{
	struct nv_dma_class *args = data;
	struct nouveau_dmaobj *object;
	int ret;

	if (size < sizeof(*args))
		return -EINVAL;

	ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
	object = *pobject;
	if (ret)
		return ret;

	switch (args->flags & NV_DMA_TARGET_MASK) {
	case NV_DMA_TARGET_VM:
		object->target = NV_MEM_TARGET_VM;
		break;
	case NV_DMA_TARGET_VRAM:
		object->target = NV_MEM_TARGET_VRAM;
		break;
	case NV_DMA_TARGET_PCI:
		object->target = NV_MEM_TARGET_PCI;
		break;
	case NV_DMA_TARGET_PCI_US:
	case NV_DMA_TARGET_AGP:
		/* both collapse onto the non-snooped pci target internally */
		object->target = NV_MEM_TARGET_PCI_NOSNOOP;
		break;
	default:
		return -EINVAL;
	}

	switch (args->flags & NV_DMA_ACCESS_MASK) {
	case NV_DMA_ACCESS_VM:
		object->access = NV_MEM_ACCESS_VM;
		break;
	case NV_DMA_ACCESS_RD:
		object->access = NV_MEM_ACCESS_RO;
		break;
	case NV_DMA_ACCESS_WR:
		object->access = NV_MEM_ACCESS_WO;
		break;
	case NV_DMA_ACCESS_RDWR:
		object->access = NV_MEM_ACCESS_RW;
		break;
	default:
		return -EINVAL;
	}

	object->start = args->start;
	object->limit = args->limit;
	return 0;
}

View File

@ -0,0 +1,176 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/vm/nv04.h>
#include <engine/dmaobj.h>
/* Private state for the nv04 DMAOBJ engine. */
struct nv04_dmaeng_priv {
	struct nouveau_dmaeng base;
};

/* Per-object state for an nv04 DMA object. */
struct nv04_dmaobj_priv {
	struct nouveau_dmaobj base;
};
/* Build the 16-byte hardware representation of a DMA object for
 * NV04-class chipsets.  The object class id forms the low bits of the
 * first word; target/access flags are or'd on top, and the sub-page
 * adjust lives in bits 20..31.
 */
static int
nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
		 struct nouveau_object *parent,
		 struct nouveau_dmaobj *dmaobj,
		 struct nouveau_gpuobj **pgpuobj)
{
	struct nouveau_gpuobj *gpuobj;
	u32 flags0 = nv_mclass(dmaobj);
	u32 flags2 = 0x00000000;
	u32 offset = (dmaobj->start & 0xfffff000);	/* page-aligned base */
	u32 adjust = (dmaobj->start & 0x00000fff);	/* sub-page offset */
	u32 length = dmaobj->limit - dmaobj->start;
	int ret;

	if (dmaobj->target == NV_MEM_TARGET_VM) {
		/* nv04 has no real per-channel vm: either alias the whole
		 * page table (start == 0), or translate the start address
		 * through the page table and rewrite the request as a
		 * linear pci object. */
		gpuobj = nv04_vmmgr(dmaeng)->vm->pgt[0].obj[0];
		if (dmaobj->start == 0)
			return nouveau_gpuobj_dup(parent, gpuobj, pgpuobj);

		offset = nv_ro32(gpuobj, 8 + (offset >> 10));
		offset &= 0xfffff000;

		dmaobj->target = NV_MEM_TARGET_PCI;
		dmaobj->access = NV_MEM_ACCESS_RW;
	}

	switch (dmaobj->target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00003000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00023000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00033000;
		break;
	default:
		return -EINVAL;
	}

	switch (dmaobj->access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
		/* fallthrough - write access also needs the flags2 bit */
	case NV_MEM_ACCESS_RW:
		flags2 |= 0x00000002;
		break;
	default:
		return -EINVAL;
	}

	ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
	*pgpuobj = gpuobj;
	if (ret == 0) {
		nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20));
		nv_wo32(*pgpuobj, 0x04, length);
		/* base address is written twice (frame/limit pair layout) */
		nv_wo32(*pgpuobj, 0x08, flags2 | offset);
		nv_wo32(*pgpuobj, 0x0c, flags2 | offset);
	}

	return ret;
}
/* Constructor for nv04 DMA object classes.  When the parent is an nv04
 * dma channel (class 0x006e) the object is bound to its hardware form
 * immediately and the resulting gpuobj replaces the software wrapper
 * as the returned object.
 */
static int
nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nouveau_dmaeng *dmaeng = (void *)engine;
	struct nv04_dmaobj_priv *dmaobj;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	ret = nouveau_dmaobj_create(parent, engine, oclass,
				    data, size, &dmaobj);
	*pobject = nv_object(dmaobj);
	if (ret)
		return ret;

	switch (nv_mclass(parent)) {
	case 0x006e:
		/* bind now and hand back the raw gpuobj instead */
		ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
		nouveau_object_ref(NULL, pobject);
		*pobject = nv_object(gpuobj);
		break;
	default:
		/* deferred: bound later by whoever attaches the object */
		break;
	}

	return ret;
}
/* Standard object functions shared by all nv04 DMA object classes. */
static struct nouveau_ofuncs
nv04_dmaobj_ofuncs = {
	.ctor = nv04_dmaobj_ctor,
	.dtor = _nouveau_dmaobj_dtor,
	.init = _nouveau_dmaobj_init,
	.fini = _nouveau_dmaobj_fini,
};

/* DMA object classes exposed on nv04 (presumably the classic
 * NV_DMA_{FROM,TO,IN}_MEMORY classes - confirm against class docs). */
static struct nouveau_oclass
nv04_dmaobj_sclass[] = {
	{ 0x0002, &nv04_dmaobj_ofuncs },
	{ 0x0003, &nv04_dmaobj_ofuncs },
	{ 0x003d, &nv04_dmaobj_ofuncs },
	{}
};
/* Constructor for the nv04 DMAOBJ engine: allocates the base engine
 * and wires up the supported object classes and the bind() backend. */
static int
nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nv04_dmaeng_priv *dmaeng;
	int ret;

	ret = nouveau_dmaeng_create(parent, engine, oclass, &dmaeng);
	*pobject = nv_object(dmaeng);
	if (ret)
		return ret;

	dmaeng->base.bind = nv04_dmaobj_bind;
	dmaeng->base.base.sclass = nv04_dmaobj_sclass;
	return 0;
}
/* Class description for the nv04 DMAOBJ engine. */
struct nouveau_oclass
nv04_dmaeng_oclass = {
	.handle = NV_ENGINE(DMAOBJ, 0x04),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv04_dmaeng_ctor,
		.dtor = _nouveau_dmaeng_dtor,
		.init = _nouveau_dmaeng_init,
		.fini = _nouveau_dmaeng_fini,
	},
};

View File

@ -0,0 +1,168 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <engine/dmaobj.h>
/* Private state for the nv50 DMAOBJ engine. */
struct nv50_dmaeng_priv {
	struct nouveau_dmaeng base;
};

/* Per-object state for an nv50 DMA object. */
struct nv50_dmaobj_priv {
	struct nouveau_dmaobj base;
};
/* Build the 24-byte hardware DMA object for NV50-class chipsets.  The
 * object class id forms the low bits of the first word, with target,
 * access and storage-type flags or'd on top.
 */
static int
nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
		 struct nouveau_object *parent,
		 struct nouveau_dmaobj *dmaobj,
		 struct nouveau_gpuobj **pgpuobj)
{
	u32 flags = nv_mclass(dmaobj);
	int ret;

	switch (dmaobj->target) {
	case NV_MEM_TARGET_VM:
		flags |= 0x00000000;
		flags |= 0x60000000; /* COMPRESSION_USEVM */
		flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
		break;
	case NV_MEM_TARGET_VRAM:
		flags |= 0x00010000;
		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
		break;
	case NV_MEM_TARGET_PCI:
		flags |= 0x00020000;
		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags |= 0x00030000;
		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
		break;
	default:
		return -EINVAL;
	}

	/* NOTE(review): unlike the target switch above, this switch has no
	 * default - an unexpected access value falls through with no access
	 * bits set.  Values are constrained by nouveau_dmaobj_create_(), but
	 * confirm the asymmetry is intended. */
	switch (dmaobj->access) {
	case NV_MEM_ACCESS_VM:
		break;
	case NV_MEM_ACCESS_RO:
		flags |= 0x00040000;
		break;
	case NV_MEM_ACCESS_WO:
	case NV_MEM_ACCESS_RW:
		flags |= 0x00080000;
		break;
	}

	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
	if (ret == 0) {
		nv_wo32(*pgpuobj, 0x00, flags);
		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
		/* high bits of limit and start share the fourth word */
		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
					upper_32_bits(dmaobj->start));
		nv_wo32(*pgpuobj, 0x10, 0x00000000);
		nv_wo32(*pgpuobj, 0x14, 0x00000000);
	}

	return ret;
}
/* Constructor for nv50 DMA object classes.  For parents of class
 * 0x506f/0x826f the object is bound to its hardware form immediately
 * and the resulting gpuobj replaces the software wrapper. */
static int
nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nouveau_dmaeng *dmaeng = (void *)engine;
	struct nv50_dmaobj_priv *dmaobj;
	struct nouveau_gpuobj *gpuobj;
	u32 pclass;
	int ret;

	ret = nouveau_dmaobj_create(parent, engine, oclass,
				    data, size, &dmaobj);
	*pobject = nv_object(dmaobj);
	if (ret)
		return ret;

	pclass = nv_mclass(parent);
	if (pclass == 0x506f || pclass == 0x826f) {
		/* bind now and hand back the raw gpuobj instead */
		ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
		nouveau_object_ref(NULL, pobject);
		*pobject = nv_object(gpuobj);
	}

	return ret;
}
/* Standard object functions shared by all nv50 DMA object classes. */
static struct nouveau_ofuncs
nv50_dmaobj_ofuncs = {
	.ctor = nv50_dmaobj_ctor,
	.dtor = _nouveau_dmaobj_dtor,
	.init = _nouveau_dmaobj_init,
	.fini = _nouveau_dmaobj_fini,
};

/* DMA object classes exposed on nv50 - same class ids as nv04. */
static struct nouveau_oclass
nv50_dmaobj_sclass[] = {
	{ 0x0002, &nv50_dmaobj_ofuncs },
	{ 0x0003, &nv50_dmaobj_ofuncs },
	{ 0x003d, &nv50_dmaobj_ofuncs },
	{}
};
/* Constructor for the nv50 DMAOBJ engine: allocates the base engine
 * and wires up the supported object classes and the bind() backend. */
static int
nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nv50_dmaeng_priv *dmaeng;
	int ret;

	ret = nouveau_dmaeng_create(parent, engine, oclass, &dmaeng);
	*pobject = nv_object(dmaeng);
	if (ret)
		return ret;

	dmaeng->base.bind = nv50_dmaobj_bind;
	dmaeng->base.base.sclass = nv50_dmaobj_sclass;
	return 0;
}
/* Class description for the nv50 DMAOBJ engine. */
struct nouveau_oclass
nv50_dmaeng_oclass = {
	.handle = NV_ENGINE(DMAOBJ, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_dmaeng_ctor,
		.dtor = _nouveau_dmaeng_dtor,
		.init = _nouveau_dmaeng_init,
		.fini = _nouveau_dmaeng_fini,
	},
};

View File

@ -0,0 +1,99 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <engine/dmaobj.h>
/* Private state for the nvc0 (Fermi) DMAOBJ engine. */
struct nvc0_dmaeng_priv {
	struct nouveau_dmaeng base;
};

/* Per-object state for an nvc0 DMA object. */
struct nvc0_dmaobj_priv {
	struct nouveau_dmaobj base;
};
/* Constructor for Fermi DMA object classes.  Only full-VM DMA objects
 * starting at offset zero are accepted; anything else is rejected. */
static int
nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nvc0_dmaobj_priv *dmaobj;
	int ret;

	ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
	*pobject = nv_object(dmaobj);
	if (ret)
		return ret;

	if (dmaobj->base.target == NV_MEM_TARGET_VM && !dmaobj->base.start)
		return 0;

	return -EINVAL;
}
/* Standard object functions shared by all nvc0 DMA object classes. */
static struct nouveau_ofuncs
nvc0_dmaobj_ofuncs = {
	.ctor = nvc0_dmaobj_ctor,
	.dtor = _nouveau_dmaobj_dtor,
	.init = _nouveau_dmaobj_init,
	.fini = _nouveau_dmaobj_fini,
};

/* DMA object classes exposed on nvc0 - same class ids as nv04/nv50. */
static struct nouveau_oclass
nvc0_dmaobj_sclass[] = {
	{ 0x0002, &nvc0_dmaobj_ofuncs },
	{ 0x0003, &nvc0_dmaobj_ofuncs },
	{ 0x003d, &nvc0_dmaobj_ofuncs },
	{}
};
/* Constructor for the nvc0 DMAOBJ engine: allocates the base engine
 * and registers the object classes.  No bind() backend is installed
 * on this generation. */
static int
nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct nvc0_dmaeng_priv *dmaeng;
	int ret;

	ret = nouveau_dmaeng_create(parent, engine, oclass, &dmaeng);
	*pobject = nv_object(dmaeng);
	if (ret)
		return ret;

	dmaeng->base.base.sclass = nvc0_dmaobj_sclass;
	return 0;
}
/* Class description for the nvc0 DMAOBJ engine. */
struct nouveau_oclass
nvc0_dmaeng_oclass = {
	.handle = NV_ENGINE(DMAOBJ, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_dmaeng_ctor,
		.dtor = _nouveau_dmaeng_dtor,
		.init = _nouveau_dmaeng_init,
		.fini = _nouveau_dmaeng_fini,
	},
};

View File

@ -0,0 +1,165 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/object.h>
#include <core/handle.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
/* Common constructor for FIFO channel objects.
 *
 * bar/addr/size locate the channel's control registers within one of
 * the device's PCI BARs; pushbuf is the client handle of the DMA
 * object describing the push buffer; engmask restricts which engine
 * classes may be attached to the channel.
 *
 * On error after partial setup the caller is expected to invoke the
 * channel destructor, which unwinds state via
 * nouveau_fifo_channel_destroy().
 */
int
nouveau_fifo_channel_create_(struct nouveau_object *parent,
			     struct nouveau_object *engine,
			     struct nouveau_oclass *oclass,
			     int bar, u32 addr, u32 size, u32 pushbuf,
			     u32 engmask, int len, void **ptr)
{
	struct nouveau_device *device = nv_device(engine);
	struct nouveau_fifo *priv = (void *)engine;
	struct nouveau_fifo_chan *chan;
	struct nouveau_dmaeng *dmaeng;
	unsigned long flags;
	int ret;

	/* create base object class */
	ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
				     engmask, len, ptr);
	chan = *ptr;
	if (ret)
		return ret;

	/* validate dma object representing push buffer */
	chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
	if (!chan->pushdma)
		return -ENOENT;

	dmaeng = (void *)chan->pushdma->base.engine;
	switch (chan->pushdma->base.oclass->handle) {
	case 0x0002:
	case 0x003d:
		break;
	default:
		return -EINVAL;
	}

	/* if the dmaobj engine has a bind() backend, pre-translate the
	 * push buffer dmaobj into its hardware (gpuobj) form */
	if (dmaeng->bind) {
		ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
		if (ret)
			return ret;
	}

	/* find a free fifo channel */
	spin_lock_irqsave(&priv->lock, flags);
	for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
		if (!priv->channel[chan->chid]) {
			/* claim the slot while still holding the lock */
			priv->channel[chan->chid] = nv_object(chan);
			break;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (chan->chid == priv->max) {
		nv_error(priv, "no free channels\n");
		return -ENOSPC;
	}

	/* map fifo control registers */
	chan->user = ioremap(pci_resource_start(device->pdev, bar) + addr +
			     (chan->chid * size), size);
	if (!chan->user)
		return -EFAULT;

	chan->size = size;
	return 0;
}
/* Tear down a FIFO channel: unmap its control registers, release its
 * channel id back to the engine, and drop the push buffer references.
 *
 * NOTE(review): if creation failed before ioremap(), chan->user is
 * NULL here - iounmap(NULL) is assumed to be tolerated; confirm for
 * all supported architectures.
 */
void
nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
{
	struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
	unsigned long flags;

	iounmap(chan->user);

	/* release the chid under the same lock create_() allocates with */
	spin_lock_irqsave(&priv->lock, flags);
	priv->channel[chan->chid] = NULL;
	spin_unlock_irqrestore(&priv->lock, flags);

	nouveau_gpuobj_ref(NULL, &chan->pushgpu);
	nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma);
	nouveau_namedb_destroy(&chan->base);
}
/* Default dtor ofunc for FIFO channels: forwards straight to
 * nouveau_fifo_channel_destroy(). */
void
_nouveau_fifo_channel_dtor(struct nouveau_object *object)
{
	nouveau_fifo_channel_destroy((struct nouveau_fifo_chan *)object);
}
/* Default rd32 ofunc: read from the channel's mapped control regs. */
u32
_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
{
	struct nouveau_fifo_chan *chan;

	chan = (struct nouveau_fifo_chan *)object;
	return ioread32_native(chan->user + addr);
}
/* Default wr32 ofunc: write to the channel's mapped control regs. */
void
_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
	struct nouveau_fifo_chan *chan;

	chan = (struct nouveau_fifo_chan *)object;
	iowrite32_native(data, chan->user + addr);
}
/* Common destructor for PFIFO engine objects: frees the channel table
 * allocated by nouveau_fifo_create_() before destroying the engine. */
void
nouveau_fifo_destroy(struct nouveau_fifo *priv)
{
	kfree(priv->channel);
	nouveau_engine_destroy(&priv->base);
}
/* Common constructor for PFIFO engine objects.  min/max give the
 * inclusive range of usable channel ids; a zeroed channel pointer
 * table covering [0, max] is allocated up front.
 *
 * NOTE(review): sizeof(*priv->channel) * (max + 1) could overflow for
 * absurd max values; harmless for the hw-bounded channel counts used
 * by callers, but kcalloc() would make the bound explicit.
 */
int
nouveau_fifo_create_(struct nouveau_object *parent,
		     struct nouveau_object *engine,
		     struct nouveau_oclass *oclass,
		     int min, int max, int length, void **pobject)
{
	struct nouveau_fifo *priv;
	int ret;

	ret = nouveau_engine_create_(parent, engine, oclass, true, "PFIFO",
				     "fifo", length, pobject);
	priv = *pobject;
	if (ret)
		return ret;

	priv->min = min;
	priv->max = max;
	priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
	if (!priv->channel)
		return -ENOMEM;

	/* guards channel-id allocation in channel create/destroy */
	spin_lock_init(&priv->lock);
	return 0;
}

View File

@ -1,44 +1,45 @@
/*
* Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include "nouveau_util.h"
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/namedb.h>
#include <core/handle.h>
#include <core/ramht.h>
#include "nouveau_software.h"
static struct ramfc_desc {
unsigned bits:6;
unsigned ctxs:5;
unsigned ctxp:8;
unsigned regs:5;
unsigned regp;
} nv04_ramfc[] = {
#include <subdev/instmem.h>
#include <subdev/instmem/nv04.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <engine/fifo.h>
#include "nv04.h"
static struct ramfc_desc
nv04_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
@ -50,259 +51,284 @@ static struct ramfc_desc {
{}
};
struct nv04_fifo_priv {
struct nouveau_fifo_priv base;
struct ramfc_desc *ramfc_desc;
struct nouveau_gpuobj *ramro;
struct nouveau_gpuobj *ramfc;
};
/*******************************************************************************
* FIFO channel objects
******************************************************************************/
struct nv04_fifo_chan {
struct nouveau_fifo_chan base;
u32 ramfc;
};
bool
nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
int
nv04_fifo_object_attach(struct nouveau_object *parent,
struct nouveau_object *object, u32 handle)
{
int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
struct nv04_fifo_priv *priv = (void *)parent->engine;
struct nv04_fifo_chan *chan = (void *)parent;
u32 context, chid = chan->base.chid;
int ret;
if (!enable) {
/* In some cases the PFIFO puller may be left in an
* inconsistent state if you try to stop it when it's
* busy translating handles. Sometimes you get a
* PFIFO_CACHE_ERROR, sometimes it just fails silently
* sending incorrect instance offsets to PGRAPH after
* it's started up again. To avoid the latter we
* invalidate the most recently calculated instance.
*/
if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
if (nv_iclass(object, NV_GPUOBJ_CLASS))
context = nv_gpuobj(object)->addr >> 4;
else
context = 0x00000004; /* just non-zero */
if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
nv_wr32(dev, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_CACHE_ERROR);
nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_DMAOBJ:
case NVDEV_ENGINE_SW:
context |= 0x00000000;
break;
case NVDEV_ENGINE_GR:
context |= 0x00010000;
break;
case NVDEV_ENGINE_MPEG:
context |= 0x00020000;
break;
default:
return -EINVAL;
}
return pull & 1;
context |= 0x80000000; /* valid */
context |= chid << 24;
mutex_lock(&nv_subdev(priv)->mutex);
ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
mutex_unlock(&nv_subdev(priv)->mutex);
return ret;
}
void
nv04_fifo_object_detach(struct nouveau_object *parent, int cookie)
{
struct nv04_fifo_priv *priv = (void *)parent->engine;
mutex_lock(&nv_subdev(priv)->mutex);
nouveau_ramht_remove(priv->ramht, cookie);
mutex_unlock(&nv_subdev(priv)->mutex);
}
static int
nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
nv04_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_fifo_priv *priv = nv_engine(dev, engine);
struct nv04_fifo_chan *fctx;
unsigned long flags;
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
struct nv_channel_dma_class *args = data;
int ret;
fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
if (size < sizeof(*args))
return -EINVAL;
fctx->ramfc = chan->id * 32;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
(1 << NVDEV_ENGINE_DMAOBJ) |
(1 << NVDEV_ENGINE_SW) |
(1 << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
/* map channel control registers */
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
if (!chan->user) {
ret = -ENOMEM;
goto error;
}
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
chan->ramfc = chan->base.chid * 32;
/* initialise default fifo context */
nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x08, chan->pushbuf->addr >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x10,
nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x10,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
/* enable dma mode on the channel */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
error:
if (ret)
priv->base.base.context_del(chan, engine);
return ret;
return 0;
}
void
nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
nv04_fifo_chan_dtor(struct nouveau_object *object)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
struct nv04_fifo_chan *fctx = chan->engctx[engine];
struct nv04_fifo_priv *priv = (void *)object->engine;
struct nv04_fifo_chan *chan = (void *)object;
struct ramfc_desc *c = priv->ramfc_desc;
unsigned long flags;
int chid;
/* prevent fifo context switches */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wr32(dev, NV03_PFIFO_CACHES, 0);
do {
nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
} while ((++c)->bits);
/* if this channel is active, replace it with a null context */
chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
if (chid == chan->id) {
nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
do {
u32 mask = ((1ULL << c->bits) - 1) << c->regs;
nv_mask(dev, c->regp, mask, 0x00000000);
nv_wo32(priv->ramfc, fctx->ramfc + c->ctxp, 0x00000000);
} while ((++c)->bits);
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
/* restore normal operation, after disabling dma mode */
nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
nv_wr32(dev, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* clean up */
nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
nouveau_fifo_channel_destroy(&chan->base);
}
int
nv04_fifo_init(struct drm_device *dev, int engine)
nv04_fifo_chan_init(struct nouveau_object *object)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_fifo_priv *priv = nv_engine(dev, engine);
int i;
struct nv04_fifo_priv *priv = (void *)object->engine;
struct nv04_fifo_chan *chan = (void *)object;
u32 mask = 1 << chan->base.chid;
unsigned long flags;
int ret;
nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
(dev_priv->ramht->gpuobj->addr >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
nv_wr32(dev, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
nv_wr32(dev, NV03_PFIFO_CACHES, 1);
for (i = 0; i < priv->base.channels; i++) {
if (dev_priv->channels.ptr[i])
nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
}
ret = nouveau_fifo_channel_init(&chan->base);
if (ret)
return ret;
spin_lock_irqsave(&priv->base.lock, flags);
nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
spin_unlock_irqrestore(&priv->base.lock, flags);
return 0;
}
int
nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_fifo_priv *priv = nv_engine(dev, engine);
struct nouveau_channel *chan;
int chid;
struct nv04_fifo_priv *priv = (void *)object->engine;
struct nv04_fifo_chan *chan = (void *)object;
struct nouveau_gpuobj *fctx = priv->ramfc;
struct ramfc_desc *c;
unsigned long flags;
u32 data = chan->ramfc;
u32 chid;
/* prevent context switches and halt fifo operation */
nv_wr32(dev, NV03_PFIFO_CACHES, 0);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
/* prevent fifo context switches */
spin_lock_irqsave(&priv->base.lock, flags);
nv_wr32(priv, NV03_PFIFO_CACHES, 0);
/* store current fifo context in ramfc */
chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
chan = dev_priv->channels.ptr[chid];
if (suspend && chid != priv->base.channels && chan) {
struct nv04_fifo_chan *fctx = chan->engctx[engine];
struct nouveau_gpuobj *ctx = priv->ramfc;
struct ramfc_desc *c = priv->ramfc_desc;
/* if this channel is active, replace it with a null context */
chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
if (chid == chan->base.chid) {
nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
c = priv->ramfc_desc;
do {
u32 rm = ((1ULL << c->bits) - 1) << c->regs;
u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
u32 cv = (nv_ro32(ctx, c->ctxp + fctx->ramfc) & ~cm);
nv_wo32(ctx, c->ctxp + fctx->ramfc, cv | (rv << c->ctxs));
u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
} while ((++c)->bits);
c = priv->ramfc_desc;
do {
nv_wr32(priv, c->regp, 0x00000000);
} while ((++c)->bits);
nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
}
nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
/* restore normal operation, after disabling dma mode */
nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
nv_wr32(priv, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&priv->base.lock, flags);
return nouveau_fifo_channel_fini(&chan->base, suspend);
}
/* Object functions for the NV04 DMA channel class (0x006e).  rd32/wr32 go
 * through the generic fifo-channel accessors — presumably the channel's
 * mapped USER control window; confirm against nouveau_fifo_channel. */
static struct nouveau_ofuncs
nv04_fifo_ofuncs = {
.ctor = nv04_fifo_chan_ctor,
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
};
/* Classes creatable on an NV04 PFIFO: 0x006e is the NV03-style DMA
 * channel object, backed by nv04_fifo_ofuncs above. */
static struct nouveau_oclass
nv04_fifo_sclass[] = {
{ 0x006e, &nv04_fifo_ofuncs },
{}
};
/*******************************************************************************
* FIFO context - basically just the instmem reserved for the channel
******************************************************************************/
/*
 * Allocate the per-channel FIFO context: a 0x1000-byte, 0x1000-aligned
 * instmem allocation, created with NVOBJ_FLAG_HEAP (presumably so channel
 * objects can be suballocated from it — confirm against gpuobj core).
 *
 * NOTE(review): *pobject is published before the error check; the object
 * core appears to rely on this to tear down a partially-constructed
 * object, so keep this ordering.
 */
int
nv04_fifo_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_fifo_base *base;
int ret;
ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
0x1000, NVOBJ_FLAG_HEAP, &base);
*pobject = nv_object(base);
if (ret)
return ret;
return 0;
}
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
/* Engine-context class for NV04 PFIFO: only the ctor is chipset-specific,
 * everything else uses the generic fifo-context helpers. */
static struct nouveau_oclass
nv04_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x04),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_fifo_context_ctor,
.dtor = _nouveau_fifo_context_dtor,
.init = _nouveau_fifo_context_init,
.fini = _nouveau_fifo_context_fini,
.rd32 = _nouveau_fifo_context_rd32,
.wr32 = _nouveau_fifo_context_wr32,
},
};
/*******************************************************************************
* PFIFO engine
******************************************************************************/
void
nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags)
__acquires(priv->base.lock)
{
struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
struct nouveau_gpuobj *obj;
struct nv04_fifo_priv *priv = (void *)pfifo;
unsigned long flags;
const int subc = (addr >> 13) & 0x7;
const int mthd = addr & 0x1ffc;
bool handled = false;
u32 engine;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
if (likely(chid >= 0 && chid < pfifo->channels))
chan = dev_priv->channels.ptr[chid];
if (unlikely(!chan))
goto out;
spin_lock_irqsave(&priv->base.lock, flags);
*pflags = flags;
switch (mthd) {
case 0x0000: /* bind object to subchannel */
obj = nouveau_ramht_find(chan, data);
if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
break;
nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
engine = 0x0000000f << (subc * 4);
/* in some cases the puller may be left in an inconsistent state
* if you try to stop it while it's busy translating handles.
* sometimes you get a CACHE_ERROR, sometimes it just fails
* silently; sending incorrect instance offsets to PGRAPH after
* it's started up again.
*
* to avoid this, we invalidate the most recently calculated
* instance.
*/
if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
nv_warn(priv, "timeout idling puller\n");
nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
handled = true;
break;
default:
engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
break;
if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
mthd, data))
handled = true;
break;
}
out:
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
return handled;
nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}
static const char *nv_dma_state_err(u32 state)
/*
 * Counterpart to nv04_fifo_pause(): re-enable the CACHE1 puller and PFIFO
 * context reassignment, then drop the fifo lock with the IRQ flags that
 * pause() stashed in *pflags.  The __releases() sparse annotation pairs
 * with pause()'s __acquires().
 */
void
nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags)
__releases(priv->base.lock)
{
struct nv04_fifo_priv *priv = (void *)pfifo;
unsigned long flags = *pflags;
nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
spin_unlock_irqrestore(&priv->base.lock, flags);
}
static const char *
nv_dma_state_err(u32 state)
{
static const char * const desc[] = {
"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
@ -311,22 +337,74 @@ static const char *nv_dma_state_err(u32 state)
return desc[(state >> 29) & 0x7];
}
void
nv04_fifo_isr(struct drm_device *dev)
static bool
nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
{
struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_fifo_chan *chan = NULL;
struct nouveau_handle *bind;
const int subc = (addr >> 13) & 0x7;
const int mthd = addr & 0x1ffc;
bool handled = false;
unsigned long flags;
u32 engine;
spin_lock_irqsave(&priv->base.lock, flags);
if (likely(chid >= priv->base.min && chid <= priv->base.max))
chan = (void *)priv->base.channel[chid];
if (unlikely(!chan))
goto out;
switch (mthd) {
case 0x0000:
bind = nouveau_namedb_get(nv_namedb(chan), data);
if (unlikely(!bind))
break;
if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
engine = 0x0000000f << (subc * 4);
chan->subc[subc] = data;
handled = true;
nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
}
nouveau_namedb_put(bind);
break;
default:
engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
break;
bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]);
if (likely(bind)) {
if (!nv_call(bind->object, mthd, data))
handled = true;
nouveau_namedb_put(bind);
}
break;
}
out:
spin_unlock_irqrestore(&priv->base.lock, flags);
return handled;
}
void
nv04_fifo_intr(struct nouveau_subdev *subdev)
{
struct nouveau_device *device = nv_device(subdev);
struct nv04_fifo_priv *priv = (void *)subdev;
uint32_t status, reassign;
int cnt = 0;
reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
uint32_t chid, get;
nv_wr32(dev, NV03_PFIFO_CACHES, 0);
nv_wr32(priv, NV03_PFIFO_CACHES, 0);
chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
uint32_t mthd, data;
@ -340,86 +418,85 @@ nv04_fifo_isr(struct drm_device *dev)
*/
ptr = (get & 0x7ff) >> 2;
if (dev_priv->card_type < NV_40) {
mthd = nv_rd32(dev,
if (device->card_type < NV_40) {
mthd = nv_rd32(priv,
NV04_PFIFO_CACHE1_METHOD(ptr));
data = nv_rd32(dev,
data = nv_rd32(priv,
NV04_PFIFO_CACHE1_DATA(ptr));
} else {
mthd = nv_rd32(dev,
mthd = nv_rd32(priv,
NV40_PFIFO_CACHE1_METHOD(ptr));
data = nv_rd32(dev,
data = nv_rd32(priv,
NV40_PFIFO_CACHE1_DATA(ptr));
}
if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
"Mthd 0x%04x Data 0x%08x\n",
if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
nv_info(priv, "CACHE_ERROR - Ch %d/%d "
"Mthd 0x%04x Data 0x%08x\n",
chid, (mthd >> 13) & 7, mthd & 0x1ffc,
data);
}
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
nv_wr32(dev, NV03_PFIFO_INTR_0,
nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
nv_wr32(priv, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_CACHE_ERROR);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
status &= ~NV_PFIFO_INTR_CACHE_ERROR;
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
u32 dma_get = nv_rd32(dev, 0x003244);
u32 dma_put = nv_rd32(dev, 0x003240);
u32 push = nv_rd32(dev, 0x003220);
u32 state = nv_rd32(dev, 0x003228);
u32 dma_get = nv_rd32(priv, 0x003244);
u32 dma_put = nv_rd32(priv, 0x003240);
u32 push = nv_rd32(priv, 0x003220);
u32 state = nv_rd32(priv, 0x003228);
if (dev_priv->card_type == NV_50) {
u32 ho_get = nv_rd32(dev, 0x003328);
u32 ho_put = nv_rd32(dev, 0x003320);
u32 ib_get = nv_rd32(dev, 0x003334);
u32 ib_put = nv_rd32(dev, 0x003330);
if (device->card_type == NV_50) {
u32 ho_get = nv_rd32(priv, 0x003328);
u32 ho_put = nv_rd32(priv, 0x003320);
u32 ib_get = nv_rd32(priv, 0x003334);
u32 ib_put = nv_rd32(priv, 0x003330);
if (nouveau_ratelimit())
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
"State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
dma_put, ib_get, ib_put, state,
nv_dma_state_err(state),
push);
nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
"State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
dma_put, ib_get, ib_put, state,
nv_dma_state_err(state),
push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
nv_wr32(dev, 0x003364, 0x00000000);
nv_wr32(priv, 0x003364, 0x00000000);
if (dma_get != dma_put || ho_get != ho_put) {
nv_wr32(dev, 0x003244, dma_put);
nv_wr32(dev, 0x003328, ho_put);
nv_wr32(priv, 0x003244, dma_put);
nv_wr32(priv, 0x003328, ho_put);
} else
if (ib_get != ib_put) {
nv_wr32(dev, 0x003334, ib_put);
nv_wr32(priv, 0x003334, ib_put);
}
} else {
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
"Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
chid, dma_get, dma_put, state,
nv_dma_state_err(state), push);
if (dma_get != dma_put)
nv_wr32(dev, 0x003244, dma_put);
nv_wr32(priv, 0x003244, dma_put);
}
nv_wr32(dev, 0x003228, 0x00000000);
nv_wr32(dev, 0x003220, 0x00000001);
nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
nv_wr32(priv, 0x003228, 0x00000000);
nv_wr32(priv, 0x003220, 0x00000001);
nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
status &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
@ -427,81 +504,118 @@ nv04_fifo_isr(struct drm_device *dev)
uint32_t sem;
status &= ~NV_PFIFO_INTR_SEMAPHORE;
nv_wr32(dev, NV03_PFIFO_INTR_0,
nv_wr32(priv, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_SEMAPHORE);
sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
}
if (dev_priv->card_type == NV_50) {
if (device->card_type == NV_50) {
if (status & 0x00000010) {
nv50_fb_vm_trap(dev, nouveau_ratelimit());
nv50_fb_trap(nouveau_fb(priv), 1);
status &= ~0x00000010;
nv_wr32(dev, 0x002100, 0x00000010);
nv_wr32(priv, 0x002100, 0x00000010);
}
}
if (status) {
if (nouveau_ratelimit())
NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
status, chid);
nv_wr32(dev, NV03_PFIFO_INTR_0, status);
nv_info(priv, "unknown intr 0x%08x, ch %d\n",
status, chid);
nv_wr32(priv, NV03_PFIFO_INTR_0, status);
status = 0;
}
nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
}
if (status) {
NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
nv_wr32(dev, 0x2140, 0);
nv_wr32(dev, 0x140, 0);
nv_info(priv, "still angry after %d spins, halt\n", cnt);
nv_wr32(priv, 0x002140, 0);
nv_wr32(priv, 0x000140, 0);
}
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
nv_wr32(priv, 0x000100, 0x00000100);
}
/*
 * PFIFO engine constructor for NV04-class hardware.
 *
 * Takes references on the RAMHT/RAMRO/RAMFC objects reserved by nv04
 * instmem, then wires up the interrupt handler, channel/context classes,
 * the pause/start hooks, and the NV04 RAMFC save/restore layout.
 * Channel ids run 0..15; the top id is presumably reserved as the "null"
 * context used while switching — confirm against nv04_fifo_chan_fini.
 */
static int
nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_instmem_priv *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv);
/* published before the error check so the core can destroy a
 * partially-constructed object */
*pobject = nv_object(priv);
if (ret)
return ret;
nouveau_ramht_ref(imem->ramht, &priv->ramht);
nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
/* 0x00000100 looks like the PMC interrupt/enable bit for PFIFO —
 * matches the 0x000100 ack in nv04_fifo_intr(); confirm */
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv04_fifo_cclass;
nv_engine(priv)->sclass = nv04_fifo_sclass;
priv->base.pause = nv04_fifo_pause;
priv->base.start = nv04_fifo_start;
priv->ramfc_desc = nv04_ramfc;
return 0;
}
void
nv04_fifo_destroy(struct drm_device *dev, int engine)
nv04_fifo_dtor(struct nouveau_object *object)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_fifo_priv *priv = nv_engine(dev, engine);
nouveau_irq_unregister(dev, 8);
struct nv04_fifo_priv *priv = (void *)object;
nouveau_gpuobj_ref(NULL, &priv->ramfc);
nouveau_gpuobj_ref(NULL, &priv->ramro);
dev_priv->eng[engine] = NULL;
kfree(priv);
nouveau_ramht_ref(NULL, &priv->ramht);
nouveau_fifo_destroy(&priv->base);
}
int
nv04_fifo_create(struct drm_device *dev)
nv04_fifo_init(struct nouveau_object *object)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_fifo_priv *priv;
struct nv04_fifo_priv *priv = (void *)object;
int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ret = nouveau_fifo_init(&priv->base);
if (ret)
return ret;
nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
priv->base.base.destroy = nv04_fifo_destroy;
priv->base.base.init = nv04_fifo_init;
priv->base.base.fini = nv04_fifo_fini;
priv->base.base.context_new = nv04_fifo_context_new;
priv->base.base.context_del = nv04_fifo_context_del;
priv->base.channels = 15;
priv->ramfc_desc = nv04_ramfc;
dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((priv->ramht->bits - 9) << 16) |
(priv->ramht->base.addr >> 8));
nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
nv_wr32(priv, NV03_PFIFO_CACHES, 1);
return 0;
}
/* Engine class record for NV04 PFIFO, exported to the per-device engine
 * table; fini falls through to the generic fifo helper. */
struct nouveau_oclass
nv04_fifo_oclass = {
.handle = NV_ENGINE(FIFO, 0x04),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_fifo_ctor,
.dtor = nv04_fifo_dtor,
.init = nv04_fifo_init,
.fini = _nouveau_fifo_fini,
},
};

View File

@ -0,0 +1,178 @@
#ifndef __NV04_FIFO_H__
#define __NV04_FIFO_H__
#include <engine/fifo.h>
/*
 * PFIFO MMIO register offsets and bitfields for NV03..NV50-era hardware.
 * The NVxx prefix names the first chipset generation the register (or
 * this location of it) appeared on; later generations moved some of
 * them, hence the duplicate names at different offsets.
 */
#define NV04_PFIFO_DELAY_0 0x00002040
#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
#define NV03_PFIFO_INTR_0 0x00002100
#define NV03_PFIFO_INTR_EN_0 0x00002140
# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
# define NV_PFIFO_INTR_RUNOUT (1<<4)
# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
# define NV_PFIFO_INTR_DMA_PT (1<<16)
# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
#define NV03_PFIFO_RAMHT 0x00002210
#define NV03_PFIFO_RAMFC 0x00002214
#define NV03_PFIFO_RAMRO 0x00002218
#define NV40_PFIFO_RAMFC 0x00002220
#define NV03_PFIFO_CACHES 0x00002500
#define NV04_PFIFO_MODE 0x00002504
#define NV04_PFIFO_DMA 0x00002508
#define NV04_PFIFO_SIZE 0x0000250c
#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
#define NV50_PFIFO_CTX_TABLE__SIZE 128
#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
#define NV03_PFIFO_CACHE0_PULL0 0x00003040
#define NV04_PFIFO_CACHE0_PULL0 0x00003050
#define NV04_PFIFO_CACHE0_PULL1 0x00003054
#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
#define NV03_PFIFO_CACHE1_PUT 0x00003210
#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
/* encodings for the TRIG / SIZE / MAX_REQS fields of CACHE1_DMA_FETCH */
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
#define NV03_PFIFO_CACHE1_PULL0 0x00003240
#define NV04_PFIFO_CACHE1_PULL0 0x00003250
# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
#define NV03_PFIFO_CACHE1_PULL1 0x00003250
#define NV04_PFIFO_CACHE1_PULL1 0x00003254
#define NV04_PFIFO_CACHE1_HASH 0x00003258
#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
#define NV03_PFIFO_CACHE1_GET 0x00003270
#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
#define NV40_PFIFO_UNK32E4 0x000032E4
/* per-slot CACHE1 method/data shadow RAM (NV40 moved it to 0x90000) */
#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
/*
 * One entry of a per-chipset RAMFC layout table: describes how a single
 * PFIFO register field is saved to / restored from a channel's RAMFC
 * instance memory (see the save loop in nv04_fifo_chan_fini).  A table
 * is terminated by an entry with bits == 0.
 */
struct ramfc_desc {
unsigned bits:6; /* field width in bits */
unsigned ctxs:5; /* bit shift within the RAMFC dword */
unsigned ctxp:8; /* byte offset of the dword inside the RAMFC entry */
unsigned regs:5; /* bit shift within the PFIFO register */
unsigned regp; /* PFIFO register address */
};
/* PFIFO engine state: RAMFC save/restore layout for this chipset, plus
 * references to the RAMHT/RAMRO/RAMFC objects reserved by nv04 instmem. */
struct nv04_fifo_priv {
struct nouveau_fifo base;
struct ramfc_desc *ramfc_desc;
struct nouveau_ramht *ramht;
struct nouveau_gpuobj *ramro;
struct nouveau_gpuobj *ramfc;
};
/* Per-channel fifo context: just the instmem reserved for the channel,
 * no chipset-specific state. */
struct nv04_fifo_base {
struct nouveau_fifo_base base;
};
/* Per-channel state for NV04-style DMA channels. */
struct nv04_fifo_chan {
struct nouveau_fifo_chan base;
u32 subc[8]; /* handle bound to each subchannel (tracked by swmthd) */
u32 ramfc; /* byte offset of this channel's entry in RAMFC */
};
/* NV04 implementations shared by the later (nv10..nv40-era) fifo
 * modules, which reuse them in their own oclass/ofuncs tables. */
int nv04_fifo_object_attach(struct nouveau_object *,
struct nouveau_object *, u32);
void nv04_fifo_object_detach(struct nouveau_object *, int);
void nv04_fifo_chan_dtor(struct nouveau_object *);
int nv04_fifo_chan_init(struct nouveau_object *);
int nv04_fifo_chan_fini(struct nouveau_object *, bool suspend);
int nv04_fifo_context_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
struct nouveau_object **);
void nv04_fifo_dtor(struct nouveau_object *);
int nv04_fifo_init(struct nouveau_object *);
void nv04_fifo_pause(struct nouveau_fifo *, unsigned long *);
void nv04_fifo_start(struct nouveau_fifo *, unsigned long *);
#endif

View File

@ -1,43 +1,42 @@
/*
* Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include "nouveau_util.h"
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/ramht.h>
static struct ramfc_desc {
unsigned bits:6;
unsigned ctxs:5;
unsigned ctxp:8;
unsigned regs:5;
unsigned regp;
} nv10_ramfc[] = {
#include <subdev/instmem.h>
#include <subdev/instmem/nv04.h>
#include <subdev/fb.h>
#include <engine/fifo.h>
#include "nv04.h"
static struct ramfc_desc
nv10_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@ -50,87 +49,122 @@ static struct ramfc_desc {
{}
};
struct nv10_fifo_priv {
struct nouveau_fifo_priv base;
struct ramfc_desc *ramfc_desc;
struct nouveau_gpuobj *ramro;
struct nouveau_gpuobj *ramfc;
};
struct nv10_fifo_chan {
struct nouveau_fifo_chan base;
u32 ramfc;
};
/*******************************************************************************
* FIFO channel objects
******************************************************************************/
static int
nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
nv10_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv10_fifo_priv *priv = nv_engine(dev, engine);
struct nv10_fifo_chan *fctx;
unsigned long flags;
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
struct nv_channel_dma_class *args = data;
int ret;
fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
if (size < sizeof(*args))
return -EINVAL;
fctx->ramfc = chan->id * 32;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
(1 << NVDEV_ENGINE_DMAOBJ) |
(1 << NVDEV_ENGINE_SW) |
(1 << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
/* map channel control registers */
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
if (!chan->user) {
ret = -ENOMEM;
goto error;
}
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
chan->ramfc = chan->base.chid * 32;
/* initialise default fifo context */
nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
/* enable dma mode on the channel */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
error:
if (ret)
priv->base.base.context_del(chan, engine);
return ret;
}
int
nv10_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv10_fifo_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
priv->base.base.destroy = nv04_fifo_destroy;
priv->base.base.init = nv04_fifo_init;
priv->base.base.fini = nv04_fifo_fini;
priv->base.base.context_new = nv10_fifo_context_new;
priv->base.base.context_del = nv04_fifo_context_del;
priv->base.channels = 31;
priv->ramfc_desc = nv10_ramfc;
dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
nouveau_irq_register(dev, 8, nv04_fifo_isr);
return 0;
}
/* Object functions for the NV10 DMA channel class; only the constructor
 * differs from NV04 (different RAMFC stride/layout), everything else is
 * reused from the NV04 implementation. */
static struct nouveau_ofuncs
nv10_fifo_ofuncs = {
.ctor = nv10_fifo_chan_ctor,
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
};
/* Classes creatable on an NV10 PFIFO: same 0x006e DMA channel class as
 * NV04, backed by the NV10 channel constructor. */
static struct nouveau_oclass
nv10_fifo_sclass[] = {
{ 0x006e, &nv10_fifo_ofuncs },
{}
};
/*******************************************************************************
* FIFO context - basically just the instmem reserved for the channel
******************************************************************************/
/* Engine-context class for NV10 PFIFO; fully reuses the NV04 context
 * constructor and the generic fifo-context helpers. */
static struct nouveau_oclass
nv10_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x10),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_fifo_context_ctor,
.dtor = _nouveau_fifo_context_dtor,
.init = _nouveau_fifo_context_init,
.fini = _nouveau_fifo_context_fini,
.rd32 = _nouveau_fifo_context_rd32,
.wr32 = _nouveau_fifo_context_wr32,
},
};
/*******************************************************************************
* PFIFO engine
******************************************************************************/
/*
 * PFIFO engine constructor for NV10-class hardware.  Identical in shape
 * to nv04_fifo_ctor except for the channel count (0..31), the NV10
 * channel/context classes, and the NV10 RAMFC layout table.
 */
static int
nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_instmem_priv *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
/* published before the error check so the core can destroy a
 * partially-constructed object */
*pobject = nv_object(priv);
if (ret)
return ret;
nouveau_ramht_ref(imem->ramht, &priv->ramht);
nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
/* presumably the PMC interrupt bit for PFIFO — confirm */
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv10_fifo_cclass;
nv_engine(priv)->sclass = nv10_fifo_sclass;
priv->base.pause = nv04_fifo_pause;
priv->base.start = nv04_fifo_start;
priv->ramfc_desc = nv10_ramfc;
return 0;
}
struct nouveau_oclass
nv10_fifo_oclass = {
.handle = NV_ENGINE(FIFO, 0x10),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv10_fifo_ctor,
.dtor = nv04_fifo_dtor,
.init = nv04_fifo_init,
.fini = _nouveau_fifo_fini,
},
};

View File

@ -1,43 +1,42 @@
/*
* Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include "nouveau_util.h"
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/ramht.h>
static struct ramfc_desc {
unsigned bits:6;
unsigned ctxs:5;
unsigned ctxp:8;
unsigned regs:5;
unsigned regp;
} nv17_ramfc[] = {
#include <subdev/instmem.h>
#include <subdev/instmem/nv04.h>
#include <subdev/fb.h>
#include <engine/fifo.h>
#include "nv04.h"
static struct ramfc_desc
nv17_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@ -55,124 +54,154 @@ static struct ramfc_desc {
{}
};
struct nv17_fifo_priv {
struct nouveau_fifo_priv base;
struct ramfc_desc *ramfc_desc;
struct nouveau_gpuobj *ramro;
struct nouveau_gpuobj *ramfc;
};
struct nv17_fifo_chan {
struct nouveau_fifo_chan base;
u32 ramfc;
};
/*******************************************************************************
* FIFO channel objects
******************************************************************************/
static int
nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
nv17_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv17_fifo_priv *priv = nv_engine(dev, engine);
struct nv17_fifo_chan *fctx;
unsigned long flags;
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
struct nv_channel_dma_class *args = data;
int ret;
fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
if (size < sizeof(*args))
return -EINVAL;
fctx->ramfc = chan->id * 64;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
0x10000, args->pushbuf,
(1 << NVDEV_ENGINE_DMAOBJ) |
(1 << NVDEV_ENGINE_SW) |
(1 << NVDEV_ENGINE_GR) |
(1 << NVDEV_ENGINE_MPEG), /* NV31- */
&chan);
*pobject = nv_object(chan);
if (ret)
return ret;
/* map channel control registers */
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
if (!chan->user) {
ret = -ENOMEM;
goto error;
}
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
chan->ramfc = chan->base.chid * 64;
/* initialise default fifo context */
nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
return 0;
}
/* enable dma mode on the channel */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
static struct nouveau_ofuncs
nv17_fifo_ofuncs = {
.ctor = nv17_fifo_chan_ctor,
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
};
error:
static struct nouveau_oclass
nv17_fifo_sclass[] = {
{ 0x006e, &nv17_fifo_ofuncs },
{}
};
/*******************************************************************************
* FIFO context - basically just the instmem reserved for the channel
******************************************************************************/
static struct nouveau_oclass
nv17_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x17),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_fifo_context_ctor,
.dtor = _nouveau_fifo_context_dtor,
.init = _nouveau_fifo_context_init,
.fini = _nouveau_fifo_context_fini,
.rd32 = _nouveau_fifo_context_rd32,
.wr32 = _nouveau_fifo_context_wr32,
},
};
/*******************************************************************************
* PFIFO engine
******************************************************************************/
static int
nv17_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_instmem_priv *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
*pobject = nv_object(priv);
if (ret)
priv->base.base.context_del(chan, engine);
return ret;
return ret;
nouveau_ramht_ref(imem->ramht, &priv->ramht);
nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv17_fifo_cclass;
nv_engine(priv)->sclass = nv17_fifo_sclass;
priv->base.pause = nv04_fifo_pause;
priv->base.start = nv04_fifo_start;
priv->ramfc_desc = nv17_ramfc;
return 0;
}
static int
nv17_fifo_init(struct drm_device *dev, int engine)
nv17_fifo_init(struct nouveau_object *object)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv17_fifo_priv *priv = nv_engine(dev, engine);
int i;
struct nv04_fifo_priv *priv = (void *)object;
int ret;
nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
ret = nouveau_fifo_init(&priv->base);
if (ret)
return ret;
nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
(dev_priv->ramht->gpuobj->addr >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
priv->ramfc->addr >> 8);
nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((priv->ramht->bits - 9) << 16) |
(priv->ramht->base.addr >> 8));
nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
nv_wr32(dev, NV03_PFIFO_CACHES, 1);
for (i = 0; i < priv->base.channels; i++) {
if (dev_priv->channels.ptr[i])
nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
}
nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
nv_wr32(priv, NV03_PFIFO_CACHES, 1);
return 0;
}
int
nv17_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv17_fifo_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
priv->base.base.destroy = nv04_fifo_destroy;
priv->base.base.init = nv17_fifo_init;
priv->base.base.fini = nv04_fifo_fini;
priv->base.base.context_new = nv17_fifo_context_new;
priv->base.base.context_del = nv04_fifo_context_del;
priv->base.channels = 31;
priv->ramfc_desc = nv17_ramfc;
dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
nouveau_irq_register(dev, 8, nv04_fifo_isr);
return 0;
}
struct nouveau_oclass
nv17_fifo_oclass = {
.handle = NV_ENGINE(FIFO, 0x17),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv17_fifo_ctor,
.dtor = nv04_fifo_dtor,
.init = nv17_fifo_init,
.fini = _nouveau_fifo_fini,
},
};

View File

@ -1,43 +1,42 @@
/*
* Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include "nouveau_util.h"
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/ramht.h>
static struct ramfc_desc {
unsigned bits:6;
unsigned ctxs:5;
unsigned ctxp:8;
unsigned regs:5;
unsigned regp;
} nv40_ramfc[] = {
#include <subdev/instmem.h>
#include <subdev/instmem/nv04.h>
#include <subdev/fb.h>
#include <engine/fifo.h>
#include "nv04.h"
static struct ramfc_desc
nv40_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@ -63,148 +62,287 @@ static struct ramfc_desc {
{}
};
struct nv40_fifo_priv {
struct nouveau_fifo_priv base;
struct ramfc_desc *ramfc_desc;
struct nouveau_gpuobj *ramro;
struct nouveau_gpuobj *ramfc;
};
struct nv40_fifo_chan {
struct nouveau_fifo_chan base;
u32 ramfc;
};
/*******************************************************************************
* FIFO channel objects
******************************************************************************/
static int
nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
nv40_fifo_object_attach(struct nouveau_object *parent,
struct nouveau_object *object, u32 handle)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv40_fifo_priv *priv = nv_engine(dev, engine);
struct nv40_fifo_chan *fctx;
unsigned long flags;
struct nv04_fifo_priv *priv = (void *)parent->engine;
struct nv04_fifo_chan *chan = (void *)parent;
u32 context, chid = chan->base.chid;
int ret;
fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
if (nv_iclass(object, NV_GPUOBJ_CLASS))
context = nv_gpuobj(object)->addr >> 4;
else
context = 0x00000004; /* just non-zero */
fctx->ramfc = chan->id * 128;
/* map channel control registers */
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
if (!chan->user) {
ret = -ENOMEM;
goto error;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_DMAOBJ:
case NVDEV_ENGINE_SW:
context |= 0x00000000;
break;
case NVDEV_ENGINE_GR:
context |= 0x00100000;
break;
case NVDEV_ENGINE_MPEG:
context |= 0x00200000;
break;
default:
return -EINVAL;
}
/* initialise default fifo context */
nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
nv_wo32(priv->ramfc, fctx->ramfc + 0x18, 0x30000000 |
context |= chid << 23;
mutex_lock(&nv_subdev(priv)->mutex);
ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
mutex_unlock(&nv_subdev(priv)->mutex);
return ret;
}
static int
nv40_fifo_context_attach(struct nouveau_object *parent,
struct nouveau_object *engctx)
{
struct nv04_fifo_priv *priv = (void *)parent->engine;
struct nv04_fifo_chan *chan = (void *)parent;
unsigned long flags;
u32 reg, ctx;
switch (nv_engidx(engctx->engine)) {
case NVDEV_ENGINE_SW:
return 0;
case NVDEV_ENGINE_GR:
reg = 0x32e0;
ctx = 0x38;
break;
case NVDEV_ENGINE_MPEG:
reg = 0x330c;
ctx = 0x54;
break;
default:
return -EINVAL;
}
spin_lock_irqsave(&priv->base.lock, flags);
nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
nv_wr32(priv, reg, nv_gpuobj(engctx)->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_gpuobj(engctx)->addr >> 4);
nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&priv->base.lock, flags);
return 0;
}
static int
nv40_fifo_context_detach(struct nouveau_object *parent, bool suspend,
struct nouveau_object *engctx)
{
struct nv04_fifo_priv *priv = (void *)parent->engine;
struct nv04_fifo_chan *chan = (void *)parent;
unsigned long flags;
u32 reg, ctx;
switch (nv_engidx(engctx->engine)) {
case NVDEV_ENGINE_SW:
return 0;
case NVDEV_ENGINE_GR:
reg = 0x32e0;
ctx = 0x38;
break;
case NVDEV_ENGINE_MPEG:
reg = 0x330c;
ctx = 0x54;
break;
default:
return -EINVAL;
}
spin_lock_irqsave(&priv->base.lock, flags);
nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
nv_wr32(priv, reg, 0x00000000);
nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&priv->base.lock, flags);
return 0;
}
static int
nv40_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
struct nv_channel_dma_class *args = data;
int ret;
if (size < sizeof(*args))
return -EINVAL;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x1000, args->pushbuf,
(1 << NVDEV_ENGINE_DMAOBJ) |
(1 << NVDEV_ENGINE_SW) |
(1 << NVDEV_ENGINE_GR) |
(1 << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
nv_parent(chan)->context_attach = nv40_fifo_context_attach;
nv_parent(chan)->context_detach = nv40_fifo_context_detach;
nv_parent(chan)->object_attach = nv40_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
chan->ramfc = chan->base.chid * 128;
nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nv_wo32(priv->ramfc, fctx->ramfc + 0x3c, 0x0001ffff);
nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
return 0;
}
/* enable dma mode on the channel */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
static struct nouveau_ofuncs
nv40_fifo_ofuncs = {
.ctor = nv40_fifo_chan_ctor,
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
};
/*XXX: remove this later, need fifo engine context commit hook */
nouveau_gpuobj_ref(priv->ramfc, &chan->ramfc);
static struct nouveau_oclass
nv40_fifo_sclass[] = {
{ 0x006e, &nv40_fifo_ofuncs },
{}
};
error:
/*******************************************************************************
* FIFO context - basically just the instmem reserved for the channel
******************************************************************************/
static struct nouveau_oclass
nv40_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0x40),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_fifo_context_ctor,
.dtor = _nouveau_fifo_context_dtor,
.init = _nouveau_fifo_context_init,
.fini = _nouveau_fifo_context_fini,
.rd32 = _nouveau_fifo_context_rd32,
.wr32 = _nouveau_fifo_context_wr32,
},
};
/*******************************************************************************
* PFIFO engine
******************************************************************************/
static int
nv40_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_instmem_priv *imem = nv04_instmem(parent);
struct nv04_fifo_priv *priv;
int ret;
ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
*pobject = nv_object(priv);
if (ret)
priv->base.base.context_del(chan, engine);
return ret;
return ret;
nouveau_ramht_ref(imem->ramht, &priv->ramht);
nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv40_fifo_cclass;
nv_engine(priv)->sclass = nv40_fifo_sclass;
priv->base.pause = nv04_fifo_pause;
priv->base.start = nv04_fifo_start;
priv->ramfc_desc = nv40_ramfc;
return 0;
}
static int
nv40_fifo_init(struct drm_device *dev, int engine)
nv40_fifo_init(struct nouveau_object *object)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv40_fifo_priv *priv = nv_engine(dev, engine);
int i;
struct nv04_fifo_priv *priv = (void *)object;
struct nouveau_fb *pfb = nouveau_fb(object);
int ret;
nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
ret = nouveau_fifo_init(&priv->base);
if (ret)
return ret;
nv_wr32(dev, 0x002040, 0x000000ff);
nv_wr32(dev, 0x002044, 0x2101ffff);
nv_wr32(dev, 0x002058, 0x00000001);
nv_wr32(priv, 0x002040, 0x000000ff);
nv_wr32(priv, 0x002044, 0x2101ffff);
nv_wr32(priv, 0x002058, 0x00000001);
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
(dev_priv->ramht->gpuobj->addr >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((priv->ramht->bits - 9) << 16) |
(priv->ramht->base.addr >> 8));
nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
switch (dev_priv->chipset) {
switch (nv_device(priv)->chipset) {
case 0x47:
case 0x49:
case 0x4b:
nv_wr32(dev, 0x002230, 0x00000001);
nv_wr32(priv, 0x002230, 0x00000001);
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x45:
case 0x48:
nv_wr32(dev, 0x002220, 0x00030002);
nv_wr32(priv, 0x002220, 0x00030002);
break;
default:
nv_wr32(dev, 0x002230, 0x00000000);
nv_wr32(dev, 0x002220, ((nvfb_vram_size(dev) - 512 * 1024 +
nv_wr32(priv, 0x002230, 0x00000000);
nv_wr32(priv, 0x002220, ((pfb->ram.size - 512 * 1024 +
priv->ramfc->addr) >> 16) |
0x00030000);
0x00030000);
break;
}
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
nv_wr32(dev, NV03_PFIFO_CACHES, 1);
for (i = 0; i < priv->base.channels; i++) {
if (dev_priv->channels.ptr[i])
nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
}
nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
nv_wr32(priv, NV03_PFIFO_CACHES, 1);
return 0;
}
int
nv40_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv40_fifo_priv *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
priv->base.base.destroy = nv04_fifo_destroy;
priv->base.base.init = nv40_fifo_init;
priv->base.base.fini = nv04_fifo_fini;
priv->base.base.context_new = nv40_fifo_context_new;
priv->base.base.context_del = nv04_fifo_context_del;
priv->base.channels = 31;
priv->ramfc_desc = nv40_ramfc;
dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
nouveau_irq_register(dev, 8, nv04_fifo_isr);
return 0;
}
struct nouveau_oclass
nv40_fifo_oclass = {
.handle = NV_ENGINE(FIFO, 0x40),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv40_fifo_ctor,
.dtor = nv04_fifo_dtor,
.init = nv40_fifo_init,
.fini = _nouveau_fifo_fini,
},
};

View File

@ -1,126 +1,123 @@
/*
* Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
#include <core/class.h>
#include <core/math.h>
struct nv50_fifo_priv {
struct nouveau_fifo_priv base;
struct nouveau_gpuobj *playlist[2];
int cur_playlist;
};
#include <subdev/timer.h>
#include <subdev/bar.h>
struct nv50_fifo_chan {
struct nouveau_fifo_chan base;
};
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include "nv50.h"
/*******************************************************************************
* FIFO channel objects
******************************************************************************/
void
nv50_fifo_playlist_update(struct drm_device *dev)
nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
{
struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_gpuobj *cur;
int i, p;
cur = priv->playlist[priv->cur_playlist];
priv->cur_playlist = !priv->cur_playlist;
for (i = 0, p = 0; i < priv->base.channels; i++) {
if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
nv_wo32(cur, p++ * 4, i);
}
nvimem_flush(dev);
bar->flush(bar);
nv_wr32(dev, 0x0032f4, cur->addr >> 12);
nv_wr32(dev, 0x0032ec, p);
nv_wr32(dev, 0x002500, 0x00000101);
nv_wr32(priv, 0x0032f4, cur->addr >> 12);
nv_wr32(priv, 0x0032ec, p);
nv_wr32(priv, 0x002500, 0x00000101);
}
static int
nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
nv50_fifo_context_attach(struct nouveau_object *parent,
struct nouveau_object *object)
{
struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
struct nv50_fifo_chan *fctx;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
u64 instance = chan->ramin->addr >> 12;
unsigned long flags;
int ret = 0, i;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent->parent;
struct nouveau_gpuobj *ectx = (void *)object;
u64 limit = ectx->addr + ectx->size - 1;
u64 start = ectx->addr;
u32 addr;
fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
nvvm_engref(chan->vm, engine, 1);
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV50_USER(chan->id), PAGE_SIZE);
if (!chan->user) {
ret = -ENOMEM;
goto error;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW : return 0;
case NVDEV_ENGINE_GR : addr = 0x0000; break;
case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
default:
return -EINVAL;
}
for (i = 0; i < 0x100; i += 4)
nv_wo32(chan->ramin, i, 0x00000000);
nv_wo32(chan->ramin, 0x3c, 0x403f6078);
nv_wo32(chan->ramin, 0x40, 0x00000000);
nv_wo32(chan->ramin, 0x44, 0x01003fff);
nv_wo32(chan->ramin, 0x48, chan->pushbuf->node->offset >> 4);
nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
drm_order(chan->dma.ib_max + 1) << 16);
nv_wo32(chan->ramin, 0x60, 0x7fffffff);
nv_wo32(chan->ramin, 0x78, 0x00000000);
nv_wo32(chan->ramin, 0x7c, 0x30000001);
nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4));
nvimem_flush(dev);
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
nv50_fifo_playlist_update(dev);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
error:
if (ret)
priv->base.base.context_del(chan, engine);
return ret;
nv_wo32(base->eng, addr + 0x00, 0x00190000);
nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
upper_32_bits(start));
nv_wo32(base->eng, addr + 0x10, 0x00000000);
nv_wo32(base->eng, addr + 0x14, 0x00000000);
bar->flush(bar);
return 0;
}
static bool
nv50_fifo_kickoff(struct nouveau_channel *chan)
static int
nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
struct nouveau_object *object)
{
struct drm_device *dev = chan->dev;
bool done = true;
u32 me;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_priv *priv = (void *)parent->engine;
struct nv50_fifo_base *base = (void *)parent->parent;
struct nv50_fifo_chan *chan = (void *)parent;
u32 addr, me;
int ret = 0;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW : return 0;
case NVDEV_ENGINE_GR : addr = 0x0000; break;
case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
default:
return -EINVAL;
}
nv_wo32(base->eng, addr + 0x00, 0x00000000);
nv_wo32(base->eng, addr + 0x04, 0x00000000);
nv_wo32(base->eng, addr + 0x08, 0x00000000);
nv_wo32(base->eng, addr + 0x0c, 0x00000000);
nv_wo32(base->eng, addr + 0x10, 0x00000000);
nv_wo32(base->eng, addr + 0x14, 0x00000000);
bar->flush(bar);
/* HW bug workaround:
*
@ -134,159 +131,308 @@ nv50_fifo_kickoff(struct nouveau_channel *chan)
* there's also a "ignore these engines" bitmask reg we can use
* if we hit the issue there..
*/
/* PME: make sure engine is enabled */
me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
/* do the kickoff... */
nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
done = false;
nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
if (suspend)
ret = -EBUSY;
}
/* restore any engine states we changed, and exit */
nv_wr32(dev, 0x00b860, me);
return done;
}
static void
nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
{
struct nv50_fifo_chan *fctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned long flags;
/* remove channel from playlist, will context switch if active */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
nv50_fifo_playlist_update(dev);
/* tell any engines on this channel to unload their contexts */
nv50_fifo_kickoff(chan);
nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* clean up */
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
nvvm_engref(chan->vm, engine, -1);
chan->engctx[engine] = NULL;
kfree(fctx);
nv_wr32(priv, 0x00b860, me);
return ret;
}
static int
nv50_fifo_init(struct drm_device *dev, int engine)
nv50_fifo_object_attach(struct nouveau_object *parent,
struct nouveau_object *object, u32 handle)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
u32 instance;
int i;
struct nv50_fifo_chan *chan = (void *)parent;
u32 context;
nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
nv_wr32(dev, 0x00250c, 0x6f3cfc34);
nv_wr32(dev, 0x002044, 0x01003fff);
if (nv_iclass(object, NV_GPUOBJ_CLASS))
context = nv_gpuobj(object)->node->offset >> 4;
else
context = 0x00000004; /* just non-zero */
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
for (i = 0; i < 128; i++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && chan->engctx[engine])
instance = 0x80000000 | chan->ramin->addr >> 12;
else
instance = 0x00000000;
nv_wr32(dev, 0x002600 + (i * 4), instance);
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_DMAOBJ:
case NVDEV_ENGINE_SW : context |= 0x00000000; break;
case NVDEV_ENGINE_GR : context |= 0x00100000; break;
case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
default:
return -EINVAL;
}
nv50_fifo_playlist_update(dev);
return nouveau_ramht_insert(chan->ramht, 0, handle, context);
}
nv_wr32(dev, 0x003200, 1);
nv_wr32(dev, 0x003250, 1);
nv_wr32(dev, 0x002500, 1);
return 0;
void
nv50_fifo_object_detach(struct nouveau_object *parent, int cookie)
{
	/* The parent of an attached object is the fifo channel; drop the
	 * RAMHT entry that was created for the object at attach time.
	 */
	struct nv50_fifo_chan *fifoch = (void *)parent;

	nouveau_ramht_remove(fifoch->ramht, cookie);
}
static int
nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
nv50_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_fifo_priv *priv = nv_engine(dev, engine);
int i;
struct nv_channel_ind_class *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
u64 ioffset, ilength;
int ret;
/* set playlist length to zero, fifo will unload context */
nv_wr32(dev, 0x0032ec, 0);
if (size < sizeof(*args))
return -EINVAL;
/* tell all connected engines to unload their contexts */
for (i = 0; i < priv->base.channels; i++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && !nv50_fifo_kickoff(chan))
return -EBUSY;
}
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
(1 << NVDEV_ENGINE_DMAOBJ) |
(1 << NVDEV_ENGINE_SW) |
(1 << NVDEV_ENGINE_GR) |
(1 << NVDEV_ENGINE_MPEG), &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
nv_wr32(dev, 0x002140, 0);
nv_parent(chan)->context_attach = nv50_fifo_context_attach;
nv_parent(chan)->context_detach = nv50_fifo_context_detach;
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
if (ret)
return ret;
ioffset = args->ioffset;
ilength = log2i(args->ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
nv_wo32(base->ramfc, 0x60, 0x7fffffff);
nv_wo32(base->ramfc, 0x78, 0x00000000);
nv_wo32(base->ramfc, 0x7c, 0x30000001);
nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->base.node->offset >> 4));
bar->flush(bar);
return 0;
}
void
nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
nv50_fifo_chan_dtor(struct nouveau_object *object)
{
nv50_vm_flush_engine(dev, 5);
struct nv50_fifo_chan *chan = (void *)object;
nouveau_ramht_ref(NULL, &chan->ramht);
nouveau_fifo_channel_destroy(&chan->base);
}
void
nv50_fifo_destroy(struct drm_device *dev, int engine)
static int
nv50_fifo_chan_init(struct nouveau_object *object)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_fifo_priv *priv = nv_engine(dev, engine);
struct nv50_fifo_priv *priv = (void *)object->engine;
struct nv50_fifo_base *base = (void *)object->parent;
struct nv50_fifo_chan *chan = (void *)object;
struct nouveau_gpuobj *ramfc = base->ramfc;
u32 chid = chan->base.chid;
int ret;
nouveau_irq_unregister(dev, 8);
ret = nouveau_fifo_channel_init(&chan->base);
if (ret)
return ret;
nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
dev_priv->eng[engine] = NULL;
kfree(priv);
nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
nv50_fifo_playlist_update(priv);
return 0;
}
int
nv50_fifo_create(struct drm_device *dev)
nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_fifo_priv *priv = (void *)object->engine;
struct nv50_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
/* remove channel from playlist, fifo will unload context */
nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
nv50_fifo_playlist_update(priv);
nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
return nouveau_fifo_channel_fini(&chan->base, suspend);
}
/* Object functions for NV50 fifo channel objects: construction/teardown
 * plus register access forwarded to the generic fifo channel helpers.
 */
static struct nouveau_ofuncs
nv50_fifo_ofuncs = {
	.ctor = nv50_fifo_chan_ctor,
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};
/* Channel classes exposed to clients by the NV50 fifo.
 * NOTE(review): 0x506f is presumably the NV50 indirect-pushbuffer channel
 * class id — confirm against the class definitions in core/class.h.
 */
static struct nouveau_oclass
nv50_fifo_sclass[] = {
	{ 0x506f, &nv50_fifo_ofuncs },
	{}
};
/*******************************************************************************
* FIFO context - basically just the instmem reserved for the channel
******************************************************************************/
/* Construct the per-channel fifo context: the channel's instmem block and
 * the sub-objects (RAMFC, engine context table, page directory) that live
 * inside it, plus a reference on the client's VM.
 *
 * Returns 0 on success, negative error code otherwise.  Partially
 * constructed state is released by nv50_fifo_context_dtor() via the
 * standard object destruction path.
 */
static int
nv50_fifo_context_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nv50_fifo_base *base;
	int ret;

	/* 0x10000-byte instmem allocation for the channel; NVOBJ_FLAG_HEAP
	 * allows sub-objects to be suballocated from it. */
	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
					  0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	/* RAMFC: the channel's fifo context image, zero-initialised;
	 * filled in by the channel constructor. */
	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
	if (ret)
		return ret;

	/* engine context table, programmed by context_attach/detach */
	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1200, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
	if (ret)
		return ret;

	/* page directory backing the channel's virtual address space */
	ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 0,
				 &base->pgd);
	if (ret)
		return ret;

	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
	if (ret)
		return ret;

	return 0;
}
/* Destroy a fifo context.  Shared with the NV84 implementation (see
 * nv84_fifo_cclass), which is why base->cache is released here even though
 * the NV50 context constructor doesn't allocate it — nouveau_gpuobj_ref()
 * on a NULL object is harmless.  References are dropped in reverse order
 * of creation.
 */
void
nv50_fifo_context_dtor(struct nouveau_object *object)
{
	struct nv50_fifo_base *base = (void *)object;

	nouveau_vm_ref(NULL, &base->vm, base->pgd);
	nouveau_gpuobj_ref(NULL, &base->pgd);
	nouveau_gpuobj_ref(NULL, &base->eng);
	nouveau_gpuobj_ref(NULL, &base->ramfc);
	nouveau_gpuobj_ref(NULL, &base->cache);
	nouveau_fifo_context_destroy(&base->base);
}
/* Engine context class for the NV50 fifo; init/fini and register access
 * are handled entirely by the generic fifo context helpers.
 */
static struct nouveau_oclass
nv50_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_fifo_context_ctor,
		.dtor = nv50_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
/*******************************************************************************
* PFIFO engine
******************************************************************************/
static int
nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_fifo_priv *priv;
int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base.base.destroy = nv50_fifo_destroy;
priv->base.base.init = nv50_fifo_init;
priv->base.base.fini = nv50_fifo_fini;
priv->base.base.context_new = nv50_fifo_context_new;
priv->base.base.context_del = nv50_fifo_context_del;
priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
priv->base.channels = 127;
dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
*pobject = nv_object(priv);
if (ret)
goto error;
return ret;
ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
&priv->playlist[0]);
if (ret)
goto error;
return ret;
nouveau_irq_register(dev, 8, nv04_fifo_isr);
error:
ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
&priv->playlist[1]);
if (ret)
priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
return ret;
return ret;
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
nv_engine(priv)->cclass = &nv50_fifo_cclass;
nv_engine(priv)->sclass = nv50_fifo_sclass;
return 0;
}
/* Tear down the PFIFO engine object: drop both playlist buffers (reverse
 * order of creation) and release the base fifo engine state.  Also used
 * by the NV84 implementation (see nv84_fifo_oclass).
 */
void
nv50_fifo_dtor(struct nouveau_object *object)
{
	struct nv50_fifo_priv *priv = (void *)object;

	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
	nouveau_fifo_destroy(&priv->base);
}
/* Bring up the PFIFO engine.  Shared by NV50 and NV84 (see
 * nv84_fifo_oclass).  Channels are re-bound individually through
 * nv50_fifo_chan_init(), so all per-channel slots are cleared here.
 */
int
nv50_fifo_init(struct nouveau_object *object)
{
	struct nv50_fifo_priv *priv = (void *)object;
	int ret, i;

	ret = nouveau_fifo_init(&priv->base);
	if (ret)
		return ret;

	/* pulse the PFIFO enable bit in PMC (matches unit 0x00000100 set
	 * by the constructor) to reset the engine */
	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
	/* NOTE(review): magic setup values carried over from the previous
	 * implementation of this code — meaning undocumented here */
	nv_wr32(priv, 0x00250c, 0x6f3cfc34);
	nv_wr32(priv, 0x002044, 0x01003fff);

	/* presumably ack all pending interrupts and unmask them — confirm
	 * against PFIFO register documentation */
	nv_wr32(priv, 0x002100, 0xffffffff);
	nv_wr32(priv, 0x002140, 0xffffffff);

	/* clear every per-channel context slot (rebound by chan_init) */
	for (i = 0; i < 128; i++)
		nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
	nv50_fifo_playlist_update(priv);

	/* enable fifo operation */
	nv_wr32(priv, 0x003200, 0x00000001);
	nv_wr32(priv, 0x003250, 0x00000001);
	nv_wr32(priv, 0x002500, 0x00000001);
	return 0;
}
/* Engine class registration for the NV50 PFIFO implementation. */
struct nouveau_oclass
nv50_fifo_oclass = {
	.handle = NV_ENGINE(FIFO, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_fifo_ctor,
		.dtor = nv50_fifo_dtor,
		.init = nv50_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};

View File

@ -0,0 +1,36 @@
#ifndef __NV50_FIFO_H__
#define __NV50_FIFO_H__

/* PFIFO engine state shared by the NV50 and NV84 implementations. */
struct nv50_fifo_priv {
	struct nouveau_fifo base;
	struct nouveau_gpuobj *playlist[2];	/* double-buffered runlist */
	int cur_playlist;			/* index last submitted to hw */
};

/* Per-channel fifo context (channel instmem and its sub-objects). */
struct nv50_fifo_base {
	struct nouveau_fifo_base base;
	struct nouveau_gpuobj *ramfc;	/* fifo context image */
	struct nouveau_gpuobj *cache;	/* allocated on NV84 only */
	struct nouveau_gpuobj *eng;	/* engine context table */
	struct nouveau_gpuobj *pgd;	/* page directory */
	struct nouveau_vm *vm;
};

/* Per-channel software state. */
struct nv50_fifo_chan {
	struct nouveau_fifo_chan base;
	u32 subc[8];			/* NOTE(review): presumably per-subchannel
					 * bound-object state — confirm at use sites */
	struct nouveau_ramht *ramht;	/* object handle hash table */
};

/* Pieces of the NV50 implementation reused by NV84. */
void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
void nv50_fifo_object_detach(struct nouveau_object *, int);
void nv50_fifo_chan_dtor(struct nouveau_object *);
int  nv50_fifo_chan_fini(struct nouveau_object *, bool);
void nv50_fifo_context_dtor(struct nouveau_object *);
void nv50_fifo_dtor(struct nouveau_object *);
int  nv50_fifo_init(struct nouveau_object *);
#endif

View File

@ -1,249 +1,343 @@
/*
* Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include <core/os.h>
#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
#include <core/class.h>
#include <core/math.h>
struct nv84_fifo_priv {
struct nouveau_fifo_priv base;
struct nouveau_gpuobj *playlist[2];
int cur_playlist;
};
#include <subdev/timer.h>
#include <subdev/bar.h>
struct nv84_fifo_chan {
struct nouveau_fifo_chan base;
struct nouveau_gpuobj *ramfc;
struct nouveau_gpuobj *cache;
};
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include "nv50.h"
/*******************************************************************************
* FIFO channel objects
******************************************************************************/
static int
nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
nv84_fifo_context_attach(struct nouveau_object *parent,
struct nouveau_object *object)
{
struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
struct nv84_fifo_chan *fctx;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
u64 instance;
unsigned long flags;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent->parent;
struct nouveau_gpuobj *ectx = (void *)object;
u64 limit = ectx->addr + ectx->size - 1;
u64 start = ectx->addr;
u32 addr;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW : return 0;
case NVDEV_ENGINE_GR : addr = 0x0020; break;
case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
default:
return -EINVAL;
}
nv_wo32(base->eng, addr + 0x00, 0x00190000);
nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
upper_32_bits(start));
nv_wo32(base->eng, addr + 0x10, 0x00000000);
nv_wo32(base->eng, addr + 0x14, 0x00000000);
bar->flush(bar);
return 0;
}
/* Detach an engine context from an NV84 channel: zero the engine's slot
 * in the channel's engine context table, then kick the channel off the
 * hardware so the engine actually unloads its context.
 *
 * Returns 0 on success; -EBUSY if the unload times out during suspend
 * (outside suspend a timeout is logged but tolerated).
 */
static int
nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
			 struct nouveau_object *object)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nv50_fifo_priv *priv = (void *)parent->engine;
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nv50_fifo_chan *chan = (void *)parent;
	u32 addr;

	/* offset of this engine's slot in the engine context table */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW   : return 0;
	case NVDEV_ENGINE_GR   : addr = 0x0020; break;
	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
	case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
	case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
	default:
		return -EINVAL;
	}

	nv_wo32(base->eng, addr + 0x00, 0x00000000);
	nv_wo32(base->eng, addr + 0x04, 0x00000000);
	nv_wo32(base->eng, addr + 0x08, 0x00000000);
	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
	nv_wo32(base->eng, addr + 0x10, 0x00000000);
	nv_wo32(base->eng, addr + 0x14, 0x00000000);
	bar->flush(bar);

	/* write the channel's instance address to the kickoff register and
	 * wait for the hardware to signal completion */
	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
	if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
		nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
		if (suspend)
			return -EBUSY;
	}

	return 0;
}
/* Insert a RAMHT entry binding 'handle' to 'object' on an NV84 channel.
 * The context value packs the object's instmem offset (low bits) with an
 * engine selector in the upper bits.
 *
 * Returns the RAMHT cookie from nouveau_ramht_insert(), or -EINVAL for
 * an object belonging to an engine this fifo can't route.
 */
static int
nv84_fifo_object_attach(struct nouveau_object *parent,
			struct nouveau_object *object, u32 handle)
{
	struct nv50_fifo_chan *chan = (void *)parent;
	u32 context;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->node->offset >> 4;
	else
		context = 0x00000004; /* just non-zero */

	/* engine selector; several engines share a routing value */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
	case NVDEV_ENGINE_MPEG  :
	case NVDEV_ENGINE_PPP   : context |= 0x00200000; break;
	case NVDEV_ENGINE_ME    :
	case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
	case NVDEV_ENGINE_VP    : context |= 0x00400000; break;
	case NVDEV_ENGINE_CRYPT :
	case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break;
	case NVDEV_ENGINE_BSP   : context |= 0x00600000; break;
	default:
		return -EINVAL;
	}

	return nouveau_ramht_insert(chan->ramht, 0, handle, context);
}
static int
nv84_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
struct nv_channel_ind_class *args = data;
u64 ioffset, ilength;
int ret;
fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
nvvm_engref(chan->vm, engine, 1);
if (size < sizeof(*args))
return -EINVAL;
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV50_USER(chan->id), PAGE_SIZE);
if (!chan->user) {
ret = -ENOMEM;
goto error;
}
ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
0x2000, args->pushbuf,
(1 << NVDEV_ENGINE_DMAOBJ) |
(1 << NVDEV_ENGINE_SW) |
(1 << NVDEV_ENGINE_GR) |
(1 << NVDEV_ENGINE_MPEG) |
(1 << NVDEV_ENGINE_ME) |
(1 << NVDEV_ENGINE_VP) |
(1 << NVDEV_ENGINE_CRYPT) |
(1 << NVDEV_ENGINE_BSP) |
(1 << NVDEV_ENGINE_PPP) |
(1 << NVDEV_ENGINE_COPY0) |
(1 << NVDEV_ENGINE_UNK1C1), &chan);
*pobject = nv_object(chan);
if (ret)
goto error;
return ret;
instance = fctx->ramfc->addr >> 8;
ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
if (ret)
goto error;
return ret;
nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
nv_wo32(fctx->ramfc, 0x40, 0x00000000);
nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->node->offset >> 4);
nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
drm_order(chan->dma.ib_max + 1) << 16);
nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
nv_wo32(fctx->ramfc, 0x78, 0x00000000);
nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
nv_parent(chan)->context_attach = nv84_fifo_context_attach;
nv_parent(chan)->context_detach = nv84_fifo_context_detach;
nv_parent(chan)->object_attach = nv84_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
ioffset = args->ioffset;
ilength = log2i(args->ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
nv_wo32(base->ramfc, 0x60, 0x7fffffff);
nv_wo32(base->ramfc, 0x78, 0x00000000);
nv_wo32(base->ramfc, 0x7c, 0x30000001);
nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->node->offset >> 4));
nv_wo32(fctx->ramfc, 0x88, fctx->cache->addr >> 10);
nv_wo32(fctx->ramfc, 0x98, chan->ramin->addr >> 12);
(chan->ramht->base.node->offset >> 4));
nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
bar->flush(bar);
return 0;
}
nv_wo32(chan->ramin, 0x00, chan->id);
nv_wo32(chan->ramin, 0x04, fctx->ramfc->addr >> 8);
static int
nv84_fifo_chan_init(struct nouveau_object *object)
{
struct nv50_fifo_priv *priv = (void *)object->engine;
struct nv50_fifo_base *base = (void *)object->parent;
struct nv50_fifo_chan *chan = (void *)object;
struct nouveau_gpuobj *ramfc = base->ramfc;
u32 chid = chan->base.chid;
int ret;
nvimem_flush(dev);
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
nv50_fifo_playlist_update(dev);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
error:
ret = nouveau_fifo_channel_init(&chan->base);
if (ret)
priv->base.base.context_del(chan, engine);
return ret;
}
return ret;
static void
nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
{
struct nv84_fifo_chan *fctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned long flags;
u32 save;
/* remove channel from playlist, will context switch if active */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
nv50_fifo_playlist_update(dev);
save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
/* tell any engines on this channel to unload their contexts */
nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
nv_wr32(dev, 0x002520, save);
nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* clean up */
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
nouveau_gpuobj_ref(NULL, &fctx->ramfc);
nouveau_gpuobj_ref(NULL, &fctx->cache);
nvvm_engref(chan->vm, engine, -1);
chan->engctx[engine] = NULL;
kfree(fctx);
}
static int
nv84_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv84_fifo_chan *fctx;
u32 instance;
int i;
nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
nv_wr32(dev, 0x00250c, 0x6f3cfc34);
nv_wr32(dev, 0x002044, 0x01003fff);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
for (i = 0; i < 128; i++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan && (fctx = chan->engctx[engine]))
instance = 0x80000000 | fctx->ramfc->addr >> 8;
else
instance = 0x00000000;
nv_wr32(dev, 0x002600 + (i * 4), instance);
}
nv50_fifo_playlist_update(dev);
nv_wr32(dev, 0x003200, 1);
nv_wr32(dev, 0x003250, 1);
nv_wr32(dev, 0x002500, 1);
nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
nv50_fifo_playlist_update(priv);
return 0;
}
static int
nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv84_fifo_priv *priv = nv_engine(dev, engine);
int i;
u32 save;
/* Object functions for NV84 fifo channels; teardown and fini are shared
 * with the NV50 implementation.
 */
static struct nouveau_ofuncs
nv84_fifo_ofuncs = {
	.ctor = nv84_fifo_chan_ctor,
	.dtor = nv50_fifo_chan_dtor,
	.init = nv84_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.rd32 = _nouveau_fifo_channel_rd32,
	.wr32 = _nouveau_fifo_channel_wr32,
};
/* set playlist length to zero, fifo will unload context */
nv_wr32(dev, 0x0032ec, 0);
/* Channel classes exposed to clients by the NV84 fifo.
 * NOTE(review): 0x826f is presumably the G84 indirect-pushbuffer channel
 * class id — confirm against the class definitions in core/class.h.
 */
static struct nouveau_oclass
nv84_fifo_sclass[] = {
	{ 0x826f, &nv84_fifo_ofuncs },
	{}
};
save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
/* tell all connected engines to unload their contexts */
for (i = 0; i < priv->base.channels; i++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (chan)
nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
return -EBUSY;
}
}
nv_wr32(dev, 0x002520, save);
nv_wr32(dev, 0x002140, 0);
return 0;
}
/*******************************************************************************
* FIFO context - basically just the instmem reserved for the channel
******************************************************************************/
int
nv84_fifo_create(struct drm_device *dev)
nv84_fifo_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv84_fifo_priv *priv;
struct nv50_fifo_base *base;
int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base.base.destroy = nv50_fifo_destroy;
priv->base.base.init = nv84_fifo_init;
priv->base.base.fini = nv84_fifo_fini;
priv->base.base.context_new = nv84_fifo_context_new;
priv->base.base.context_del = nv84_fifo_context_del;
priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
priv->base.channels = 127;
dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
0x1000, NVOBJ_FLAG_HEAP, &base);
*pobject = nv_object(base);
if (ret)
goto error;
return ret;
ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0,
NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
if (ret)
goto error;
return ret;
nouveau_irq_register(dev, 8, nv04_fifo_isr);
error:
ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0,
0, &base->pgd);
if (ret)
priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
return ret;
return ret;
ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1000, 0x400,
NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0100, 0x100,
NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
if (ret)
return ret;
return 0;
}
/* Engine context class for the NV84 fifo; destruction is shared with
 * NV50, init/fini and register access use the generic helpers.
 */
static struct nouveau_oclass
nv84_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv84_fifo_context_ctor,
		.dtor = nv50_fifo_context_dtor,
		.init = _nouveau_fifo_context_init,
		.fini = _nouveau_fifo_context_fini,
		.rd32 = _nouveau_fifo_context_rd32,
		.wr32 = _nouveau_fifo_context_wr32,
	},
};
/*******************************************************************************
* PFIFO engine
******************************************************************************/
/* Construct the NV84 PFIFO engine object: base fifo state for channel ids
 * 1..127 plus the two runlist buffers (double-buffered by
 * nv50_fifo_playlist_update()).
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int
nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv50_fifo_priv *priv;
	int ret;

	ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* one slot per channel in each playlist buffer */
	ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
				 &priv->playlist[0]);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
				 &priv->playlist[1]);
	if (ret)
		return ret;

	/* PMC enable bit and interrupt handler (shared with NV04 fifo) */
	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &nv84_fifo_cclass;
	nv_engine(priv)->sclass = nv84_fifo_sclass;
	return 0;
}
/* Engine class registration for the NV84 PFIFO; teardown and bring-up
 * are shared with the NV50 implementation.
 */
struct nouveau_oclass
nv84_fifo_oclass = {
	.handle = NV_ENGINE(FIFO, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv84_fifo_ctor,
		.dtor = nv50_fifo_dtor,
		.init = nv50_fifo_init,
		.fini = _nouveau_fifo_fini,
	},
};

View File

@ -1,5 +1,5 @@
/*
* Copyright 2010 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,17 +22,24 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include <core/client.h>
#include <core/handle.h>
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
#include <core/class.h>
#include <core/math.h>
#include <core/enum.h>
#include "nouveau_drv.h"
#include <core/mm.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/vm.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include "nouveau_software.h"
static void nvc0_fifo_isr(struct drm_device *);
struct nvc0_fifo_priv {
struct nouveau_fifo_priv base;
struct nouveau_fifo base;
struct nouveau_gpuobj *playlist[2];
int cur_playlist;
struct {
@ -42,14 +49,24 @@ struct nvc0_fifo_priv {
int spoon_nr;
};
/* Per-channel fifo context for NVC0: page directory and channel VM. */
struct nvc0_fifo_base {
	struct nouveau_fifo_base base;
	struct nouveau_gpuobj *pgd;
	struct nouveau_vm *vm;
};

/* Per-channel software state; NVC0 keeps no extra data beyond the base. */
struct nvc0_fifo_chan {
	struct nouveau_fifo_chan base;
};
/*******************************************************************************
* FIFO channel objects
******************************************************************************/
static void
nvc0_fifo_playlist_update(struct drm_device *dev)
nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
{
struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_gpuobj *cur;
int i, p;
@ -57,174 +74,253 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
priv->cur_playlist = !priv->cur_playlist;
for (i = 0, p = 0; i < 128; i++) {
if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
if (!(nv_rd32(priv, 0x003004 + (i * 8)) & 1))
continue;
nv_wo32(cur, p + 0, i);
nv_wo32(cur, p + 4, 0x00000004);
p += 8;
}
nvimem_flush(dev);
bar->flush(bar);
nv_wr32(dev, 0x002270, cur->addr >> 12);
nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
NV_ERROR(dev, "PFIFO - playlist update failed\n");
nv_wr32(priv, 0x002270, cur->addr >> 12);
nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
nv_error(priv, "playlist update failed\n");
}
static int
nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
nvc0_fifo_context_attach(struct nouveau_object *parent,
struct nouveau_object *object)
{
struct drm_device *dev = chan->dev;
struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
struct nvc0_fifo_chan *fctx;
u64 usermem = priv->user.mem->addr + chan->id * 0x1000;
u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
int ret, i;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nvc0_fifo_base *base = (void *)parent->parent;
struct nouveau_engctx *ectx = (void *)object;
u32 addr;
int ret;
fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
priv->user.bar.offset + (chan->id * 0x1000),
PAGE_SIZE);
if (!chan->user) {
ret = -ENOMEM;
goto error;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW : return 0;
case NVDEV_ENGINE_GR : addr = 0x0210; break;
case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
default:
return -EINVAL;
}
for (i = 0; i < 0x100; i += 4)
nv_wo32(chan->ramin, i, 0x00000000);
nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
nv_wo32(chan->ramin, 0x10, 0x0000face);
nv_wo32(chan->ramin, 0x30, 0xfffff902);
nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
upper_32_bits(ib_virt));
nv_wo32(chan->ramin, 0x54, 0x00000002);
nv_wo32(chan->ramin, 0x84, 0x20400000);
nv_wo32(chan->ramin, 0x94, 0x30000001);
nv_wo32(chan->ramin, 0x9c, 0x00000100);
nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
nv_wo32(chan->ramin, 0xac, 0x0000001f);
nv_wo32(chan->ramin, 0xb8, 0xf8000000);
nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
nvimem_flush(dev);
if (!ectx->vma.node) {
ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
NV_MEM_ACCESS_RW, &ectx->vma);
if (ret)
return ret;
}
nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
(chan->ramin->addr >> 12));
nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
nvc0_fifo_playlist_update(dev);
nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
bar->flush(bar);
return 0;
}
error:
static int
nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
struct nouveau_object *object)
{
struct nouveau_bar *bar = nouveau_bar(parent);
struct nvc0_fifo_priv *priv = (void *)parent->engine;
struct nvc0_fifo_base *base = (void *)parent->parent;
struct nvc0_fifo_chan *chan = (void *)parent;
u32 addr;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW : return 0;
case NVDEV_ENGINE_GR : addr = 0x0210; break;
case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
default:
return -EINVAL;
}
nv_wo32(base, addr + 0x00, 0x00000000);
nv_wo32(base, addr + 0x04, 0x00000000);
bar->flush(bar);
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
if (suspend)
return -EBUSY;
}
return 0;
}
static int
nvc0_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nouveau_bar *bar = nouveau_bar(parent);
struct nvc0_fifo_priv *priv = (void *)engine;
struct nvc0_fifo_base *base = (void *)parent;
struct nvc0_fifo_chan *chan;
struct nv_channel_ind_class *args = data;
u64 usermem, ioffset, ilength;
int ret, i;
if (size < sizeof(*args))
return -EINVAL;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
priv->user.bar.offset, 0x1000,
args->pushbuf,
(1 << NVDEV_ENGINE_SW) |
(1 << NVDEV_ENGINE_GR) |
(1 << NVDEV_ENGINE_COPY0) |
(1 << NVDEV_ENGINE_COPY1), &chan);
*pobject = nv_object(chan);
if (ret)
priv->base.base.context_del(chan, engine);
return ret;
return ret;
nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
usermem = chan->base.chid * 0x1000;
ioffset = args->ioffset;
ilength = log2i(args->ilength / 8);
for (i = 0; i < 0x1000; i += 4)
nv_wo32(priv->user.mem, usermem + i, 0x00000000);
nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
nv_wo32(base, 0x10, 0x0000face);
nv_wo32(base, 0x30, 0xfffff902);
nv_wo32(base, 0x48, lower_32_bits(ioffset));
nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
nv_wo32(base, 0x54, 0x00000002);
nv_wo32(base, 0x84, 0x20400000);
nv_wo32(base, 0x94, 0x30000001);
nv_wo32(base, 0x9c, 0x00000100);
nv_wo32(base, 0xa4, 0x1f1f1f1f);
nv_wo32(base, 0xa8, 0x1f1f1f1f);
nv_wo32(base, 0xac, 0x0000001f);
nv_wo32(base, 0xb8, 0xf8000000);
nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
bar->flush(bar);
return 0;
}
static int
nvc0_fifo_chan_init(struct nouveau_object *object)
{
struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
struct nvc0_fifo_priv *priv = (void *)object->engine;
struct nvc0_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
int ret;
ret = nouveau_fifo_channel_init(&chan->base);
if (ret)
return ret;
nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
nvc0_fifo_playlist_update(priv);
return 0;
}
static int
nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
struct nvc0_fifo_priv *priv = (void *)object->engine;
struct nvc0_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
nvc0_fifo_playlist_update(priv);
nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
return nouveau_fifo_channel_fini(&chan->base, suspend);
}
static struct nouveau_ofuncs
nvc0_fifo_ofuncs = {
.ctor = nvc0_fifo_chan_ctor,
.dtor = _nouveau_fifo_channel_dtor,
.init = nvc0_fifo_chan_init,
.fini = nvc0_fifo_chan_fini,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
};
static struct nouveau_oclass
nvc0_fifo_sclass[] = {
{ 0x906f, &nvc0_fifo_ofuncs },
{}
};
/*******************************************************************************
* FIFO context - instmem heap and vm setup
******************************************************************************/
static int
nvc0_fifo_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nvc0_fifo_base *base;
int ret;
ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
0x1000, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_HEAP, &base);
*pobject = nv_object(base);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
if (ret)
return ret;
nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
nv_wo32(base, 0x0208, 0xffffffff);
nv_wo32(base, 0x020c, 0x000000ff);
ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
if (ret)
return ret;
return 0;
}
static void
nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
nvc0_fifo_context_dtor(struct nouveau_object *object)
{
struct nvc0_fifo_chan *fctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
nv_wr32(dev, 0x002634, chan->id);
if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
nvc0_fifo_playlist_update(dev);
nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
chan->engctx[engine] = NULL;
kfree(fctx);
struct nvc0_fifo_base *base = (void *)object;
nouveau_vm_ref(NULL, &base->vm, base->pgd);
nouveau_gpuobj_ref(NULL, &base->pgd);
nouveau_fifo_context_destroy(&base->base);
}
static int
nvc0_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
struct nouveau_channel *chan;
int i;
/* reset PFIFO, enable all available PSUBFIFO areas */
nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
nv_wr32(dev, 0x000204, 0xffffffff);
nv_wr32(dev, 0x002204, 0xffffffff);
priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
/* assign engines to subfifos */
if (priv->spoon_nr >= 3) {
nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
}
/* PSUBFIFO[n] */
for (i = 0; i < priv->spoon_nr; i++) {
nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
}
nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xbfffffff);
/* restore PFIFO context table */
for (i = 0; i < 128; i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->engctx[engine])
continue;
nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
(chan->ramin->addr >> 12));
nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
}
nvc0_fifo_playlist_update(dev);
return 0;
}
static int
nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
int i;
for (i = 0; i < 128; i++) {
if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
continue;
nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
nv_wr32(dev, 0x002634, i);
if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
i, nv_rd32(dev, 0x002634));
return -EBUSY;
}
}
nv_wr32(dev, 0x002140, 0x00000000);
return 0;
}
static struct nouveau_oclass
nvc0_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_fifo_context_ctor,
.dtor = nvc0_fifo_context_dtor,
.init = _nouveau_fifo_context_init,
.fini = _nouveau_fifo_context_fini,
.rd32 = _nouveau_fifo_context_rd32,
.wr32 = _nouveau_fifo_context_wr32,
},
};
/*******************************************************************************
* PFIFO engine
******************************************************************************/
struct nouveau_enum nvc0_fifo_fault_unit[] = {
{ 0x00, "PGRAPH" },
@ -289,16 +385,16 @@ struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
};
static void
nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
{
u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
"write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
printk("] from ");
nouveau_enum_print(nvc0_fifo_fault_unit, unit);
@ -313,165 +409,223 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
}
static int
nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
struct nvc0_fifo_chan *chan = NULL;
struct nouveau_handle *bind;
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
if (likely(chid >= 0 && chid < priv->base.channels)) {
chan = dev_priv->channels.ptr[chid];
if (likely(chan)) {
struct nouveau_software_chan *swch =
chan->engctx[NVOBJ_ENGINE_SW];
ret = swch->flip(swch->flip_data);
}
spin_lock_irqsave(&priv->base.lock, flags);
if (likely(chid >= priv->base.min && chid <= priv->base.max))
chan = (void *)priv->base.channel[chid];
if (unlikely(!chan))
goto out;
bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
if (likely(bind)) {
if (!mthd || !nv_call(bind->object, mthd, data))
ret = 0;
nouveau_namedb_put(bind);
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
out:
spin_unlock_irqrestore(&priv->base.lock, flags);
return ret;
}
static void
nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
{
u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
u32 subc = (addr & 0x00070000);
u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc);
u32 show = stat;
if (stat & 0x00200000) {
if (mthd == 0x0054) {
if (!nvc0_fifo_page_flip(dev, chid))
if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
show &= ~0x00200000;
}
}
if (show) {
NV_INFO(dev, "PFIFO%d:", unit);
nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
unit, chid, subc, mthd, data);
if (stat & 0x00800000) {
if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
show &= ~0x00800000;
}
nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
if (show) {
nv_error(priv, "SUBFIFO%d:", unit);
nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
printk("\n");
nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
"data 0x%08x\n",
unit, chid, subc, mthd, data);
}
nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}
static void
nvc0_fifo_isr(struct drm_device *dev)
nvc0_fifo_intr(struct nouveau_subdev *subdev)
{
u32 mask = nv_rd32(dev, 0x002140);
u32 stat = nv_rd32(dev, 0x002100) & mask;
struct nvc0_fifo_priv *priv = (void *)subdev;
u32 mask = nv_rd32(priv, 0x002140);
u32 stat = nv_rd32(priv, 0x002100) & mask;
if (stat & 0x00000100) {
NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
nv_wr32(dev, 0x002100, 0x00000100);
nv_info(priv, "unknown status 0x00000100\n");
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
if (stat & 0x10000000) {
u32 units = nv_rd32(dev, 0x00259c);
u32 units = nv_rd32(priv, 0x00259c);
u32 u = units;
while (u) {
int i = ffs(u) - 1;
nvc0_fifo_isr_vm_fault(dev, i);
nvc0_fifo_isr_vm_fault(priv, i);
u &= ~(1 << i);
}
nv_wr32(dev, 0x00259c, units);
nv_wr32(priv, 0x00259c, units);
stat &= ~0x10000000;
}
if (stat & 0x20000000) {
u32 units = nv_rd32(dev, 0x0025a0);
u32 units = nv_rd32(priv, 0x0025a0);
u32 u = units;
while (u) {
int i = ffs(u) - 1;
nvc0_fifo_isr_subfifo_intr(dev, i);
nvc0_fifo_isr_subfifo_intr(priv, i);
u &= ~(1 << i);
}
nv_wr32(dev, 0x0025a0, units);
nv_wr32(priv, 0x0025a0, units);
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
nv_warn(priv, "unknown status 0x40000000\n");
nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
stat &= ~0x40000000;
}
if (stat) {
NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
nv_wr32(dev, 0x002100, stat);
nv_wr32(dev, 0x002140, 0);
nv_fatal(priv, "unhandled status 0x%08x\n", stat);
nv_wr32(priv, 0x002100, stat);
nv_wr32(priv, 0x002140, 0);
}
}
static void
nvc0_fifo_destroy(struct drm_device *dev, int engine)
static int
nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
nouveau_gpuobj_unmap(&priv->user.bar);
nouveau_gpuobj_ref(NULL, &priv->user.mem);
nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
dev_priv->eng[engine] = NULL;
kfree(priv);
}
int
nvc0_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_fifo_priv *priv;
int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base.base.destroy = nvc0_fifo_destroy;
priv->base.base.init = nvc0_fifo_init;
priv->base.base.fini = nvc0_fifo_fini;
priv->base.base.context_new = nvc0_fifo_context_new;
priv->base.base.context_del = nvc0_fifo_context_del;
priv->base.channels = 128;
dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
ret = nouveau_fifo_create(parent, engine, oclass, 0, 127, &priv);
*pobject = nv_object(priv);
if (ret)
goto error;
return ret;
ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
&priv->playlist[0]);
if (ret)
goto error;
return ret;
ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4096, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
&priv->playlist[1]);
if (ret)
goto error;
return ret;
ret = nouveau_gpuobj_map_bar(priv->user.mem, NV_MEM_ACCESS_RW,
&priv->user.bar);
ret = nouveau_gpuobj_new(parent, NULL, 128 * 0x1000, 0x1000, 0,
&priv->user.mem);
if (ret)
goto error;
return ret;
nouveau_irq_register(dev, 8, nvc0_fifo_isr);
error:
ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
&priv->user.bar);
if (ret)
priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
return ret;
return ret;
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nvc0_fifo_intr;
nv_engine(priv)->cclass = &nvc0_fifo_cclass;
nv_engine(priv)->sclass = nvc0_fifo_sclass;
return 0;
}
static void
nvc0_fifo_dtor(struct nouveau_object *object)
{
struct nvc0_fifo_priv *priv = (void *)object;
nouveau_gpuobj_unmap(&priv->user.bar);
nouveau_gpuobj_ref(NULL, &priv->user.mem);
nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
nouveau_fifo_destroy(&priv->base);
}
static int
nvc0_fifo_init(struct nouveau_object *object)
{
struct nvc0_fifo_priv *priv = (void *)object;
int ret, i;
ret = nouveau_fifo_init(&priv->base);
if (ret)
return ret;
nv_wr32(priv, 0x000204, 0xffffffff);
nv_wr32(priv, 0x002204, 0xffffffff);
priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
/* assign engines to subfifos */
if (priv->spoon_nr >= 3) {
nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
nv_wr32(priv, 0x002210, ~(1 << 1)); /* PPP */
nv_wr32(priv, 0x002214, ~(1 << 1)); /* PBSP */
nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
}
/* PSUBFIFO[n] */
for (i = 0; i < priv->spoon_nr; i++) {
nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
}
nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
nv_wr32(priv, 0x002100, 0xffffffff);
nv_wr32(priv, 0x002140, 0xbfffffff);
return 0;
}
struct nouveau_oclass
nvc0_fifo_oclass = {
.handle = NV_ENGINE(FIFO, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_fifo_ctor,
.dtor = nvc0_fifo_dtor,
.init = nvc0_fifo_init,
.fini = _nouveau_fifo_fini,
},
};

View File

@ -1,5 +1,5 @@
/*
* Copyright 2010 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,25 +22,30 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include <core/client.h>
#include <core/handle.h>
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
#include <core/class.h>
#include <core/math.h>
#include <core/enum.h>
#include "nouveau_drv.h"
#include <core/mm.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/vm.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include "nouveau_software.h"
#define NVE0_FIFO_ENGINE_NUM 32
static void nve0_fifo_isr(struct drm_device *);
struct nve0_fifo_engine {
struct nve0_fifo_engn {
struct nouveau_gpuobj *playlist[2];
int cur_playlist;
};
struct nve0_fifo_priv {
struct nouveau_fifo_priv base;
struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
struct nouveau_fifo base;
struct nve0_fifo_engn engine[16];
struct {
struct nouveau_gpuobj *mem;
struct nouveau_vma bar;
@ -48,193 +53,285 @@ struct nve0_fifo_priv {
int spoon_nr;
};
struct nve0_fifo_base {
struct nouveau_fifo_base base;
struct nouveau_gpuobj *pgd;
struct nouveau_vm *vm;
};
struct nve0_fifo_chan {
struct nouveau_fifo_chan base;
u32 engine;
};
/*******************************************************************************
* FIFO channel objects
******************************************************************************/
static void
nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
{
struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nve0_fifo_engine *peng = &priv->engine[engine];
struct nouveau_bar *bar = nouveau_bar(priv);
struct nve0_fifo_engn *engn = &priv->engine[engine];
struct nouveau_gpuobj *cur;
u32 match = (engine << 16) | 0x00000001;
int ret, i, p;
int i, p;
cur = peng->playlist[peng->cur_playlist];
cur = engn->playlist[engn->cur_playlist];
if (unlikely(cur == NULL)) {
ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL,
0x8000, 0x1000, 0, &cur);
if (ret) {
NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
nv_error(priv, "playlist alloc failed\n");
return;
}
peng->playlist[peng->cur_playlist] = cur;
engn->playlist[engn->cur_playlist] = cur;
}
peng->cur_playlist = !peng->cur_playlist;
engn->cur_playlist = !engn->cur_playlist;
for (i = 0, p = 0; i < priv->base.channels; i++) {
u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
for (i = 0, p = 0; i < priv->base.max; i++) {
u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
if (ctrl != match)
continue;
nv_wo32(cur, p + 0, i);
nv_wo32(cur, p + 4, 0x00000000);
p += 8;
}
nvimem_flush(dev);
bar->flush(bar);
nv_wr32(dev, 0x002270, cur->addr >> 12);
nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
nv_wr32(priv, 0x002270, cur->addr >> 12);
nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
nv_error(priv, "playlist %d update timeout\n", engine);
}
static int
nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
nve0_fifo_context_attach(struct nouveau_object *parent,
struct nouveau_object *object)
{
struct drm_device *dev = chan->dev;
struct nve0_fifo_priv *priv = nv_engine(dev, engine);
struct nve0_fifo_chan *fctx;
u64 usermem = priv->user.mem->addr + chan->id * 512;
u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
int ret = 0, i;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nve0_fifo_base *base = (void *)parent->parent;
struct nouveau_engctx *ectx = (void *)object;
u32 addr;
int ret;
fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return -ENOMEM;
fctx->engine = 0; /* PGRAPH */
/* allocate vram for control regs, map into polling area */
chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
priv->user.bar.offset + (chan->id * 512), 512);
if (!chan->user) {
ret = -ENOMEM;
goto error;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW : return 0;
case NVDEV_ENGINE_GR : addr = 0x0210; break;
default:
return -EINVAL;
}
for (i = 0; i < 0x100; i += 4)
nv_wo32(chan->ramin, i, 0x00000000);
nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
nv_wo32(chan->ramin, 0x10, 0x0000face);
nv_wo32(chan->ramin, 0x30, 0xfffff902);
nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
upper_32_bits(ib_virt));
nv_wo32(chan->ramin, 0x84, 0x20400000);
nv_wo32(chan->ramin, 0x94, 0x30000001);
nv_wo32(chan->ramin, 0x9c, 0x00000100);
nv_wo32(chan->ramin, 0xac, 0x0000001f);
nv_wo32(chan->ramin, 0xe4, 0x00000000);
nv_wo32(chan->ramin, 0xe8, chan->id);
nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
nvimem_flush(dev);
if (!ectx->vma.node) {
ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
NV_MEM_ACCESS_RW, &ectx->vma);
if (ret)
return ret;
}
nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
(chan->ramin->addr >> 12));
nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
nve0_fifo_playlist_update(dev, fctx->engine);
nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
bar->flush(bar);
return 0;
}
error:
static int
nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
struct nouveau_object *object)
{
struct nouveau_bar *bar = nouveau_bar(parent);
struct nve0_fifo_priv *priv = (void *)parent->engine;
struct nve0_fifo_base *base = (void *)parent->parent;
struct nve0_fifo_chan *chan = (void *)parent;
u32 addr;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW : return 0;
case NVDEV_ENGINE_GR : addr = 0x0210; break;
default:
return -EINVAL;
}
nv_wo32(base, addr + 0x00, 0x00000000);
nv_wo32(base, addr + 0x04, 0x00000000);
bar->flush(bar);
nv_wr32(priv, 0x002634, chan->base.chid);
if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
if (suspend)
return -EBUSY;
}
return 0;
}
static int
nve0_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nouveau_bar *bar = nouveau_bar(parent);
struct nve0_fifo_priv *priv = (void *)engine;
struct nve0_fifo_base *base = (void *)parent;
struct nve0_fifo_chan *chan;
struct nv_channel_ind_class *args = data;
u64 usermem, ioffset, ilength;
int ret, i;
if (size < sizeof(*args))
return -EINVAL;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
priv->user.bar.offset, 0x200,
args->pushbuf,
(1 << NVDEV_ENGINE_SW) |
(1 << NVDEV_ENGINE_GR), &chan);
*pobject = nv_object(chan);
if (ret)
priv->base.base.context_del(chan, engine);
return ret;
return ret;
nv_parent(chan)->context_attach = nve0_fifo_context_attach;
nv_parent(chan)->context_detach = nve0_fifo_context_detach;
usermem = chan->base.chid * 0x200;
ioffset = args->ioffset;
ilength = log2i(args->ilength / 8);
for (i = 0; i < 0x200; i += 4)
nv_wo32(priv->user.mem, usermem + i, 0x00000000);
nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
nv_wo32(base, 0x10, 0x0000face);
nv_wo32(base, 0x30, 0xfffff902);
nv_wo32(base, 0x48, lower_32_bits(ioffset));
nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
nv_wo32(base, 0x84, 0x20400000);
nv_wo32(base, 0x94, 0x30000001);
nv_wo32(base, 0x9c, 0x00000100);
nv_wo32(base, 0xac, 0x0000001f);
nv_wo32(base, 0xe8, chan->base.chid);
nv_wo32(base, 0xb8, 0xf8000000);
nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
bar->flush(bar);
return 0;
}
static int
nve0_fifo_chan_init(struct nouveau_object *object)
{
struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
struct nve0_fifo_priv *priv = (void *)object->engine;
struct nve0_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
int ret;
ret = nouveau_fifo_channel_init(&chan->base);
if (ret)
return ret;
nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
nve0_fifo_playlist_update(priv, chan->engine);
nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
return 0;
}
static int
nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
struct nve0_fifo_priv *priv = (void *)object->engine;
struct nve0_fifo_chan *chan = (void *)object;
u32 chid = chan->base.chid;
nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
nve0_fifo_playlist_update(priv, chan->engine);
nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
return nouveau_fifo_channel_fini(&chan->base, suspend);
}
static struct nouveau_ofuncs
nve0_fifo_ofuncs = {
.ctor = nve0_fifo_chan_ctor,
.dtor = _nouveau_fifo_channel_dtor,
.init = nve0_fifo_chan_init,
.fini = nve0_fifo_chan_fini,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
};
static struct nouveau_oclass
nve0_fifo_sclass[] = {
{ 0xa06f, &nve0_fifo_ofuncs },
{}
};
/*******************************************************************************
* FIFO context - instmem heap and vm setup
******************************************************************************/
static int
nve0_fifo_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nve0_fifo_base *base;
int ret;
ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
*pobject = nv_object(base);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
if (ret)
return ret;
nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
nv_wo32(base, 0x0208, 0xffffffff);
nv_wo32(base, 0x020c, 0x000000ff);
ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
if (ret)
return ret;
return 0;
}
static void
nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
nve0_fifo_context_dtor(struct nouveau_object *object)
{
struct nve0_fifo_chan *fctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
nv_wr32(dev, 0x002634, chan->id);
if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
nve0_fifo_playlist_update(dev, fctx->engine);
nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
kfree(fctx);
struct nve0_fifo_base *base = (void *)object;
nouveau_vm_ref(NULL, &base->vm, base->pgd);
nouveau_gpuobj_ref(NULL, &base->pgd);
nouveau_fifo_context_destroy(&base->base);
}
static int
nve0_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nve0_fifo_priv *priv = nv_engine(dev, engine);
struct nve0_fifo_chan *fctx;
int i;
static struct nouveau_oclass
nve0_fifo_cclass = {
.handle = NV_ENGCTX(FIFO, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_fifo_context_ctor,
.dtor = nve0_fifo_context_dtor,
.init = _nouveau_fifo_context_init,
.fini = _nouveau_fifo_context_fini,
.rd32 = _nouveau_fifo_context_rd32,
.wr32 = _nouveau_fifo_context_wr32,
},
};
/* reset PFIFO, enable all available PSUBFIFO areas */
nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
nv_wr32(dev, 0x000204, 0xffffffff);
priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
/* PSUBFIFO[n] */
for (i = 0; i < priv->spoon_nr; i++) {
nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
}
nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
nv_wr32(dev, 0x002a00, 0xffffffff);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xbfffffff);
/* restore PFIFO context table */
for (i = 0; i < priv->base.channels; i++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[i];
if (!chan || !(fctx = chan->engctx[engine]))
continue;
nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
(chan->ramin->addr >> 12));
nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
nve0_fifo_playlist_update(dev, fctx->engine);
nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
}
return 0;
}
static int
nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
struct nve0_fifo_priv *priv = nv_engine(dev, engine);
int i;
for (i = 0; i < priv->base.channels; i++) {
if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
continue;
nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
nv_wr32(dev, 0x002634, i);
if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
i, nv_rd32(dev, 0x002634));
return -EBUSY;
}
}
nv_wr32(dev, 0x002140, 0x00000000);
return 0;
}
/*******************************************************************************
* PFIFO engine
******************************************************************************/
struct nouveau_enum nve0_fifo_fault_unit[] = {
{}
@ -268,16 +365,16 @@ struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
};
static void
nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
{
u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
"write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
printk("] from ");
nouveau_enum_print(nve0_fifo_fault_unit, unit);
@ -292,160 +389,205 @@ nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
}
static int
nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
struct nve0_fifo_chan *chan = NULL;
struct nouveau_handle *bind;
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
if (likely(chid >= 0 && chid < priv->base.channels)) {
chan = dev_priv->channels.ptr[chid];
if (likely(chan)) {
struct nouveau_software_chan *swch =
chan->engctx[NVOBJ_ENGINE_SW];
ret = swch->flip(swch->flip_data);
}
spin_lock_irqsave(&priv->base.lock, flags);
if (likely(chid >= priv->base.min && chid <= priv->base.max))
chan = (void *)priv->base.channel[chid];
if (unlikely(!chan))
goto out;
bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
if (likely(bind)) {
if (!mthd || !nv_call(bind->object, mthd, data))
ret = 0;
nouveau_namedb_put(bind);
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
out:
spin_unlock_irqrestore(&priv->base.lock, flags);
return ret;
}
static void
nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
{
u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0xfff;
u32 subc = (addr & 0x00070000);
u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc);
u32 show = stat;
if (stat & 0x00200000) {
if (mthd == 0x0054) {
if (!nve0_fifo_page_flip(dev, chid))
if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
show &= ~0x00200000;
}
}
if (show) {
NV_INFO(dev, "PFIFO%d:", unit);
nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
unit, chid, subc, mthd, data);
if (stat & 0x00800000) {
if (!nve0_fifo_swmthd(priv, chid, mthd, data))
show &= ~0x00800000;
}
nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
if (show) {
nv_error(priv, "SUBFIFO%d:", unit);
nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
printk("\n");
nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
"data 0x%08x\n",
unit, chid, subc, mthd, data);
}
nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}
static void
nve0_fifo_isr(struct drm_device *dev)
nve0_fifo_intr(struct nouveau_subdev *subdev)
{
u32 mask = nv_rd32(dev, 0x002140);
u32 stat = nv_rd32(dev, 0x002100) & mask;
struct nve0_fifo_priv *priv = (void *)subdev;
u32 mask = nv_rd32(priv, 0x002140);
u32 stat = nv_rd32(priv, 0x002100) & mask;
if (stat & 0x00000100) {
NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
nv_wr32(dev, 0x002100, 0x00000100);
nv_warn(priv, "unknown status 0x00000100\n");
nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
if (stat & 0x10000000) {
u32 units = nv_rd32(dev, 0x00259c);
u32 units = nv_rd32(priv, 0x00259c);
u32 u = units;
while (u) {
int i = ffs(u) - 1;
nve0_fifo_isr_vm_fault(dev, i);
nve0_fifo_isr_vm_fault(priv, i);
u &= ~(1 << i);
}
nv_wr32(dev, 0x00259c, units);
nv_wr32(priv, 0x00259c, units);
stat &= ~0x10000000;
}
if (stat & 0x20000000) {
u32 units = nv_rd32(dev, 0x0025a0);
u32 units = nv_rd32(priv, 0x0025a0);
u32 u = units;
while (u) {
int i = ffs(u) - 1;
nve0_fifo_isr_subfifo_intr(dev, i);
nve0_fifo_isr_subfifo_intr(priv, i);
u &= ~(1 << i);
}
nv_wr32(dev, 0x0025a0, units);
nv_wr32(priv, 0x0025a0, units);
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
nv_warn(priv, "unknown status 0x40000000\n");
nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
stat &= ~0x40000000;
}
if (stat) {
NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
nv_wr32(dev, 0x002100, stat);
nv_wr32(dev, 0x002140, 0);
nv_fatal(priv, "unhandled status 0x%08x\n", stat);
nv_wr32(priv, 0x002100, stat);
nv_wr32(priv, 0x002140, 0);
}
}
static void
nve0_fifo_destroy(struct drm_device *dev, int engine)
static int
nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nve0_fifo_priv *priv = nv_engine(dev, engine);
struct nve0_fifo_priv *priv;
int ret;
ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
if (ret)
return ret;
ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
&priv->user.bar);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nve0_fifo_intr;
nv_engine(priv)->cclass = &nve0_fifo_cclass;
nv_engine(priv)->sclass = nve0_fifo_sclass;
return 0;
}
static void
nve0_fifo_dtor(struct nouveau_object *object)
{
struct nve0_fifo_priv *priv = (void *)object;
int i;
nouveau_gpuobj_unmap(&priv->user.bar);
nouveau_gpuobj_ref(NULL, &priv->user.mem);
for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
for (i = 0; i < ARRAY_SIZE(priv->engine); i++) {
nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
}
dev_priv->eng[engine] = NULL;
kfree(priv);
nouveau_fifo_destroy(&priv->base);
}
int
nve0_fifo_create(struct drm_device *dev)
static int
nve0_fifo_init(struct nouveau_object *object)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nve0_fifo_priv *priv;
int ret;
struct nve0_fifo_priv *priv = (void *)object;
int ret, i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base.base.destroy = nve0_fifo_destroy;
priv->base.base.init = nve0_fifo_init;
priv->base.base.fini = nve0_fifo_fini;
priv->base.base.context_new = nve0_fifo_context_new;
priv->base.base.context_del = nve0_fifo_context_del;
priv->base.channels = 4096;
dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
ret = nouveau_fifo_init(&priv->base);
if (ret)
goto error;
return ret;
ret = nouveau_gpuobj_map_bar(priv->user.mem, NV_MEM_ACCESS_RW,
&priv->user.bar);
if (ret)
goto error;
/* enable all available PSUBFIFOs */
nv_wr32(priv, 0x000204, 0xffffffff);
priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
nouveau_irq_register(dev, 8, nve0_fifo_isr);
error:
if (ret)
priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
return ret;
/* PSUBFIFO[n] */
for (i = 0; i < priv->spoon_nr; i++) {
nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
}
nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
nv_wr32(priv, 0x002a00, 0xffffffff);
nv_wr32(priv, 0x002100, 0xffffffff);
nv_wr32(priv, 0x002140, 0xbfffffff);
return 0;
}
struct nouveau_oclass
nve0_fifo_oclass = {
.handle = NV_ENGINE(FIFO, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_fifo_ctor,
.dtor = nve0_fifo_dtor,
.init = nve0_fifo_init,
.fini = _nouveau_fifo_fini,
},
};

View File

@ -2,7 +2,7 @@
#define __NOUVEAU_GRCTX_H__
struct nouveau_grctx {
struct drm_device *dev;
struct nouveau_device *device;
enum {
NOUVEAU_GRCTX_PROG,
@ -10,18 +10,18 @@ struct nouveau_grctx {
} mode;
void *data;
uint32_t ctxprog_max;
uint32_t ctxprog_len;
uint32_t ctxprog_reg;
int ctxprog_label[32];
uint32_t ctxvals_pos;
uint32_t ctxvals_base;
u32 ctxprog_max;
u32 ctxprog_len;
u32 ctxprog_reg;
int ctxprog_label[32];
u32 ctxvals_pos;
u32 ctxvals_base;
};
static inline void
cp_out(struct nouveau_grctx *ctx, uint32_t inst)
cp_out(struct nouveau_grctx *ctx, u32 inst)
{
uint32_t *ctxprog = ctx->data;
u32 *ctxprog = ctx->data;
if (ctx->mode != NOUVEAU_GRCTX_PROG)
return;
@ -31,13 +31,13 @@ cp_out(struct nouveau_grctx *ctx, uint32_t inst)
}
static inline void
cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
cp_lsr(struct nouveau_grctx *ctx, u32 val)
{
cp_out(ctx, CP_LOAD_SR | val);
}
static inline void
cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
cp_ctx(struct nouveau_grctx *ctx, u32 reg, u32 length)
{
ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
@ -55,7 +55,7 @@ cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
static inline void
cp_name(struct nouveau_grctx *ctx, int name)
{
uint32_t *ctxprog = ctx->data;
u32 *ctxprog = ctx->data;
int i;
if (ctx->mode != NOUVEAU_GRCTX_PROG)
@ -115,7 +115,7 @@ cp_pos(struct nouveau_grctx *ctx, int offset)
}
static inline void
gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
gr_def(struct nouveau_grctx *ctx, u32 reg, u32 val)
{
if (ctx->mode != NOUVEAU_GRCTX_VALS)
return;

View File

@ -22,6 +22,8 @@
* Authors: Ben Skeggs
*/
#include <core/gpuobj.h>
/* NVIDIA context programs handle a number of other conditions which are
* not implemented in our versions. It's not clear why NVIDIA context
* programs have this code, nor whether it's strictly necessary for
@ -109,8 +111,7 @@
#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
#include "drmP.h"
#include "nouveau_drv.h"
#include "nv40.h"
#include "ctx.h"
/* TODO:
@ -118,11 +119,10 @@
*/
static int
nv40_graph_vs_count(struct drm_device *dev)
nv40_graph_vs_count(struct nouveau_device *device)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
switch (dev_priv->chipset) {
switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@ -160,7 +160,7 @@ enum cp_label {
static void
nv40_graph_construct_general(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
struct nouveau_device *device = ctx->device;
int i;
cp_ctx(ctx, 0x4000a4, 1);
@ -187,7 +187,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x400724, 1);
gr_def(ctx, 0x400724, 0x02008821);
cp_ctx(ctx, 0x400770, 3);
if (dev_priv->chipset == 0x40) {
if (device->chipset == 0x40) {
cp_ctx(ctx, 0x400814, 4);
cp_ctx(ctx, 0x400828, 5);
cp_ctx(ctx, 0x400840, 5);
@ -208,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x4009dc, 0x80000000);
} else {
cp_ctx(ctx, 0x400840, 20);
if (nv44_graph_class(ctx->dev)) {
if (nv44_graph_class(ctx->device)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
}
@ -217,21 +217,21 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400888, 0x00000040);
cp_ctx(ctx, 0x400894, 11);
gr_def(ctx, 0x400894, 0x00000040);
if (!nv44_graph_class(ctx->dev)) {
if (!nv44_graph_class(ctx->device)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
}
cp_ctx(ctx, 0x4008e0, 2);
cp_ctx(ctx, 0x4008f8, 2);
if (dev_priv->chipset == 0x4c ||
(dev_priv->chipset & 0xf0) == 0x60)
if (device->chipset == 0x4c ||
(device->chipset & 0xf0) == 0x60)
cp_ctx(ctx, 0x4009f8, 1);
}
cp_ctx(ctx, 0x400a00, 73);
gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
cp_ctx(ctx, 0x401000, 4);
cp_ctx(ctx, 0x405004, 1);
switch (dev_priv->chipset) {
switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@ -240,7 +240,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
break;
default:
cp_ctx(ctx, 0x403440, 1);
switch (dev_priv->chipset) {
switch (device->chipset) {
case 0x40:
gr_def(ctx, 0x403440, 0x00000010);
break;
@ -266,19 +266,19 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
struct nouveau_device *device = ctx->device;
int i;
if (dev_priv->chipset == 0x40) {
if (device->chipset == 0x40) {
cp_ctx(ctx, 0x401880, 51);
gr_def(ctx, 0x401940, 0x00000100);
} else
if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
if (device->chipset == 0x46 || device->chipset == 0x47 ||
device->chipset == 0x49 || device->chipset == 0x4b) {
cp_ctx(ctx, 0x401880, 32);
for (i = 0; i < 16; i++)
gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
if (dev_priv->chipset == 0x46)
if (device->chipset == 0x46)
cp_ctx(ctx, 0x401900, 16);
cp_ctx(ctx, 0x401940, 3);
}
@ -289,7 +289,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
gr_def(ctx, 0x401978, 0xffff0000);
gr_def(ctx, 0x40197c, 0x00000001);
gr_def(ctx, 0x401990, 0x46400000);
if (dev_priv->chipset == 0x40) {
if (device->chipset == 0x40) {
cp_ctx(ctx, 0x4019a0, 2);
cp_ctx(ctx, 0x4019ac, 5);
} else {
@ -297,7 +297,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x4019b4, 3);
}
gr_def(ctx, 0x4019bc, 0xffff0000);
switch (dev_priv->chipset) {
switch (device->chipset) {
case 0x46:
case 0x47:
case 0x49:
@ -316,7 +316,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
for (i = 0; i < 16; i++)
gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
gr_def(ctx, 0x401a8c, 0x4b7fffff);
if (dev_priv->chipset == 0x40) {
if (device->chipset == 0x40) {
cp_ctx(ctx, 0x401ab8, 3);
} else {
cp_ctx(ctx, 0x401ab8, 1);
@ -327,10 +327,10 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
gr_def(ctx, 0x401ad4, 0x70605040);
gr_def(ctx, 0x401ad8, 0xb8a89888);
gr_def(ctx, 0x401adc, 0xf8e8d8c8);
cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
cp_ctx(ctx, 0x401b10, device->chipset == 0x40 ? 2 : 1);
gr_def(ctx, 0x401b10, 0x40100000);
cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
cp_ctx(ctx, 0x401b18, device->chipset == 0x40 ? 6 : 5);
gr_def(ctx, 0x401b28, device->chipset == 0x40 ?
0x00000004 : 0x00000000);
cp_ctx(ctx, 0x401b30, 25);
gr_def(ctx, 0x401b34, 0x0000ffff);
@ -341,8 +341,8 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
gr_def(ctx, 0x401b84, 0xffffffff);
gr_def(ctx, 0x401b88, 0x00ff7000);
gr_def(ctx, 0x401b8c, 0x0000ffff);
if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
dev_priv->chipset != 0x4e)
if (device->chipset != 0x44 && device->chipset != 0x4a &&
device->chipset != 0x4e)
cp_ctx(ctx, 0x401b94, 1);
cp_ctx(ctx, 0x401b98, 8);
gr_def(ctx, 0x401b9c, 0x00ff0000);
@ -371,12 +371,12 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
struct nouveau_device *device = ctx->device;
int i;
cp_ctx(ctx, 0x402000, 1);
cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
switch (dev_priv->chipset) {
cp_ctx(ctx, 0x402404, device->chipset == 0x40 ? 1 : 2);
switch (device->chipset) {
case 0x40:
gr_def(ctx, 0x402404, 0x00000001);
break;
@ -393,9 +393,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
default:
gr_def(ctx, 0x402404, 0x00000021);
}
if (dev_priv->chipset != 0x40)
if (device->chipset != 0x40)
gr_def(ctx, 0x402408, 0x030c30c3);
switch (dev_priv->chipset) {
switch (device->chipset) {
case 0x44:
case 0x46:
case 0x4a:
@ -408,10 +408,10 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
default:
break;
}
cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
cp_ctx(ctx, 0x402480, device->chipset == 0x40 ? 8 : 9);
gr_def(ctx, 0x402488, 0x3e020200);
gr_def(ctx, 0x40248c, 0x00ffffff);
switch (dev_priv->chipset) {
switch (device->chipset) {
case 0x40:
gr_def(ctx, 0x402490, 0x60103f00);
break;
@ -428,16 +428,16 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
gr_def(ctx, 0x402490, 0x0c103f00);
break;
}
gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
gr_def(ctx, 0x40249c, device->chipset <= 0x43 ?
0x00020000 : 0x00040000);
cp_ctx(ctx, 0x402500, 31);
gr_def(ctx, 0x402530, 0x00008100);
if (dev_priv->chipset == 0x40)
if (device->chipset == 0x40)
cp_ctx(ctx, 0x40257c, 6);
cp_ctx(ctx, 0x402594, 16);
cp_ctx(ctx, 0x402800, 17);
gr_def(ctx, 0x402800, 0x00000001);
switch (dev_priv->chipset) {
switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@ -445,7 +445,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
gr_def(ctx, 0x402864, 0x00001001);
cp_ctx(ctx, 0x402870, 3);
gr_def(ctx, 0x402878, 0x00000003);
if (dev_priv->chipset != 0x47) { /* belong at end!! */
if (device->chipset != 0x47) { /* belong at end!! */
cp_ctx(ctx, 0x402900, 1);
cp_ctx(ctx, 0x402940, 1);
cp_ctx(ctx, 0x402980, 1);
@ -470,9 +470,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
}
cp_ctx(ctx, 0x402c00, 4);
gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
gr_def(ctx, 0x402c00, device->chipset == 0x40 ?
0x80800001 : 0x00888001);
switch (dev_priv->chipset) {
switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@ -485,30 +485,30 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
break;
default:
cp_ctx(ctx, 0x402c10, 4);
if (dev_priv->chipset == 0x40)
if (device->chipset == 0x40)
cp_ctx(ctx, 0x402c20, 36);
else
if (dev_priv->chipset <= 0x42)
if (device->chipset <= 0x42)
cp_ctx(ctx, 0x402c20, 24);
else
if (dev_priv->chipset <= 0x4a)
if (device->chipset <= 0x4a)
cp_ctx(ctx, 0x402c20, 16);
else
cp_ctx(ctx, 0x402c20, 8);
cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
cp_ctx(ctx, 0x402cb0, device->chipset == 0x40 ? 12 : 13);
gr_def(ctx, 0x402cd4, 0x00000005);
if (dev_priv->chipset != 0x40)
if (device->chipset != 0x40)
gr_def(ctx, 0x402ce0, 0x0000ffff);
break;
}
cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
cp_ctx(ctx, 0x403400, device->chipset == 0x40 ? 4 : 3);
cp_ctx(ctx, 0x403410, device->chipset == 0x40 ? 4 : 3);
cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->device));
for (i = 0; i < nv40_graph_vs_count(ctx->device); i++)
gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
if (dev_priv->chipset != 0x40) {
if (device->chipset != 0x40) {
cp_ctx(ctx, 0x403600, 1);
gr_def(ctx, 0x403600, 0x00000001);
}
@ -516,7 +516,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x403c18, 1);
gr_def(ctx, 0x403c18, 0x00000001);
switch (dev_priv->chipset) {
switch (device->chipset) {
case 0x46:
case 0x47:
case 0x49:
@ -527,7 +527,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
gr_def(ctx, 0x405c24, 0x000e3000);
break;
}
if (dev_priv->chipset != 0x4e)
if (device->chipset != 0x4e)
cp_ctx(ctx, 0x405800, 11);
cp_ctx(ctx, 0x407000, 1);
}
@ -535,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
{
int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
int len = nv44_graph_class(ctx->device) ? 0x0084 : 0x0684;
cp_out (ctx, 0x300000);
cp_lsr (ctx, len - 4);
@ -550,32 +550,31 @@ nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_shader(struct nouveau_grctx *ctx)
{
struct drm_device *dev = ctx->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_device *device = ctx->device;
struct nouveau_gpuobj *obj = ctx->data;
int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
int offset, i;
vs_nr = nv40_graph_vs_count(ctx->dev);
vs_nr = nv40_graph_vs_count(ctx->device);
vs_nr_b0 = 363;
vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
if (dev_priv->chipset == 0x40) {
vs_nr_b1 = device->chipset == 0x40 ? 128 : 64;
if (device->chipset == 0x40) {
b0_offset = 0x2200/4; /* 33a0 */
b1_offset = 0x55a0/4; /* 1500 */
vs_len = 0x6aa0/4;
} else
if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
if (device->chipset == 0x41 || device->chipset == 0x42) {
b0_offset = 0x2200/4; /* 2200 */
b1_offset = 0x4400/4; /* 0b00 */
vs_len = 0x4f00/4;
} else {
b0_offset = 0x1d40/4; /* 2200 */
b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
vs_len = nv44_graph_class(device) ? 0x4980/4 : 0x4a40/4;
}
cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
cp_out(ctx, nv44_graph_class(device) ? 0x800029 : 0x800041);
offset = ctx->ctxvals_pos;
ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
@ -661,21 +660,21 @@ nv40_grctx_generate(struct nouveau_grctx *ctx)
}
void
nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
{
nv40_grctx_generate(&(struct nouveau_grctx) {
.dev = dev,
.device = device,
.mode = NOUVEAU_GRCTX_VALS,
.data = mem,
});
}
void
nv40_grctx_init(struct drm_device *dev, u32 *size)
nv40_grctx_init(struct nouveau_device *device, u32 *size)
{
u32 ctxprog[256], i;
struct nouveau_grctx ctx = {
.dev = dev,
.device = device,
.mode = NOUVEAU_GRCTX_PROG,
.data = ctxprog,
.ctxprog_max = ARRAY_SIZE(ctxprog)
@ -683,8 +682,8 @@ nv40_grctx_init(struct drm_device *dev, u32 *size)
nv40_grctx_generate(&ctx);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
nv_wr32(device, 0x400324, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
nv_wr32(device, 0x400328, ctxprog[i]);
*size = ctx.ctxvals_pos * 4;
}

File diff suppressed because it is too large Load Diff

View File

@ -22,13 +22,10 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include <core/mm.h>
#include "nvc0.h"
void
nv_icmd(struct drm_device *priv, u32 icmd, u32 data)
nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data)
{
nv_wr32(priv, 0x400204, data);
nv_wr32(priv, 0x400200, icmd);
@ -36,21 +33,22 @@ nv_icmd(struct drm_device *priv, u32 icmd, u32 data)
}
int
nvc0_grctx_init(struct drm_device *priv, struct nvc0_graph_priv *oprv,
struct nvc0_grctx *info)
nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_object *parent = nv_object(priv);
struct nouveau_gpuobj *chan;
u32 size = (0x80000 + oprv->size + 4095) & ~4095;
u32 size = (0x80000 + priv->size + 4095) & ~4095;
int ret, i;
/* allocate memory to for a "channel", which we'll use to generate
* the default context values
*/
ret = nouveau_gpuobj_new(priv, NULL, size, 0x1000,
ret = nouveau_gpuobj_new(parent, NULL, size, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
chan = info->chan;
if (ret) {
NV_ERROR(priv, "failed to allocate channel memory, %d\n", ret);
nv_error(priv, "failed to allocate channel memory, %d\n", ret);
return ret;
}
@ -75,32 +73,31 @@ nvc0_grctx_init(struct drm_device *priv, struct nvc0_graph_priv *oprv,
nv_wo32(chan, 0x0210, 0x00080004);
nv_wo32(chan, 0x0214, 0x00000000);
nvimem_flush(priv);
bar->flush(bar);
nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
nv_wr32(priv, 0x100cbc, 0x80000001);
nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
/* setup default state for mmio list construction */
info->dev = priv;
info->data = oprv->mmio_data;
info->mmio = oprv->mmio_list;
info->data = priv->mmio_data;
info->mmio = priv->mmio_list;
info->addr = 0x2000 + (i * 8);
info->priv = oprv;
info->priv = priv;
info->buffer_nr = 0;
if (oprv->firmware) {
if (priv->firmware) {
nv_wr32(priv, 0x409840, 0x00000030);
nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
nv_wr32(priv, 0x409504, 0x00000003);
if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
NV_ERROR(priv, "load_ctx timeout\n");
nv_error(priv, "load_ctx timeout\n");
nv_wo32(chan, 0x8001c, 1);
nv_wo32(chan, 0x80020, 0);
nv_wo32(chan, 0x80028, 0);
nv_wo32(chan, 0x8002c, 0);
nvimem_flush(priv);
bar->flush(bar);
return 0;
}
@ -109,7 +106,7 @@ nvc0_grctx_init(struct drm_device *priv, struct nvc0_graph_priv *oprv,
nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
nv_wr32(priv, 0x409504, 0x00000001);
if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
NV_ERROR(priv, "HUB_SET_CHAN timeout\n");
nv_error(priv, "HUB_SET_CHAN timeout\n");
nvc0_graph_ctxctl_debug(priv);
nouveau_gpuobj_ref(NULL, &info->chan);
return -EBUSY;
@ -135,6 +132,8 @@ nvc0_grctx_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
void
nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf)
{
struct nvc0_graph_priv *priv = info->priv;
info->mmio->addr = addr;
info->mmio->data = data;
info->mmio->shift = shift;
@ -143,7 +142,7 @@ nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf)
if (shift)
data |= info->buffer[buf] >> shift;
nv_wr32(info->dev, addr, data);
nv_wr32(priv, addr, data);
}
int
@ -153,11 +152,11 @@ nvc0_grctx_fini(struct nvc0_grctx *info)
int i;
if (priv->firmware) {
nv_wr32(info->dev, 0x409840, 0x00000003);
nv_wr32(info->dev, 0x409500, 0x80000000 | info->chan->addr >> 12);
nv_wr32(info->dev, 0x409504, 0x00000009);
if (!nv_wait(info->dev, 0x409800, 0x00000001, 0x00000000)) {
NV_ERROR(info->dev, "unload_ctx timeout\n");
nv_wr32(priv, 0x409840, 0x00000003);
nv_wr32(priv, 0x409500, 0x80000000 | info->chan->addr >> 12);
nv_wr32(priv, 0x409504, 0x00000009);
if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000000)) {
nv_error(priv, "unload_ctx timeout\n");
return -EBUSY;
}
@ -165,12 +164,12 @@ nvc0_grctx_fini(struct nvc0_grctx *info)
}
/* HUB_FUC(CTX_SAVE) */
nv_wr32(info->dev, 0x409840, 0x80000000);
nv_wr32(info->dev, 0x409500, 0x80000000 | info->chan->addr >> 12);
nv_wr32(info->dev, 0x409504, 0x00000002);
if (!nv_wait(info->dev, 0x409800, 0x80000000, 0x80000000)) {
NV_ERROR(info->dev, "HUB_CTX_SAVE timeout\n");
nvc0_graph_ctxctl_debug(info->dev);
nv_wr32(priv, 0x409840, 0x80000000);
nv_wr32(priv, 0x409500, 0x80000000 | info->chan->addr >> 12);
nv_wr32(priv, 0x409504, 0x00000002);
if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
nv_error(priv, "HUB_CTX_SAVE timeout\n");
nvc0_graph_ctxctl_debug(priv);
return -EBUSY;
}
@ -186,7 +185,7 @@ save:
}
static void
nvc0_grctx_generate_9097(struct drm_device *priv)
nvc0_grctx_generate_9097(struct nvc0_graph_priv *priv)
{
u32 fermi = nvc0_graph_class(priv);
u32 mthd;
@ -1343,7 +1342,7 @@ nvc0_grctx_generate_9097(struct drm_device *priv)
}
static void
nvc0_grctx_generate_9197(struct drm_device *priv)
nvc0_grctx_generate_9197(struct nvc0_graph_priv *priv)
{
u32 fermi = nvc0_graph_class(priv);
u32 mthd;
@ -1356,7 +1355,7 @@ nvc0_grctx_generate_9197(struct drm_device *priv)
}
static void
nvc0_grctx_generate_9297(struct drm_device *priv)
nvc0_grctx_generate_9297(struct nvc0_graph_priv *priv)
{
u32 fermi = nvc0_graph_class(priv);
u32 mthd;
@ -1374,7 +1373,7 @@ nvc0_grctx_generate_9297(struct drm_device *priv)
}
static void
nvc0_grctx_generate_902d(struct drm_device *priv)
nvc0_grctx_generate_902d(struct nvc0_graph_priv *priv)
{
nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
@ -1396,7 +1395,7 @@ nvc0_grctx_generate_902d(struct drm_device *priv)
}
static void
nvc0_grctx_generate_9039(struct drm_device *priv)
nvc0_grctx_generate_9039(struct nvc0_graph_priv *priv)
{
nv_mthd(priv, 0x9039, 0x030c, 0x00000000);
nv_mthd(priv, 0x9039, 0x0310, 0x00000000);
@ -1409,12 +1408,11 @@ nvc0_grctx_generate_9039(struct drm_device *priv)
}
static void
nvc0_grctx_generate_90c0(struct drm_device *priv)
nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
{
struct drm_nouveau_private *dev_priv = priv->dev_private;
int i;
for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
@ -1430,7 +1428,7 @@ nvc0_grctx_generate_90c0(struct drm_device *priv)
nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
@ -1458,7 +1456,7 @@ nvc0_grctx_generate_90c0(struct drm_device *priv)
}
static void
nvc0_grctx_generate_dispatch(struct drm_device *priv)
nvc0_grctx_generate_dispatch(struct nvc0_graph_priv *priv)
{
int i;
@ -1511,7 +1509,7 @@ nvc0_grctx_generate_dispatch(struct drm_device *priv)
}
static void
nvc0_grctx_generate_macro(struct drm_device *priv)
nvc0_grctx_generate_macro(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404404, 0x00000000);
nv_wr32(priv, 0x404408, 0x00000000);
@ -1536,7 +1534,7 @@ nvc0_grctx_generate_macro(struct drm_device *priv)
}
static void
nvc0_grctx_generate_m2mf(struct drm_device *priv)
nvc0_grctx_generate_m2mf(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404604, 0x00000015);
nv_wr32(priv, 0x404608, 0x00000000);
@ -1600,7 +1598,7 @@ nvc0_grctx_generate_m2mf(struct drm_device *priv)
}
static void
nvc0_grctx_generate_unk47xx(struct drm_device *priv)
nvc0_grctx_generate_unk47xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404700, 0x00000000);
nv_wr32(priv, 0x404704, 0x00000000);
@ -1627,16 +1625,15 @@ nvc0_grctx_generate_unk47xx(struct drm_device *priv)
}
static void
nvc0_grctx_generate_shaders(struct drm_device *priv)
nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
{
struct drm_nouveau_private *dev_priv = priv->dev_private;
if (dev_priv->chipset == 0xd9) {
if (nv_device(priv)->chipset == 0xd9) {
nv_wr32(priv, 0x405800, 0x0f8000bf);
nv_wr32(priv, 0x405830, 0x02180218);
nv_wr32(priv, 0x405834, 0x08000000);
} else
if (dev_priv->chipset == 0xc1) {
if (nv_device(priv)->chipset == 0xc1) {
nv_wr32(priv, 0x405800, 0x0f8000bf);
nv_wr32(priv, 0x405830, 0x02180218);
nv_wr32(priv, 0x405834, 0x00000000);
@ -1657,7 +1654,7 @@ nvc0_grctx_generate_shaders(struct drm_device *priv)
}
static void
nvc0_grctx_generate_unk60xx(struct drm_device *priv)
nvc0_grctx_generate_unk60xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x406020, 0x000103c1);
nv_wr32(priv, 0x406028, 0x00000001);
@ -1667,25 +1664,24 @@ nvc0_grctx_generate_unk60xx(struct drm_device *priv)
}
static void
nvc0_grctx_generate_unk64xx(struct drm_device *priv)
nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
{
struct drm_nouveau_private *dev_priv = priv->dev_private;
nv_wr32(priv, 0x4064a8, 0x00000000);
nv_wr32(priv, 0x4064ac, 0x00003fff);
nv_wr32(priv, 0x4064b4, 0x00000000);
nv_wr32(priv, 0x4064b8, 0x00000000);
if (dev_priv->chipset == 0xd9)
if (nv_device(priv)->chipset == 0xd9)
nv_wr32(priv, 0x4064bc, 0x00000000);
if (dev_priv->chipset == 0xc1 ||
dev_priv->chipset == 0xd9) {
if (nv_device(priv)->chipset == 0xc1 ||
nv_device(priv)->chipset == 0xd9) {
nv_wr32(priv, 0x4064c0, 0x80140078);
nv_wr32(priv, 0x4064c4, 0x0086ffff);
}
}
static void
nvc0_grctx_generate_tpbus(struct drm_device *priv)
nvc0_grctx_generate_tpbus(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x407804, 0x00000023);
nv_wr32(priv, 0x40780c, 0x0a418820);
@ -1698,7 +1694,7 @@ nvc0_grctx_generate_tpbus(struct drm_device *priv)
}
static void
nvc0_grctx_generate_ccache(struct drm_device *priv)
nvc0_grctx_generate_ccache(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x408000, 0x00000000);
nv_wr32(priv, 0x408004, 0x00000000);
@ -1711,10 +1707,9 @@ nvc0_grctx_generate_ccache(struct drm_device *priv)
}
static void
nvc0_grctx_generate_rop(struct drm_device *priv)
nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
{
struct drm_nouveau_private *dev_priv = priv->dev_private;
int chipset = dev_priv->chipset;
int chipset = nv_device(priv)->chipset;
/* ROPC_BROADCAST */
nv_wr32(priv, 0x408800, 0x02802a3c);
@ -1741,10 +1736,9 @@ nvc0_grctx_generate_rop(struct drm_device *priv)
}
static void
nvc0_grctx_generate_gpc(struct drm_device *priv)
nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
{
struct drm_nouveau_private *dev_priv = priv->dev_private;
int chipset = dev_priv->chipset;
int chipset = nv_device(priv)->chipset;
int i;
/* GPC_BROADCAST */
@ -1834,10 +1828,9 @@ nvc0_grctx_generate_gpc(struct drm_device *priv)
}
static void
nvc0_grctx_generate_tp(struct drm_device *priv)
nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
{
struct drm_nouveau_private *dev_priv = priv->dev_private;
int chipset = dev_priv->chipset;
int chipset = nv_device(priv)->chipset;
/* GPC_BROADCAST.TP_BROADCAST */
nv_wr32(priv, 0x419818, 0x00000000);
@ -1876,7 +1869,7 @@ nvc0_grctx_generate_tp(struct drm_device *priv)
nv_wr32(priv, 0x419c04, 0x00000006);
nv_wr32(priv, 0x419c08, 0x00000002);
nv_wr32(priv, 0x419c20, 0x00000000);
if (dev_priv->chipset == 0xd9) {
if (nv_device(priv)->chipset == 0xd9) {
nv_wr32(priv, 0x419c24, 0x00084210);
nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
nv_wr32(priv, 0x419cb0, 0x00020048);
@ -1929,16 +1922,14 @@ nvc0_grctx_generate_tp(struct drm_device *priv)
}
int
nvc0_grctx_generate(struct drm_device *priv)
nvc0_grctx_generate(struct nvc0_graph_priv *priv)
{
struct drm_nouveau_private *dev_priv = priv->dev_private;
struct nvc0_graph_priv *oprv = nv_engine(priv, NVOBJ_ENGINE_GR);
struct nvc0_grctx info;
int ret, i, gpc, tpc, id;
u32 fermi = nvc0_graph_class(priv);
u32 r000260, tmp;
ret = nvc0_grctx_init(priv, oprv, &info);
ret = nvc0_grctx_init(priv, &info);
if (ret)
return ret;
@ -1975,11 +1966,11 @@ nvc0_grctx_generate(struct drm_device *priv)
mmio_list(0x419008, 0x00000000, 0, 0);
mmio_list(0x418808, 0x00000000, 8, 0);
mmio_list(0x41880c, 0x80000018, 0, 0);
if (dev_priv->chipset != 0xc1) {
if (nv_device(priv)->chipset != 0xc1) {
tmp = 0x02180000;
mmio_list(0x405830, tmp, 0, 0);
for (gpc = 0; gpc < oprv->gpc_nr; gpc++) {
for (tpc = 0; tpc < oprv->tpc_nr[gpc]; tpc++) {
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
mmio_list(reg, tmp, 0, 0);
tmp += 0x0324;
@ -1989,13 +1980,13 @@ nvc0_grctx_generate(struct drm_device *priv)
tmp = 0x02180000;
mmio_list(0x405830, 0x00000218 | tmp, 0, 0);
mmio_list(0x4064c4, 0x0086ffff, 0, 0);
for (gpc = 0; gpc < oprv->gpc_nr; gpc++) {
for (tpc = 0; tpc < oprv->tpc_nr[gpc]; tpc++) {
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
mmio_list(reg, 0x10000000 | tmp, 0, 0);
tmp += 0x0324;
}
for (tpc = 0; tpc < oprv->tpc_nr[gpc]; tpc++) {
for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
u32 reg = TPC_UNIT(gpc, tpc, 0x0544);
mmio_list(reg, tmp, 0, 0);
tmp += 0x0324;
@ -2004,8 +1995,8 @@ nvc0_grctx_generate(struct drm_device *priv)
}
for (tpc = 0, id = 0; tpc < 4; tpc++) {
for (gpc = 0; gpc < oprv->gpc_nr; gpc++) {
if (tpc < oprv->tpc_nr[gpc]) {
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
if (tpc < priv->tpc_nr[gpc]) {
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
@ -2013,14 +2004,14 @@ nvc0_grctx_generate(struct drm_device *priv)
id++;
}
nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), oprv->tpc_nr[gpc]);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), oprv->tpc_nr[gpc]);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
}
}
tmp = 0;
for (i = 0; i < oprv->gpc_nr; i++)
tmp |= oprv->tpc_nr[i] << (i * 4);
for (i = 0; i < priv->gpc_nr; i++)
tmp |= priv->tpc_nr[i] << (i * 4);
nv_wr32(priv, 0x406028, tmp);
nv_wr32(priv, 0x405870, tmp);
@ -2034,13 +2025,13 @@ nvc0_grctx_generate(struct drm_device *priv)
if (1) {
u8 tpcnr[GPC_MAX], data[TPC_MAX];
memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr));
memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
memset(data, 0x1f, sizeof(data));
gpc = -1;
for (tpc = 0; tpc < oprv->tpc_total; tpc++) {
for (tpc = 0; tpc < priv->tpc_total; tpc++) {
do {
gpc = (gpc + 1) % oprv->gpc_nr;
gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
tpcnr[gpc]--;
data[tpc] = gpc;
@ -2056,12 +2047,12 @@ nvc0_grctx_generate(struct drm_device *priv)
u8 shift, ntpcv;
/* calculate first set of magics */
memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr));
memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
gpc = -1;
for (tpc = 0; tpc < oprv->tpc_total; tpc++) {
for (tpc = 0; tpc < priv->tpc_total; tpc++) {
do {
gpc = (gpc + 1) % oprv->gpc_nr;
gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
tpcnr[gpc]--;
@ -2073,7 +2064,7 @@ nvc0_grctx_generate(struct drm_device *priv)
/* and the second... */
shift = 0;
ntpcv = oprv->tpc_total;
ntpcv = priv->tpc_total;
while (!(ntpcv & (1 << 4))) {
ntpcv <<= 1;
shift++;
@ -2086,22 +2077,22 @@ nvc0_grctx_generate(struct drm_device *priv)
data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
/* GPC_BROADCAST */
nv_wr32(priv, 0x418bb8, (oprv->tpc_total << 8) |
oprv->magic_not_rop_nr);
nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
priv->magic_not_rop_nr);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
/* GPC_BROADCAST.TP_BROADCAST */
nv_wr32(priv, 0x419bd0, (oprv->tpc_total << 8) |
oprv->magic_not_rop_nr |
nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
priv->magic_not_rop_nr |
data2[0]);
nv_wr32(priv, 0x419be4, data2[1]);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
/* UNK78xx */
nv_wr32(priv, 0x4078bc, (oprv->tpc_total << 8) |
oprv->magic_not_rop_nr);
nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
priv->magic_not_rop_nr);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x40780c + (i * 4), data[i]);
}
@ -2110,18 +2101,18 @@ nvc0_grctx_generate(struct drm_device *priv)
u32 tpc_mask = 0, tpc_set = 0;
u8 tpcnr[GPC_MAX], a, b;
memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr));
for (gpc = 0; gpc < oprv->gpc_nr; gpc++)
tpc_mask |= ((1 << oprv->tpc_nr[gpc]) - 1) << (gpc * 8);
memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
for (gpc = 0; gpc < priv->gpc_nr; gpc++)
tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
for (i = 0, gpc = -1, b = -1; i < 32; i++) {
a = (i * (oprv->tpc_total - 1)) / 32;
a = (i * (priv->tpc_total - 1)) / 32;
if (a != b) {
b = a;
do {
gpc = (gpc + 1) % oprv->gpc_nr;
gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
tpc = oprv->tpc_nr[gpc] - tpcnr[gpc]--;
tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
tpc_set |= 1 << ((gpc * 8) + tpc);
}
@ -2232,7 +2223,7 @@ nvc0_grctx_generate(struct drm_device *priv)
nv_icmd(priv, 0x00000215, 0x00000040);
nv_icmd(priv, 0x00000216, 0x00000040);
nv_icmd(priv, 0x00000217, 0x00000040);
if (dev_priv->chipset == 0xd9) {
if (nv_device(priv)->chipset == 0xd9) {
for (i = 0x0400; i <= 0x0417; i++)
nv_icmd(priv, i, 0x00000040);
}
@ -2244,7 +2235,7 @@ nvc0_grctx_generate(struct drm_device *priv)
nv_icmd(priv, 0x0000021d, 0x0000c080);
nv_icmd(priv, 0x0000021e, 0x0000c080);
nv_icmd(priv, 0x0000021f, 0x0000c080);
if (dev_priv->chipset == 0xd9) {
if (nv_device(priv)->chipset == 0xd9) {
for (i = 0x0440; i <= 0x0457; i++)
nv_icmd(priv, i, 0x0000c080);
}
@ -2810,8 +2801,8 @@ nvc0_grctx_generate(struct drm_device *priv)
nv_icmd(priv, 0x0000053f, 0xffff0000);
nv_icmd(priv, 0x00000585, 0x0000003f);
nv_icmd(priv, 0x00000576, 0x00000003);
if (dev_priv->chipset == 0xc1 ||
dev_priv->chipset == 0xd9)
if (nv_device(priv)->chipset == 0xc1 ||
nv_device(priv)->chipset == 0xd9)
nv_icmd(priv, 0x0000057b, 0x00000059);
nv_icmd(priv, 0x00000586, 0x00000040);
nv_icmd(priv, 0x00000582, 0x00000080);
@ -2913,7 +2904,7 @@ nvc0_grctx_generate(struct drm_device *priv)
nv_icmd(priv, 0x00000957, 0x00000003);
nv_icmd(priv, 0x0000095e, 0x20164010);
nv_icmd(priv, 0x0000095f, 0x00000020);
if (dev_priv->chipset == 0xd9)
if (nv_device(priv)->chipset == 0xd9)
nv_icmd(priv, 0x0000097d, 0x00000020);
nv_icmd(priv, 0x00000683, 0x00000006);
nv_icmd(priv, 0x00000685, 0x003fffff);
@ -3056,5 +3047,6 @@ nvc0_grctx_generate(struct drm_device *priv)
nvc0_grctx_generate_90c0(priv);
nv_wr32(priv, 0x000260, r000260);
return nvc0_grctx_fini(&info);
}

View File

@ -22,13 +22,10 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include <core/mm.h>
#include "nvc0.h"
static void
nve0_grctx_generate_icmd(struct drm_device *priv)
nve0_grctx_generate_icmd(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x400208, 0x80000000);
nv_icmd(priv, 0x001000, 0x00000004);
@ -916,7 +913,7 @@ nve0_grctx_generate_icmd(struct drm_device *priv)
}
static void
nve0_grctx_generate_a097(struct drm_device *priv)
nve0_grctx_generate_a097(struct nvc0_graph_priv *priv)
{
nv_mthd(priv, 0xa097, 0x0800, 0x00000000);
nv_mthd(priv, 0xa097, 0x0840, 0x00000000);
@ -2146,7 +2143,7 @@ nve0_grctx_generate_a097(struct drm_device *priv)
}
static void
nve0_grctx_generate_902d(struct drm_device *priv)
nve0_grctx_generate_902d(struct nvc0_graph_priv *priv)
{
nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
@ -2169,7 +2166,7 @@ nve0_grctx_generate_902d(struct drm_device *priv)
}
static void
nve0_graph_generate_unk40xx(struct drm_device *priv)
nve0_graph_generate_unk40xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404010, 0x0);
nv_wr32(priv, 0x404014, 0x0);
@ -2213,7 +2210,7 @@ nve0_graph_generate_unk40xx(struct drm_device *priv)
}
static void
nve0_graph_generate_unk44xx(struct drm_device *priv)
nve0_graph_generate_unk44xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404404, 0x0);
nv_wr32(priv, 0x404408, 0x0);
@ -2238,7 +2235,7 @@ nve0_graph_generate_unk44xx(struct drm_device *priv)
}
static void
nve0_graph_generate_unk46xx(struct drm_device *priv)
nve0_graph_generate_unk46xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404604, 0x14);
nv_wr32(priv, 0x404608, 0x0);
@ -2278,7 +2275,7 @@ nve0_graph_generate_unk46xx(struct drm_device *priv)
}
static void
nve0_graph_generate_unk47xx(struct drm_device *priv)
nve0_graph_generate_unk47xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404700, 0x0);
nv_wr32(priv, 0x404704, 0x0);
@ -2299,7 +2296,7 @@ nve0_graph_generate_unk47xx(struct drm_device *priv)
}
static void
nve0_graph_generate_unk58xx(struct drm_device *priv)
nve0_graph_generate_unk58xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x405800, 0xf8000bf);
nv_wr32(priv, 0x405830, 0x2180648);
@ -2318,7 +2315,7 @@ nve0_graph_generate_unk58xx(struct drm_device *priv)
}
static void
nve0_graph_generate_unk60xx(struct drm_device *priv)
nve0_graph_generate_unk60xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x406020, 0x4103c1);
nv_wr32(priv, 0x406028, 0x1);
@ -2328,7 +2325,7 @@ nve0_graph_generate_unk60xx(struct drm_device *priv)
}
static void
nve0_graph_generate_unk64xx(struct drm_device *priv)
nve0_graph_generate_unk64xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x4064a8, 0x0);
nv_wr32(priv, 0x4064ac, 0x3fff);
@ -2350,13 +2347,13 @@ nve0_graph_generate_unk64xx(struct drm_device *priv)
}
static void
nve0_graph_generate_unk70xx(struct drm_device *priv)
nve0_graph_generate_unk70xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x407040, 0x0);
}
static void
nve0_graph_generate_unk78xx(struct drm_device *priv)
nve0_graph_generate_unk78xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x407804, 0x23);
nv_wr32(priv, 0x40780c, 0xa418820);
@ -2369,7 +2366,7 @@ nve0_graph_generate_unk78xx(struct drm_device *priv)
}
static void
nve0_graph_generate_unk80xx(struct drm_device *priv)
nve0_graph_generate_unk80xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x408000, 0x0);
nv_wr32(priv, 0x408004, 0x0);
@ -2382,7 +2379,7 @@ nve0_graph_generate_unk80xx(struct drm_device *priv)
}
static void
nve0_graph_generate_unk88xx(struct drm_device *priv)
nve0_graph_generate_unk88xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x408800, 0x2802a3c);
nv_wr32(priv, 0x408804, 0x40);
@ -2395,7 +2392,7 @@ nve0_graph_generate_unk88xx(struct drm_device *priv)
}
static void
nve0_graph_generate_gpc(struct drm_device *priv)
nve0_graph_generate_gpc(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x418380, 0x16);
nv_wr32(priv, 0x418400, 0x38004e00);
@ -2521,7 +2518,7 @@ nve0_graph_generate_gpc(struct drm_device *priv)
}
static void
nve0_graph_generate_tpc(struct drm_device *priv)
nve0_graph_generate_tpc(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x419848, 0x0);
nv_wr32(priv, 0x419864, 0x129);
@ -2586,7 +2583,7 @@ nve0_graph_generate_tpc(struct drm_device *priv)
}
static void
nve0_graph_generate_tpcunk(struct drm_device *priv)
nve0_graph_generate_tpcunk(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x41be24, 0x6);
nv_wr32(priv, 0x41bec0, 0x12180000);
@ -2604,9 +2601,8 @@ nve0_graph_generate_tpcunk(struct drm_device *priv)
}
int
nve0_grctx_generate(struct drm_device *priv)
nve0_grctx_generate(struct nvc0_graph_priv *priv)
{
struct nvc0_graph_priv *oprv = nv_engine(priv, NVOBJ_ENGINE_GR);
struct nvc0_grctx info;
int ret, i, gpc, tpc, id;
u32 data[6] = {}, data2[2] = {}, tmp;
@ -2615,7 +2611,7 @@ nve0_grctx_generate(struct drm_device *priv)
u8 tpcnr[GPC_MAX], a, b;
u8 shift, ntpcv;
ret = nvc0_grctx_init(priv, oprv, &info);
ret = nvc0_grctx_init(priv, &info);
if (ret)
return ret;
@ -2657,17 +2653,17 @@ nve0_grctx_generate(struct drm_device *priv)
mmio_list(0x419848, 0x10000000, 12, 2);
mmio_list(0x405830, 0x02180648, 0, 0);
mmio_list(0x4064c4, 0x0192ffff, 0, 0);
for (gpc = 0, offset = 0; gpc < oprv->gpc_nr; gpc++) {
u16 magic0 = 0x0218 * oprv->tpc_nr[gpc];
u16 magic1 = 0x0648 * oprv->tpc_nr[gpc];
for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
magic[gpc][1] = 0x00000000 | (magic1 << 16);
offset += 0x0324 * oprv->tpc_nr[gpc];
offset += 0x0324 * priv->tpc_nr[gpc];
}
for (gpc = 0; gpc < oprv->gpc_nr; gpc++) {
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
offset += 0x07ff * oprv->tpc_nr[gpc];
offset += 0x07ff * priv->tpc_nr[gpc];
}
mmio_list(0x17e91c, 0x06060609, 0, 0);
mmio_list(0x17e920, 0x00090a05, 0, 0);
@ -2680,22 +2676,22 @@ nve0_grctx_generate(struct drm_device *priv)
nv_wr32(priv, 0x419c00, 0xa);
for (tpc = 0, id = 0; tpc < 4; tpc++) {
for (gpc = 0; gpc < oprv->gpc_nr; gpc++) {
if (tpc < oprv->tpc_nr[gpc]) {
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
if (tpc < priv->tpc_nr[gpc]) {
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0698), id);
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x04e8), id);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0088), id++);
}
nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), oprv->tpc_nr[gpc]);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), oprv->tpc_nr[gpc]);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
}
}
tmp = 0;
for (i = 0; i < oprv->gpc_nr; i++)
tmp |= oprv->tpc_nr[i] << (i * 4);
for (i = 0; i < priv->gpc_nr; i++)
tmp |= priv->tpc_nr[i] << (i * 4);
nv_wr32(priv, 0x406028, tmp);
nv_wr32(priv, 0x405870, tmp);
@ -2707,12 +2703,12 @@ nve0_grctx_generate(struct drm_device *priv)
nv_wr32(priv, 0x40587c, 0x0);
/* calculate first set of magics */
memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr));
memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
gpc = -1;
for (tpc = 0; tpc < oprv->tpc_total; tpc++) {
for (tpc = 0; tpc < priv->tpc_total; tpc++) {
do {
gpc = (gpc + 1) % oprv->gpc_nr;
gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
tpcnr[gpc]--;
@ -2724,7 +2720,7 @@ nve0_grctx_generate(struct drm_device *priv)
/* and the second... */
shift = 0;
ntpcv = oprv->tpc_total;
ntpcv = priv->tpc_total;
while (!(ntpcv & (1 << 4))) {
ntpcv <<= 1;
shift++;
@ -2733,13 +2729,13 @@ nve0_grctx_generate(struct drm_device *priv)
data2[0] = ntpcv << 16;
data2[0] |= shift << 21;
data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
data2[0] |= oprv->tpc_total << 8;
data2[0] |= oprv->magic_not_rop_nr;
data2[0] |= priv->tpc_total << 8;
data2[0] |= priv->magic_not_rop_nr;
for (i = 1; i < 7; i++)
data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
/* and write it all the various parts of PGRAPH */
nv_wr32(priv, 0x418bb8, (oprv->tpc_total << 8) | oprv->magic_not_rop_nr);
nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
@ -2748,23 +2744,23 @@ nve0_grctx_generate(struct drm_device *priv)
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
nv_wr32(priv, 0x4078bc, (oprv->tpc_total << 8) | oprv->magic_not_rop_nr);
nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x40780c + (i * 4), data[i]);
memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr));
for (gpc = 0; gpc < oprv->gpc_nr; gpc++)
tpc_mask |= ((1 << oprv->tpc_nr[gpc]) - 1) << (gpc * 8);
memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
for (gpc = 0; gpc < priv->gpc_nr; gpc++)
tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
for (i = 0, gpc = -1, b = -1; i < 32; i++) {
a = (i * (oprv->tpc_total - 1)) / 32;
a = (i * (priv->tpc_total - 1)) / 32;
if (a != b) {
b = a;
do {
gpc = (gpc + 1) % oprv->gpc_nr;
gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
tpc = oprv->tpc_nr[gpc] - tpcnr[gpc]--;
tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
tpc_set |= 1 << ((gpc * 8) + tpc);
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,31 @@
#ifndef __NV20_GRAPH_H__
#define __NV20_GRAPH_H__
#include <core/enum.h>
#include <engine/graph.h>
#include <engine/fifo.h>
struct nv20_graph_priv {
struct nouveau_graph base;
struct nouveau_gpuobj *ctxtab;
};
struct nv20_graph_chan {
struct nouveau_graph_chan base;
int chid;
};
extern struct nouveau_oclass nv25_graph_sclass[];
int nv20_graph_context_init(struct nouveau_object *);
int nv20_graph_context_fini(struct nouveau_object *, bool);
void nv20_graph_tile_prog(struct nouveau_engine *, int);
void nv20_graph_intr(struct nouveau_subdev *);
void nv20_graph_dtor(struct nouveau_object *);
int nv20_graph_init(struct nouveau_object *);
int nv30_graph_init(struct nouveau_object *);
#endif

View File

@ -0,0 +1,167 @@
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <engine/graph.h>
#include "nv20.h"
#include "regs.h"
/*******************************************************************************
* Graphics object classes
******************************************************************************/
struct nouveau_oclass
nv25_graph_sclass[] = {
{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
{ 0x0096, &nv04_graph_ofuncs, NULL }, /* celcius */
{ 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
{ 0x0597, &nv04_graph_ofuncs, NULL }, /* kelvin */
{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static int
nv25_graph_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv20_graph_chan *chan;
int ret, i;
ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x3724,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
chan->chid = nouveau_fifo_chan(parent)->chid;
nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
nv_wo32(chan, 0x035c, 0xffff0000);
nv_wo32(chan, 0x03c0, 0x0fff0000);
nv_wo32(chan, 0x03c4, 0x0fff0000);
nv_wo32(chan, 0x049c, 0x00000101);
nv_wo32(chan, 0x04b0, 0x00000111);
nv_wo32(chan, 0x04c8, 0x00000080);
nv_wo32(chan, 0x04cc, 0xffff0000);
nv_wo32(chan, 0x04d0, 0x00000001);
nv_wo32(chan, 0x04e4, 0x44400000);
nv_wo32(chan, 0x04fc, 0x4b800000);
for (i = 0x0510; i <= 0x051c; i += 4)
nv_wo32(chan, i, 0x00030303);
for (i = 0x0530; i <= 0x053c; i += 4)
nv_wo32(chan, i, 0x00080000);
for (i = 0x0548; i <= 0x0554; i += 4)
nv_wo32(chan, i, 0x01012000);
for (i = 0x0558; i <= 0x0564; i += 4)
nv_wo32(chan, i, 0x000105b8);
for (i = 0x0568; i <= 0x0574; i += 4)
nv_wo32(chan, i, 0x00080008);
for (i = 0x0598; i <= 0x05d4; i += 4)
nv_wo32(chan, i, 0x07ff0000);
nv_wo32(chan, 0x05e0, 0x4b7fffff);
nv_wo32(chan, 0x0620, 0x00000080);
nv_wo32(chan, 0x0624, 0x30201000);
nv_wo32(chan, 0x0628, 0x70605040);
nv_wo32(chan, 0x062c, 0xb0a09080);
nv_wo32(chan, 0x0630, 0xf0e0d0c0);
nv_wo32(chan, 0x0664, 0x00000001);
nv_wo32(chan, 0x066c, 0x00004000);
nv_wo32(chan, 0x0678, 0x00000001);
nv_wo32(chan, 0x0680, 0x00040000);
nv_wo32(chan, 0x0684, 0x00010000);
for (i = 0x1b04; i <= 0x2374; i += 16) {
nv_wo32(chan, (i + 0), 0x10700ff9);
nv_wo32(chan, (i + 4), 0x0436086c);
nv_wo32(chan, (i + 8), 0x000c001b);
}
nv_wo32(chan, 0x2704, 0x3f800000);
nv_wo32(chan, 0x2718, 0x3f800000);
nv_wo32(chan, 0x2744, 0x40000000);
nv_wo32(chan, 0x2748, 0x3f800000);
nv_wo32(chan, 0x274c, 0x3f000000);
nv_wo32(chan, 0x2754, 0x40000000);
nv_wo32(chan, 0x2758, 0x3f800000);
nv_wo32(chan, 0x2760, 0xbf800000);
nv_wo32(chan, 0x2768, 0xbf800000);
nv_wo32(chan, 0x308c, 0x000fe000);
nv_wo32(chan, 0x3108, 0x000003f8);
nv_wo32(chan, 0x3468, 0x002fe000);
for (i = 0x3484; i <= 0x34a0; i += 4)
nv_wo32(chan, i, 0x001c527c);
return 0;
}
static struct nouveau_oclass
nv25_graph_cclass = {
.handle = NV_ENGCTX(GR, 0x25),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv25_graph_context_ctor,
.dtor = _nouveau_graph_context_dtor,
.init = nv20_graph_context_init,
.fini = nv20_graph_context_fini,
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static int
nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv20_graph_priv *priv;
int ret;
ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
nv_subdev(priv)->intr = nv20_graph_intr;
nv_engine(priv)->cclass = &nv25_graph_cclass;
nv_engine(priv)->sclass = nv25_graph_sclass;
nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
return 0;
}
struct nouveau_oclass
nv25_graph_oclass = {
.handle = NV_ENGINE(GR, 0x25),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv25_graph_ctor,
.dtor = nv20_graph_dtor,
.init = nv20_graph_init,
.fini = _nouveau_graph_fini,
},
};

View File

@ -0,0 +1,134 @@
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <engine/graph.h>
#include "nv20.h"
#include "regs.h"
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static int
nv2a_graph_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv20_graph_chan *chan;
int ret, i;
ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x36b0,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
chan->chid = nouveau_fifo_chan(parent)->chid;
nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
nv_wo32(chan, 0x033c, 0xffff0000);
nv_wo32(chan, 0x03a0, 0x0fff0000);
nv_wo32(chan, 0x03a4, 0x0fff0000);
nv_wo32(chan, 0x047c, 0x00000101);
nv_wo32(chan, 0x0490, 0x00000111);
nv_wo32(chan, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
nv_wo32(chan, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
nv_wo32(chan, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
nv_wo32(chan, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
nv_wo32(chan, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
nv_wo32(chan, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
nv_wo32(chan, i, 0x07ff0000);
nv_wo32(chan, 0x05a4, 0x4b7fffff);
nv_wo32(chan, 0x05fc, 0x00000001);
nv_wo32(chan, 0x0604, 0x00004000);
nv_wo32(chan, 0x0610, 0x00000001);
nv_wo32(chan, 0x0618, 0x00040000);
nv_wo32(chan, 0x061c, 0x00010000);
for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
nv_wo32(chan, (i + 0), 0x10700ff9);
nv_wo32(chan, (i + 4), 0x0436086c);
nv_wo32(chan, (i + 8), 0x000c001b);
}
nv_wo32(chan, 0x269c, 0x3f800000);
nv_wo32(chan, 0x26b0, 0x3f800000);
nv_wo32(chan, 0x26dc, 0x40000000);
nv_wo32(chan, 0x26e0, 0x3f800000);
nv_wo32(chan, 0x26e4, 0x3f000000);
nv_wo32(chan, 0x26ec, 0x40000000);
nv_wo32(chan, 0x26f0, 0x3f800000);
nv_wo32(chan, 0x26f8, 0xbf800000);
nv_wo32(chan, 0x2700, 0xbf800000);
nv_wo32(chan, 0x3024, 0x000fe000);
nv_wo32(chan, 0x30a0, 0x000003f8);
nv_wo32(chan, 0x33fc, 0x002fe000);
for (i = 0x341c; i <= 0x3438; i += 4)
nv_wo32(chan, i, 0x001c527c);
return 0;
}
static struct nouveau_oclass
nv2a_graph_cclass = {
.handle = NV_ENGCTX(GR, 0x2a),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv2a_graph_context_ctor,
.dtor = _nouveau_graph_context_dtor,
.init = nv20_graph_context_init,
.fini = nv20_graph_context_fini,
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static int
nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv20_graph_priv *priv;
int ret;
ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
nv_subdev(priv)->intr = nv20_graph_intr;
nv_engine(priv)->cclass = &nv2a_graph_cclass;
nv_engine(priv)->sclass = nv25_graph_sclass;
nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
return 0;
}
struct nouveau_oclass
nv2a_graph_oclass = {
.handle = NV_ENGINE(GR, 0x2a),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv2a_graph_ctor,
.dtor = nv20_graph_dtor,
.init = nv20_graph_init,
.fini = _nouveau_graph_fini,
},
};

View File

@ -0,0 +1,238 @@
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <engine/graph.h>
#include "nv20.h"
#include "regs.h"
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nouveau_oclass
nv30_graph_sclass[] = {
{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
{ 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
{ 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
{ 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
{ 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
{ 0x0397, &nv04_graph_ofuncs, NULL }, /* rankine */
{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
/* Allocate a 0x5f48-byte PGRAPH context for an NV30 channel and fill in
 * the hardware's default register image.  The magic offsets/values come
 * from the NV30 context layout; the channel id is stamped into 0x0028 so
 * PGRAPH can associate the image with its FIFO channel. */
static int
nv30_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv20_graph_chan *chan;
	int ret, i;

	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x5f48,
					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	chan->chid = nouveau_fifo_chan(parent)->chid;

	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
	nv_wo32(chan, 0x0410, 0x00000101);
	nv_wo32(chan, 0x0424, 0x00000111);
	nv_wo32(chan, 0x0428, 0x00000060);
	nv_wo32(chan, 0x0444, 0x00000080);
	nv_wo32(chan, 0x0448, 0xffff0000);
	nv_wo32(chan, 0x044c, 0x00000001);
	nv_wo32(chan, 0x0460, 0x44400000);
	nv_wo32(chan, 0x048c, 0xffff0000);
	for (i = 0x04e0; i < 0x04e8; i += 4)
		nv_wo32(chan, i, 0x0fff0000);
	nv_wo32(chan, 0x04ec, 0x00011100);
	for (i = 0x0508; i < 0x0548; i += 4)
		nv_wo32(chan, i, 0x07ff0000);
	nv_wo32(chan, 0x0550, 0x4b7fffff);
	nv_wo32(chan, 0x058c, 0x00000080);
	nv_wo32(chan, 0x0590, 0x30201000);
	nv_wo32(chan, 0x0594, 0x70605040);
	nv_wo32(chan, 0x0598, 0xb8a89888);
	nv_wo32(chan, 0x059c, 0xf8e8d8c8);
	nv_wo32(chan, 0x05b0, 0xb0000000);
	for (i = 0x0600; i < 0x0640; i += 4)
		nv_wo32(chan, i, 0x00010588);
	for (i = 0x0640; i < 0x0680; i += 4)
		nv_wo32(chan, i, 0x00030303);
	for (i = 0x06c0; i < 0x0700; i += 4)
		nv_wo32(chan, i, 0x0008aae4);
	for (i = 0x0700; i < 0x0740; i += 4)
		nv_wo32(chan, i, 0x01012000);
	for (i = 0x0740; i < 0x0780; i += 4)
		nv_wo32(chan, i, 0x00080008);
	nv_wo32(chan, 0x085c, 0x00040000);
	nv_wo32(chan, 0x0860, 0x00010000);
	for (i = 0x0864; i < 0x0874; i += 4)
		nv_wo32(chan, i, 0x00040004);
	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
		/* nv_wo32() offsets are in bytes: the original "+ 1"/"+ 2"
		 * strides overlapped the first word instead of filling three
		 * consecutive 32-bit words.  Use 4-byte strides, matching the
		 * nv35 context ctor and the later upstream fix. */
		nv_wo32(chan, i + 0, 0x10700ff9);
		nv_wo32(chan, i + 4, 0x0436086c);
		nv_wo32(chan, i + 8, 0x000c001b);
	}
	for (i = 0x30b8; i < 0x30c8; i += 4)
		nv_wo32(chan, i, 0x0000ffff);
	nv_wo32(chan, 0x344c, 0x3f800000);
	nv_wo32(chan, 0x3808, 0x3f800000);
	nv_wo32(chan, 0x381c, 0x3f800000);
	nv_wo32(chan, 0x3848, 0x40000000);
	nv_wo32(chan, 0x384c, 0x3f800000);
	nv_wo32(chan, 0x3850, 0x3f000000);
	nv_wo32(chan, 0x3858, 0x40000000);
	nv_wo32(chan, 0x385c, 0x3f800000);
	nv_wo32(chan, 0x3864, 0xbf800000);
	nv_wo32(chan, 0x386c, 0xbf800000);
	return 0;
}
/* PGRAPH context class for NV30: chipset-specific construction, with the
 * shared nv20 init/fini handling load/unload of the context image. */
static struct nouveau_oclass
nv30_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x30),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv30_graph_context_ctor,
		.dtor = _nouveau_graph_context_dtor,
		.init = nv20_graph_context_init,
		.fini = nv20_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
/* Construct the NV30 PGRAPH engine: allocate the engine object, create the
 * 32-entry channel context table, and wire up interrupt, class and tiling
 * callbacks (interrupts and tiling are shared with nv20). */
static int
nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nouveau_subdev *subdev;
	struct nouveau_engine *gr;
	struct nv20_graph_priv *priv;
	int ret;

	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* one context pointer slot per FIFO channel */
	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
	if (ret)
		return ret;

	subdev = nv_subdev(priv);
	subdev->unit = 0x00001000;
	subdev->intr = nv20_graph_intr;

	gr = nv_engine(priv);
	gr->tile_prog = nv20_graph_tile_prog;
	gr->sclass = nv30_graph_sclass;
	gr->cclass = &nv30_graph_cclass;
	return 0;
}
/* Bring up NV30-family PGRAPH: point the hardware at the context table,
 * clear/enable interrupts, program the DEBUG/tuning registers, disable all
 * tiling regions and mirror the VRAM config into PGRAPH.  Shared by the
 * nv30/nv34/nv35 engines; write order follows the hardware bring-up
 * sequence and should not be rearranged. */
int
nv30_graph_init(struct nouveau_object *object)
{
	struct nouveau_engine *engine = nv_engine(object);
	struct nv20_graph_priv *priv = (void *)engine;
	struct nouveau_fb *pfb = nouveau_fb(object);
	int ret, i;

	ret = nouveau_graph_init(&priv->base);
	if (ret)
		return ret;

	/* tell PGRAPH where the per-channel context pointers live */
	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);

	/* ack any stale interrupts, then enable all sources */
	nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(priv, 0x400890, 0x01b463ff);
	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
	nv_wr32(priv, 0x400B80, 0x1003d888);
	nv_wr32(priv, 0x400B84, 0x0c000000);
	nv_wr32(priv, 0x400098, 0x00000000);
	nv_wr32(priv, 0x40009C, 0x0005ad00);
	nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
	nv_wr32(priv, 0x4000a0, 0x00000000);
	nv_wr32(priv, 0x4000a4, 0x00000008);
	nv_wr32(priv, 0x4008a8, 0xb784a400);
	nv_wr32(priv, 0x400ba0, 0x002f8685);
	nv_wr32(priv, 0x400ba4, 0x00231f3f);
	nv_wr32(priv, 0x4008a4, 0x40000020);

	/* NV34-only RDI pokes (indirect register interface) */
	if (nv_device(priv)->chipset == 0x34) {
		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201);
		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
	}

	nv_wr32(priv, 0x4000c0, 0x00000016);

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->tile.regions; i++)
		engine->tile_prog(engine, i);

	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
	nv_wr32(priv, 0x0040075c             , 0x00000001);

	/* begin RAM config */
	/* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */
	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
	if (nv_device(priv)->chipset != 0x34) {
		nv_wr32(priv, 0x400750, 0x00EA0000);
		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200));
		nv_wr32(priv, 0x400750, 0x00EA0004);
		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204));
	}
	return 0;
}
/* Engine descriptor for NV30 (Rankine) PGRAPH; teardown is shared with
 * nv20, init is the common nv30-family bring-up above. */
struct nouveau_oclass
nv30_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x30),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv30_graph_ctor,
		.dtor = nv20_graph_dtor,
		.init = nv30_graph_init,
		.fini = _nouveau_graph_fini,
	},
};

View File

@ -0,0 +1,168 @@
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <engine/graph.h>
#include "nv20.h"
#include "regs.h"
/*******************************************************************************
* Graphics object classes
******************************************************************************/
/* Hardware object classes exposed on NV34 PGRAPH; identical to nv30 apart
 * from the 3D (rankine) class id, 0x0697. */
static struct nouveau_oclass
nv34_graph_sclass[] = {
	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
	{ 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
	{ 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
	{ 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
	{ 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
	{ 0x0697, &nv04_graph_ofuncs, NULL }, /* rankine */
	{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
/* Allocate a 0x46dc-byte PGRAPH context for an NV34 channel and fill in
 * the hardware's default register image; the channel id is stamped into
 * 0x0028 so PGRAPH can associate the image with its FIFO channel. */
static int
nv34_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv20_graph_chan *chan;
	int ret, i;

	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x46dc,
					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	chan->chid = nouveau_fifo_chan(parent)->chid;

	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
	nv_wo32(chan, 0x040c, 0x01000101);
	nv_wo32(chan, 0x0420, 0x00000111);
	nv_wo32(chan, 0x0424, 0x00000060);
	nv_wo32(chan, 0x0440, 0x00000080);
	nv_wo32(chan, 0x0444, 0xffff0000);
	nv_wo32(chan, 0x0448, 0x00000001);
	nv_wo32(chan, 0x045c, 0x44400000);
	nv_wo32(chan, 0x0480, 0xffff0000);
	for (i = 0x04d4; i < 0x04dc; i += 4)
		nv_wo32(chan, i, 0x0fff0000);
	nv_wo32(chan, 0x04e0, 0x00011100);
	for (i = 0x04fc; i < 0x053c; i += 4)
		nv_wo32(chan, i, 0x07ff0000);
	nv_wo32(chan, 0x0544, 0x4b7fffff);
	nv_wo32(chan, 0x057c, 0x00000080);
	nv_wo32(chan, 0x0580, 0x30201000);
	nv_wo32(chan, 0x0584, 0x70605040);
	nv_wo32(chan, 0x0588, 0xb8a89888);
	nv_wo32(chan, 0x058c, 0xf8e8d8c8);
	nv_wo32(chan, 0x05a0, 0xb0000000);
	for (i = 0x05f0; i < 0x0630; i += 4)
		nv_wo32(chan, i, 0x00010588);
	for (i = 0x0630; i < 0x0670; i += 4)
		nv_wo32(chan, i, 0x00030303);
	for (i = 0x06b0; i < 0x06f0; i += 4)
		nv_wo32(chan, i, 0x0008aae4);
	for (i = 0x06f0; i < 0x0730; i += 4)
		nv_wo32(chan, i, 0x01012000);
	for (i = 0x0730; i < 0x0770; i += 4)
		nv_wo32(chan, i, 0x00080008);
	nv_wo32(chan, 0x0850, 0x00040000);
	nv_wo32(chan, 0x0854, 0x00010000);
	for (i = 0x0858; i < 0x0868; i += 4)
		nv_wo32(chan, i, 0x00040004);
	for (i = 0x15ac; i <= 0x271c ; i += 16) {
		/* nv_wo32() offsets are in bytes: the original "+ 1"/"+ 2"
		 * strides overlapped the first word instead of filling three
		 * consecutive 32-bit words.  Use 4-byte strides, matching the
		 * nv35 context ctor and the later upstream fix. */
		nv_wo32(chan, i + 0, 0x10700ff9);
		nv_wo32(chan, i + 4, 0x0436086c);
		nv_wo32(chan, i + 8, 0x000c001b);
	}
	for (i = 0x274c; i < 0x275c; i += 4)
		nv_wo32(chan, i, 0x0000ffff);
	nv_wo32(chan, 0x2ae0, 0x3f800000);
	nv_wo32(chan, 0x2e9c, 0x3f800000);
	nv_wo32(chan, 0x2eb0, 0x3f800000);
	nv_wo32(chan, 0x2edc, 0x40000000);
	nv_wo32(chan, 0x2ee0, 0x3f800000);
	nv_wo32(chan, 0x2ee4, 0x3f000000);
	nv_wo32(chan, 0x2eec, 0x40000000);
	nv_wo32(chan, 0x2ef0, 0x3f800000);
	nv_wo32(chan, 0x2ef8, 0xbf800000);
	nv_wo32(chan, 0x2f00, 0xbf800000);
	return 0;
}
/* PGRAPH context class for NV34: chipset-specific construction, with the
 * shared nv20 init/fini handling load/unload of the context image. */
static struct nouveau_oclass
nv34_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x34),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv34_graph_context_ctor,
		.dtor = _nouveau_graph_context_dtor,
		.init = nv20_graph_context_init,
		.fini = nv20_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
/* Construct the NV34 PGRAPH engine: allocate the engine object, create the
 * 32-entry channel context table, and wire up interrupt, class and tiling
 * callbacks (interrupts and tiling are shared with nv20). */
static int
nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nouveau_subdev *subdev;
	struct nouveau_engine *gr;
	struct nv20_graph_priv *priv;
	int ret;

	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* one context pointer slot per FIFO channel */
	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
	if (ret)
		return ret;

	subdev = nv_subdev(priv);
	subdev->unit = 0x00001000;
	subdev->intr = nv20_graph_intr;

	gr = nv_engine(priv);
	gr->tile_prog = nv20_graph_tile_prog;
	gr->sclass = nv34_graph_sclass;
	gr->cclass = &nv34_graph_cclass;
	return 0;
}
/* Engine descriptor for NV34 PGRAPH; init/fini/dtor are shared with the
 * nv30 family, only construction differs. */
struct nouveau_oclass
nv34_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x34),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv34_graph_ctor,
		.dtor = nv20_graph_dtor,
		.init = nv30_graph_init,
		.fini = _nouveau_graph_fini,
	},
};

View File

@ -0,0 +1,166 @@
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
#include "nv20.h"
#include "regs.h"
/*******************************************************************************
* Graphics object classes
******************************************************************************/
/* Hardware object classes exposed on NV35 PGRAPH; identical to nv30 apart
 * from the 3D (rankine) class id, 0x0497. */
static struct nouveau_oclass
nv35_graph_sclass[] = {
	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
	{ 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
	{ 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
	{ 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
	{ 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
	{ 0x0497, &nv04_graph_ofuncs, NULL }, /* rankine */
	{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
/* Allocate a 0x577c-byte PGRAPH context for an NV35/NV36 channel and fill
 * in the hardware's default register image.  The magic offsets/values come
 * from the NV35 context layout; the channel id is stamped into 0x0028 so
 * PGRAPH can associate the image with its FIFO channel. */
static int
nv35_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv20_graph_chan *chan;
	int ret, i;

	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x577c,
					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	chan->chid = nouveau_fifo_chan(parent)->chid;

	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
	nv_wo32(chan, 0x040c, 0x00000101);
	nv_wo32(chan, 0x0420, 0x00000111);
	nv_wo32(chan, 0x0424, 0x00000060);
	nv_wo32(chan, 0x0440, 0x00000080);
	nv_wo32(chan, 0x0444, 0xffff0000);
	nv_wo32(chan, 0x0448, 0x00000001);
	nv_wo32(chan, 0x045c, 0x44400000);
	nv_wo32(chan, 0x0488, 0xffff0000);
	for (i = 0x04dc; i < 0x04e4; i += 4)
		nv_wo32(chan, i, 0x0fff0000);
	nv_wo32(chan, 0x04e8, 0x00011100);
	for (i = 0x0504; i < 0x0544; i += 4)
		nv_wo32(chan, i, 0x07ff0000);
	nv_wo32(chan, 0x054c, 0x4b7fffff);
	nv_wo32(chan, 0x0588, 0x00000080);
	nv_wo32(chan, 0x058c, 0x30201000);
	nv_wo32(chan, 0x0590, 0x70605040);
	nv_wo32(chan, 0x0594, 0xb8a89888);
	nv_wo32(chan, 0x0598, 0xf8e8d8c8);
	nv_wo32(chan, 0x05ac, 0xb0000000);
	for (i = 0x0604; i < 0x0644; i += 4)
		nv_wo32(chan, i, 0x00010588);
	for (i = 0x0644; i < 0x0684; i += 4)
		nv_wo32(chan, i, 0x00030303);
	for (i = 0x06c4; i < 0x0704; i += 4)
		nv_wo32(chan, i, 0x0008aae4);
	for (i = 0x0704; i < 0x0744; i += 4)
		nv_wo32(chan, i, 0x01012000);
	for (i = 0x0744; i < 0x0784; i += 4)
		nv_wo32(chan, i, 0x00080008);
	nv_wo32(chan, 0x0860, 0x00040000);
	nv_wo32(chan, 0x0864, 0x00010000);
	for (i = 0x0868; i < 0x0878; i += 4)
		nv_wo32(chan, i, 0x00040004);
	/* three consecutive words per 16-byte record */
	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
		nv_wo32(chan, i + 0, 0x10700ff9);
		nv_wo32(chan, i + 4, 0x0436086c);
		nv_wo32(chan, i + 8, 0x000c001b);
	}
	for (i = 0x30bc; i < 0x30cc; i += 4)
		nv_wo32(chan, i, 0x0000ffff);
	nv_wo32(chan, 0x3450, 0x3f800000);
	nv_wo32(chan, 0x380c, 0x3f800000);
	nv_wo32(chan, 0x3820, 0x3f800000);
	nv_wo32(chan, 0x384c, 0x40000000);
	nv_wo32(chan, 0x3850, 0x3f800000);
	nv_wo32(chan, 0x3854, 0x3f000000);
	nv_wo32(chan, 0x385c, 0x40000000);
	nv_wo32(chan, 0x3860, 0x3f800000);
	nv_wo32(chan, 0x3868, 0xbf800000);
	nv_wo32(chan, 0x3870, 0xbf800000);
	return 0;
}
/* PGRAPH context class for NV35: chipset-specific construction, with the
 * shared nv20 init/fini handling load/unload of the context image. */
static struct nouveau_oclass
nv35_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x35),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv35_graph_context_ctor,
		.dtor = _nouveau_graph_context_dtor,
		.init = nv20_graph_context_init,
		.fini = nv20_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
/* Construct the NV35 PGRAPH engine: allocate the engine object, create the
 * 32-entry channel context table, and wire up interrupt, class and tiling
 * callbacks (interrupts and tiling are shared with nv20). */
static int
nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		struct nouveau_oclass *oclass, void *data, u32 size,
		struct nouveau_object **pobject)
{
	struct nouveau_subdev *subdev;
	struct nouveau_engine *gr;
	struct nv20_graph_priv *priv;
	int ret;

	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* one context pointer slot per FIFO channel */
	ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
	if (ret)
		return ret;

	subdev = nv_subdev(priv);
	subdev->unit = 0x00001000;
	subdev->intr = nv20_graph_intr;

	gr = nv_engine(priv);
	gr->tile_prog = nv20_graph_tile_prog;
	gr->sclass = nv35_graph_sclass;
	gr->cclass = &nv35_graph_cclass;
	return 0;
}
/* Engine descriptor for NV35 PGRAPH; init/fini/dtor are shared with the
 * nv30 family, only construction differs. */
struct nouveau_oclass
nv35_graph_oclass = {
	.handle = NV_ENGINE(GR, 0x35),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv35_graph_ctor,
		.dtor = nv20_graph_dtor,
		.init = nv30_graph_init,
		.fini = _nouveau_graph_fini,
	},
};

View File

@ -1,151 +1,238 @@
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include <core/ramht.h>
#include <core/os.h>
#include <core/class.h>
#include <core/handle.h>
#include <core/engctx.h>
struct nv40_graph_engine {
struct nouveau_exec_engine base;
u32 grctx_size;
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/graph.h>
#include <engine/fifo.h>
#include "nv40.h"
#include "regs.h"
struct nv40_graph_priv {
struct nouveau_graph base;
u32 size;
};
struct nv40_graph_chan {
struct nouveau_graph_chan base;
};
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static int
nv40_graph_context_new(struct nouveau_channel *chan, int engine)
nv40_graph_object_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *grctx = NULL;
unsigned long flags;
struct nouveau_gpuobj *obj;
int ret;
ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
NVOBJ_FLAG_ZERO_ALLOC, &grctx);
ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
20, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
/* Initialise default context values */
nv40_grctx_fill(dev, grctx);
nv_wo32(grctx, 0, grctx->addr);
/* init grctx pointer in ramfc, and on PFIFO if channel is
* already active there
*/
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_wo32(chan->ramfc, 0x38, grctx->addr >> 4);
nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
nv_wr32(dev, 0x0032e0, grctx->addr >> 4);
nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
chan->engctx[engine] = grctx;
return 0;
}
static void
nv40_graph_context_del(struct nouveau_channel *chan, int engine)
{
struct nouveau_gpuobj *grctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
u32 inst = 0x01000000 | (grctx->addr >> 4);
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
if (nv_rd32(dev, 0x40032c) == inst)
nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
if (nv_rd32(dev, 0x400330) == inst)
nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
nouveau_gpuobj_ref(NULL, &grctx);
chan->engctx[engine] = NULL;
}
int
nv40_graph_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
{
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *obj = NULL;
int ret;
ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
if (ret)
return ret;
obj->engine = 1;
obj->class = class;
nv_wo32(obj, 0x00, class);
nv_wo32(obj, 0x00, nv_mclass(obj));
nv_wo32(obj, 0x04, 0x00000000);
#ifndef __BIG_ENDIAN
nv_wo32(obj, 0x08, 0x00000000);
#else
nv_wo32(obj, 0x08, 0x01000000);
#ifdef __BIG_ENDIAN
nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
#endif
nv_wo32(obj, 0x0c, 0x00000000);
nv_wo32(obj, 0x10, 0x00000000);
return 0;
}
ret = nouveau_ramht_insert(chan, handle, obj);
nouveau_gpuobj_ref(NULL, &obj);
/* Object functions shared by all NV40 PGRAPH classes; only construction is
 * chipset-specific, the rest defers to generic gpuobj handling. */
struct nouveau_ofuncs
nv40_graph_ofuncs = {
	.ctor = nv40_graph_object_ctor,
	.dtor = _nouveau_gpuobj_dtor,
	.init = _nouveau_gpuobj_init,
	.fini = _nouveau_gpuobj_fini,
	.rd32 = _nouveau_gpuobj_rd32,
	.wr32 = _nouveau_gpuobj_wr32,
};
/* Hardware object classes exposed on NV40 PGRAPH; the 3D (curie) class
 * is 0x4097 here, 0x4497 on the nv44 variant below. */
static struct nouveau_oclass
nv40_graph_sclass[] = {
	{ 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
	{ 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
	{ 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
	{ 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
	{ 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
	{ 0x4097, &nv40_graph_ofuncs, NULL }, /* curie */
	{},
};
/* Hardware object classes for the NV44-class variants of NV40 PGRAPH;
 * differs from nv40_graph_sclass only in the curie class id (0x4497). */
static struct nouveau_oclass
nv44_graph_sclass[] = {
	{ 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
	{ 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
	{ 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
	{ 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
	{ 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
	{ 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
	{ 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
	{ 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
	{ 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
	{ 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
	{ 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
	{ 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
	{ 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
	{ 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
	{ 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
	{ 0x4497, &nv40_graph_ofuncs, NULL }, /* curie */
	{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
/* Allocate a PGRAPH context for an NV40 channel.  The size was measured by
 * the ctxprog generator at engine init (priv->size); the image is filled
 * with default values by nv40_grctx_fill() and its first word is set to
 * its own address so the hardware can locate it. */
static int
nv40_graph_context_ctor(struct nouveau_object *parent,
			struct nouveau_object *engine,
			struct nouveau_oclass *oclass, void *data, u32 size,
			struct nouveau_object **pobject)
{
	struct nv40_graph_priv *priv = (void *)engine;
	struct nv40_graph_chan *chan;
	int ret;

	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
					   priv->size, 16,
					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
	nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
	return 0;
}
/* Unload an NV40 PGRAPH context.  If this context is the one currently
 * resident in hardware (0x40032c) and we're suspending, force a ctxprog
 * save to the image before invalidating the pointers; on timeout the
 * offending ctxprog instruction is reported and -EBUSY returned.
 * NOTE(review): casts to nv04_graph_priv/chan rely on layout compatibility
 * with the nv40 structures — confirm against nv40.h. */
static int
nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
{
	struct nv04_graph_priv *priv = (void *)object->engine;
	struct nv04_graph_chan *chan = (void *)object;
	u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
	int ret = 0;

	/* halt context switching while we fiddle with the pointers */
	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);

	if (nv_rd32(priv, 0x40032c) == inst) {
		if (suspend) {
			nv_wr32(priv, 0x400720, 0x00000000);
			nv_wr32(priv, 0x400784, inst);
			nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
			nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
			if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
				u32 insn = nv_rd32(priv, 0x400308);
				nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
				ret = -EBUSY;
			}
		}
		nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
	}

	if (nv_rd32(priv, 0x400330) == inst)
		nv_mask(priv, 0x400330, 0x01000000, 0x00000000);

	/* re-enable context switching */
	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
	return ret;
}
static void
nv40_graph_set_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
/* PGRAPH context class for NV40: custom construction and unload (ctxprog
 * save), generic handling for everything else. */
static struct nouveau_oclass
nv40_graph_cclass = {
	.handle = NV_ENGCTX(GR, 0x40),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_graph_context_ctor,
		.dtor = _nouveau_graph_context_dtor,
		.init = _nouveau_graph_context_init,
		.fini = nv40_graph_context_fini,
		.rd32 = _nouveau_graph_context_rd32,
		.wr32 = _nouveau_graph_context_wr32,
	},
};
switch (dev_priv->chipset) {
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static void
nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
{
struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
struct nouveau_fifo *pfifo = nouveau_fifo(engine);
struct nv40_graph_priv *priv = (void *)engine;
unsigned long flags;
pfifo->pause(pfifo, &flags);
nv04_graph_idle(priv);
switch (nv_device(priv)->chipset) {
case 0x40:
case 0x41: /* guess */
case 0x42:
case 0x43:
case 0x45: /* guess */
case 0x4e:
nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
break;
case 0x44:
case 0x4a:
nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
break;
case 0x46:
case 0x47:
@ -154,149 +241,213 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
case 0x4c:
case 0x67:
default:
nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
break;
}
pfifo->start(pfifo, &flags);
}
/* NV40 PGRAPH interrupt handler.  Decodes the trapped method from the
 * status registers; ILLEGAL_MTHD errors are first offered to the software
 * class bound to the current context (nv_call), and only logged if no
 * software method consumed them.  All pending bits are acked and the FIFO
 * is kicked before reporting. */
static void
nv40_graph_intr(struct nouveau_subdev *subdev)
{
	struct nv40_graph_priv *priv = (void *)subdev;
	struct nouveau_engine *engine = nv_engine(subdev);
	struct nouveau_handle *handle = NULL;
	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
	/* instance of the currently resident context */
	u32 inst = (nv_rd32(priv, 0x40032c) & 0x000fffff) << 4;
	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
	u32 show = stat;

	if (stat & NV_PGRAPH_INTR_ERROR) {
		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
			handle = nouveau_engctx_lookup_class(engine, inst, class);
			if (handle && !nv_call(handle->object, mthd, data))
				show &= ~NV_PGRAPH_INTR_ERROR;
			nouveau_engctx_handle_put(handle);
		}
		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
			nv_mask(priv, 0x402000, 0, 0);
		}
	}

	/* ack everything we saw and unstall the FIFO */
	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);

	if (show) {
		nv_info(priv, "");
		nouveau_bitfield_print(nv10_graph_intr_name, show);
		printk(" nsource:");
		nouveau_bitfield_print(nv04_graph_nsource, nsource);
		printk(" nstatus:");
		nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
		printk("\n");
		nv_error(priv, "ch 0x%08x subc %d class 0x%04x "
			       "mthd 0x%04x data 0x%08x\n",
			inst, subc, class, mthd, data);
	}
}
/*
* G70 0x47
* G71 0x49
* NV45 0x48
* G72[M] 0x46
* G73 0x4b
* C51_G7X 0x4c
* C51 0x4e
*/
int
nv40_graph_init(struct drm_device *dev, int engine)
static int
nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t vramsz;
int i, j;
struct nv40_graph_priv *priv;
int ret;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->unit = 0x00001000;
nv_subdev(priv)->intr = nv40_graph_intr;
nv_engine(priv)->cclass = &nv40_graph_cclass;
if (nv44_graph_class(priv))
nv_engine(priv)->sclass = nv44_graph_sclass;
else
nv_engine(priv)->sclass = nv40_graph_sclass;
nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
return 0;
}
static int
nv40_graph_init(struct nouveau_object *object)
{
struct nouveau_engine *engine = nv_engine(object);
struct nouveau_fb *pfb = nouveau_fb(object);
struct nv40_graph_priv *priv = (void *)engine;
int ret, i, j;
u32 vramsz;
ret = nouveau_graph_init(&priv->base);
if (ret)
return ret;
/* generate and upload context program */
nv40_grctx_init(dev, &pgraph->grctx_size);
nv40_grctx_init(nv_device(priv), &priv->size);
/* No context present currently */
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
j = nv_rd32(dev, 0x1540) & 0xff;
j = nv_rd32(priv, 0x1540) & 0xff;
if (j) {
for (i = 0; !(j & 1); j >>= 1, i++)
;
nv_wr32(dev, 0x405000, i);
nv_wr32(priv, 0x405000, i);
}
if (dev_priv->chipset == 0x40) {
nv_wr32(dev, 0x4009b0, 0x83280fff);
nv_wr32(dev, 0x4009b4, 0x000000a0);
if (nv_device(priv)->chipset == 0x40) {
nv_wr32(priv, 0x4009b0, 0x83280fff);
nv_wr32(priv, 0x4009b4, 0x000000a0);
} else {
nv_wr32(dev, 0x400820, 0x83280eff);
nv_wr32(dev, 0x400824, 0x000000a0);
nv_wr32(priv, 0x400820, 0x83280eff);
nv_wr32(priv, 0x400824, 0x000000a0);
}
switch (dev_priv->chipset) {
switch (nv_device(priv)->chipset) {
case 0x40:
case 0x45:
nv_wr32(dev, 0x4009b8, 0x0078e366);
nv_wr32(dev, 0x4009bc, 0x0000014c);
nv_wr32(priv, 0x4009b8, 0x0078e366);
nv_wr32(priv, 0x4009bc, 0x0000014c);
break;
case 0x41:
case 0x42: /* pciid also 0x00Cx */
/* case 0x0120: XXX (pciid) */
nv_wr32(dev, 0x400828, 0x007596ff);
nv_wr32(dev, 0x40082c, 0x00000108);
nv_wr32(priv, 0x400828, 0x007596ff);
nv_wr32(priv, 0x40082c, 0x00000108);
break;
case 0x43:
nv_wr32(dev, 0x400828, 0x0072cb77);
nv_wr32(dev, 0x40082c, 0x00000108);
nv_wr32(priv, 0x400828, 0x0072cb77);
nv_wr32(priv, 0x40082c, 0x00000108);
break;
case 0x44:
case 0x46: /* G72 */
case 0x4a:
case 0x4c: /* G7x-based C51 */
case 0x4e:
nv_wr32(dev, 0x400860, 0);
nv_wr32(dev, 0x400864, 0);
nv_wr32(priv, 0x400860, 0);
nv_wr32(priv, 0x400864, 0);
break;
case 0x47: /* G70 */
case 0x49: /* G71 */
case 0x4b: /* G73 */
nv_wr32(dev, 0x400828, 0x07830610);
nv_wr32(dev, 0x40082c, 0x0000016A);
nv_wr32(priv, 0x400828, 0x07830610);
nv_wr32(priv, 0x40082c, 0x0000016A);
break;
default:
break;
}
nv_wr32(dev, 0x400b38, 0x2ffff800);
nv_wr32(dev, 0x400b3c, 0x00006000);
nv_wr32(priv, 0x400b38, 0x2ffff800);
nv_wr32(priv, 0x400b3c, 0x00006000);
/* Tiling related stuff. */
switch (dev_priv->chipset) {
switch (nv_device(priv)->chipset) {
case 0x44:
case 0x4a:
nv_wr32(dev, 0x400bc4, 0x1003d888);
nv_wr32(dev, 0x400bbc, 0xb7a7b500);
nv_wr32(priv, 0x400bc4, 0x1003d888);
nv_wr32(priv, 0x400bbc, 0xb7a7b500);
break;
case 0x46:
nv_wr32(dev, 0x400bc4, 0x0000e024);
nv_wr32(dev, 0x400bbc, 0xb7a7b520);
nv_wr32(priv, 0x400bc4, 0x0000e024);
nv_wr32(priv, 0x400bbc, 0xb7a7b520);
break;
case 0x4c:
case 0x4e:
case 0x67:
nv_wr32(dev, 0x400bc4, 0x1003d888);
nv_wr32(dev, 0x400bbc, 0xb7a7b540);
nv_wr32(priv, 0x400bc4, 0x1003d888);
nv_wr32(priv, 0x400bbc, 0xb7a7b540);
break;
default:
break;
}
/* Turn all the tiling regions off. */
for (i = 0; i < nvfb_tile_nr(dev); i++)
nv40_graph_set_tile_region(dev, i);
for (i = 0; i < pfb->tile.regions; i++)
engine->tile_prog(engine, i);
/* begin RAM config */
vramsz = pci_resource_len(dev->pdev, 0) - 1;
switch (dev_priv->chipset) {
vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
switch (nv_device(priv)->chipset) {
case 0x40:
nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
nv_wr32(dev, 0x400820, 0);
nv_wr32(dev, 0x400824, 0);
nv_wr32(dev, 0x400864, vramsz);
nv_wr32(dev, 0x400868, vramsz);
nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
nv_wr32(priv, 0x400820, 0);
nv_wr32(priv, 0x400824, 0);
nv_wr32(priv, 0x400864, vramsz);
nv_wr32(priv, 0x400868, vramsz);
break;
default:
switch (dev_priv->chipset) {
switch (nv_device(priv)->chipset) {
case 0x41:
case 0x42:
case 0x43:
@ -304,163 +455,33 @@ nv40_graph_init(struct drm_device *dev, int engine)
case 0x4e:
case 0x44:
case 0x4a:
nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
break;
default:
nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
break;
}
nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
nv_wr32(dev, 0x400840, 0);
nv_wr32(dev, 0x400844, 0);
nv_wr32(dev, 0x4008A0, vramsz);
nv_wr32(dev, 0x4008A4, vramsz);
nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
nv_wr32(priv, 0x400840, 0);
nv_wr32(priv, 0x400844, 0);
nv_wr32(priv, 0x4008A0, vramsz);
nv_wr32(priv, 0x4008A4, vramsz);
break;
}
return 0;
}
static int
nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
u32 inst = nv_rd32(dev, 0x40032c);
if (inst & 0x01000000) {
nv_wr32(dev, 0x400720, 0x00000000);
nv_wr32(dev, 0x400784, inst);
nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
u32 insn = nv_rd32(dev, 0x400308);
NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
}
nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
}
return 0;
}
static int
nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
{
struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *grctx;
unsigned long flags;
int i;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (i = 0; i < pfifo->channels; i++) {
if (!dev_priv->channels.ptr[i])
continue;
grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
if (grctx && grctx->addr == inst)
break;
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
return i;
}
static void
nv40_graph_isr(struct drm_device *dev)
{
u32 stat;
while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
u32 chid = nv40_graph_isr_chid(dev, inst);
u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
u32 show = stat;
if (stat & NV_PGRAPH_INTR_ERROR) {
if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
show &= ~NV_PGRAPH_INTR_ERROR;
} else
if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
nv_mask(dev, 0x402000, 0, 0);
}
}
nv_wr32(dev, NV03_PGRAPH_INTR, stat);
nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
if (show && nouveau_ratelimit()) {
NV_INFO(dev, "PGRAPH -");
nouveau_bitfield_print(nv10_graph_intr, show);
printk(" nsource:");
nouveau_bitfield_print(nv04_graph_nsource, nsource);
printk(" nstatus:");
nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
printk("\n");
NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
"class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, inst, subc, class, mthd, data);
}
}
}
static void
nv40_graph_destroy(struct drm_device *dev, int engine)
{
struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
nouveau_irq_unregister(dev, 12);
NVOBJ_ENGINE_DEL(dev, GR);
kfree(pgraph);
}
int
nv40_graph_create(struct drm_device *dev)
{
struct nv40_graph_engine *pgraph;
pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
if (!pgraph)
return -ENOMEM;
pgraph->base.destroy = nv40_graph_destroy;
pgraph->base.init = nv40_graph_init;
pgraph->base.fini = nv40_graph_fini;
pgraph->base.context_new = nv40_graph_context_new;
pgraph->base.context_del = nv40_graph_context_del;
pgraph->base.object_new = nv40_graph_object_new;
pgraph->base.set_tile_region = nv40_graph_set_tile_region;
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
nouveau_irq_register(dev, 12, nv40_graph_isr);
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
/* curie */
if (nv44_graph_class(dev))
NVOBJ_CLASS(dev, 0x4497, GR);
else
NVOBJ_CLASS(dev, 0x4097, GR);
return 0;
}
struct nouveau_oclass
nv40_graph_oclass = {
.handle = NV_ENGINE(GR, 0x40),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv40_graph_ctor,
.dtor = _nouveau_graph_dtor,
.init = nv40_graph_init,
.fini = _nouveau_graph_fini,
},
};

View File

@ -0,0 +1,21 @@
#ifndef __NV40_GRAPH_H__
#define __NV40_GRAPH_H__
/* returns 1 if device is one of the nv4x using the 0x4497 object class,
* helpful to determine a number of other hardware features
*/
static inline int
nv44_graph_class(void *priv)
{
struct nouveau_device *device = nv_device(priv);
if ((device->chipset & 0xf0) == 0x60)
return 1;
return !(0x0baf & (1 << (device->chipset & 0x0f)));
}
void nv40_grctx_init(struct nouveau_device *, u32 *size);
void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,7 @@
#ifndef __NV50_GRAPH_H__
#define __NV50_GRAPH_H__
int nv50_grctx_init(struct nouveau_device *, u32 *size);
void nv50_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -25,6 +25,18 @@
#ifndef __NVC0_GRAPH_H__
#define __NVC0_GRAPH_H__
#include <core/client.h>
#include <core/handle.h>
#include <core/gpuobj.h>
#include <core/option.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
#include <subdev/timer.h>
#include <engine/graph.h>
#define GPC_MAX 4
#define TPC_MAX 32
@ -53,7 +65,7 @@ struct nvc0_graph_fuc {
};
struct nvc0_graph_priv {
struct nouveau_exec_engine base;
struct nouveau_graph base;
struct nvc0_graph_fuc fuc409c;
struct nvc0_graph_fuc fuc409d;
@ -78,11 +90,10 @@ struct nvc0_graph_priv {
};
struct nvc0_graph_chan {
struct nouveau_gpuobj *grctx;
struct nouveau_vma grctx_vma;
struct nouveau_graph_chan base;
struct nouveau_gpuobj *mmio;
struct nouveau_vma mmio_vma;
struct nouveau_vma mmio_vma;
int mmio_nr;
struct {
struct nouveau_gpuobj *mem;
@ -91,11 +102,11 @@ struct nvc0_graph_chan {
};
static inline u32
nvc0_graph_class(struct drm_device *priv)
nvc0_graph_class(void *obj)
{
struct drm_nouveau_private *dev_priv = priv->dev_private;
struct nouveau_device *device = nv_device(obj);
switch (dev_priv->chipset) {
switch (device->chipset) {
case 0xc0:
case 0xc3:
case 0xc4:
@ -115,17 +126,16 @@ nvc0_graph_class(struct drm_device *priv)
}
}
void nv_icmd(struct drm_device *priv, u32 icmd, u32 data);
void nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data);
static inline void
nv_mthd(struct drm_device *priv, u32 class, u32 mthd, u32 data)
nv_mthd(struct nvc0_graph_priv *priv, u32 class, u32 mthd, u32 data)
{
nv_wr32(priv, 0x40448c, data);
nv_wr32(priv, 0x404488, 0x80000000 | (mthd << 14) | class);
}
struct nvc0_grctx {
struct drm_device *dev;
struct nvc0_graph_priv *priv;
struct nvc0_graph_data *data;
struct nvc0_graph_mmio *mmio;
@ -135,18 +145,18 @@ struct nvc0_grctx {
u64 addr;
};
int nvc0_grctx_generate(struct drm_device *);
int nvc0_grctx_init(struct drm_device *, struct nvc0_graph_priv *,
struct nvc0_grctx *);
int nvc0_grctx_generate(struct nvc0_graph_priv *);
int nvc0_grctx_init(struct nvc0_graph_priv *, struct nvc0_grctx *);
void nvc0_grctx_data(struct nvc0_grctx *, u32, u32, u32);
void nvc0_grctx_mmio(struct nvc0_grctx *, u32, u32, u32, u32);
int nvc0_grctx_fini(struct nvc0_grctx *);
int nve0_grctx_generate(struct drm_device *);
int nve0_grctx_generate(struct nvc0_graph_priv *);
#define mmio_data(s,a,p) nvc0_grctx_data(&info, (s), (a), (p))
#define mmio_list(r,d,s,b) nvc0_grctx_mmio(&info, (r), (d), (s), (b))
void nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *);
int nvc0_graph_ctor_fw(struct nvc0_graph_priv *, const char *,
struct nvc0_graph_fuc *);
void nvc0_graph_dtor(struct nouveau_object *);
@ -157,9 +167,4 @@ int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_object **);
void nvc0_graph_context_dtor(struct nouveau_object *);
void nvc0_graph_ctxctl_debug(struct drm_device *);
int nvc0_graph_context_new(struct nouveau_channel *, int);
void nvc0_graph_context_del(struct nouveau_channel *, int);
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright 2010 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,586 +22,213 @@
* Authors: Ben Skeggs
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include "drmP.h"
#include "nouveau_drv.h"
#include <core/mm.h>
#include <engine/fifo.h>
#include "nvc0.h"
static void
nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
{
NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
nv_rd32(dev, base + 0x400));
NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
}
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nouveau_oclass
nve0_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0xa040, &nouveau_object_ofuncs },
{ 0xa097, &nouveau_object_ofuncs },
{ 0xa0c0, &nouveau_object_ofuncs },
{ 0xa0b5, &nouveau_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static struct nouveau_oclass
nve0_graph_cclass = {
.handle = NV_ENGCTX(GR, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_graph_context_ctor,
.dtor = nvc0_graph_context_dtor,
.init = _nouveau_graph_context_init,
.fini = _nouveau_graph_context_fini,
.rd32 = _nouveau_graph_context_rd32,
.wr32 = _nouveau_graph_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static void
nve0_graph_ctxctl_debug(struct drm_device *dev)
nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
{
u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
u32 gpc;
nve0_graph_ctxctl_debug_unit(dev, 0x409000);
for (gpc = 0; gpc < gpcnr; gpc++)
nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
}
static int
nve0_graph_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
{
return 0;
}
static int
nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
return 0;
}
static void
nve0_graph_init_obj418880(struct drm_device *dev)
{
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
int i;
nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
for (i = 0; i < 4; i++)
nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
}
static void
nve0_graph_init_regs(struct drm_device *dev)
{
nv_wr32(dev, 0x400080, 0x003083c2);
nv_wr32(dev, 0x400088, 0x0001ffe7);
nv_wr32(dev, 0x40008c, 0x00000000);
nv_wr32(dev, 0x400090, 0x00000030);
nv_wr32(dev, 0x40013c, 0x003901f7);
nv_wr32(dev, 0x400140, 0x00000100);
nv_wr32(dev, 0x400144, 0x00000000);
nv_wr32(dev, 0x400148, 0x00000110);
nv_wr32(dev, 0x400138, 0x00000000);
nv_wr32(dev, 0x400130, 0x00000000);
nv_wr32(dev, 0x400134, 0x00000000);
nv_wr32(dev, 0x400124, 0x00000002);
}
static void
nve0_graph_init_units(struct drm_device *dev)
{
nv_wr32(dev, 0x409ffc, 0x00000000);
nv_wr32(dev, 0x409c14, 0x00003e3e);
nv_wr32(dev, 0x409c24, 0x000f0000);
nv_wr32(dev, 0x404000, 0xc0000000);
nv_wr32(dev, 0x404600, 0xc0000000);
nv_wr32(dev, 0x408030, 0xc0000000);
nv_wr32(dev, 0x404490, 0xc0000000);
nv_wr32(dev, 0x406018, 0xc0000000);
nv_wr32(dev, 0x407020, 0xc0000000);
nv_wr32(dev, 0x405840, 0xc0000000);
nv_wr32(dev, 0x405844, 0x00ffffff);
nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
}
static void
nve0_graph_init_gpc_0(struct drm_device *dev)
{
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
u32 data[TPC_MAX / 8];
u8 tpcnr[GPC_MAX];
int i, gpc, tpc;
nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);
memset(data, 0x00, sizeof(data));
memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
do {
gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
data[i / 8] |= tpc << ((i % 8) * 4);
}
nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
priv->tpc_nr[gpc]);
nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
}
static void
nve0_graph_init_gpc_1(struct drm_device *dev)
{
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
int gpc, tpc;
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
}
nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
}
}
static void
nve0_graph_init_rop(struct drm_device *dev)
{
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
int rop;
for (rop = 0; rop < priv->rop_nr; rop++) {
nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
}
}
static void
nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
{
int i;
nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
for (i = 0; i < data->size / 4; i++)
nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
for (i = 0; i < code->size / 4; i++) {
if ((i & 0x3f) == 0)
nv_wr32(dev, fuc_base + 0x0188, i >> 6);
nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
}
}
static int
nve0_graph_init_ctxctl(struct drm_device *dev)
{
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
u32 r000260;
/* load fuc microcode */
r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
nv_wr32(dev, 0x000260, r000260);
/* start both of them running */
nv_wr32(dev, 0x409840, 0xffffffff);
nv_wr32(dev, 0x41a10c, 0x00000000);
nv_wr32(dev, 0x40910c, 0x00000000);
nv_wr32(dev, 0x41a100, 0x00000002);
nv_wr32(dev, 0x409100, 0x00000002);
if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
NV_INFO(dev, "0x409800 wait failed\n");
nv_wr32(dev, 0x409840, 0xffffffff);
nv_wr32(dev, 0x409500, 0x7fffffff);
nv_wr32(dev, 0x409504, 0x00000021);
nv_wr32(dev, 0x409840, 0xffffffff);
nv_wr32(dev, 0x409500, 0x00000000);
nv_wr32(dev, 0x409504, 0x00000010);
if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
return -EBUSY;
}
priv->size = nv_rd32(dev, 0x409800);
nv_wr32(dev, 0x409840, 0xffffffff);
nv_wr32(dev, 0x409500, 0x00000000);
nv_wr32(dev, 0x409504, 0x00000016);
if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
return -EBUSY;
}
nv_wr32(dev, 0x409840, 0xffffffff);
nv_wr32(dev, 0x409500, 0x00000000);
nv_wr32(dev, 0x409504, 0x00000025);
if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
return -EBUSY;
}
nv_wr32(dev, 0x409800, 0x00000000);
nv_wr32(dev, 0x409500, 0x00000001);
nv_wr32(dev, 0x409504, 0x00000030);
if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
return -EBUSY;
}
nv_wr32(dev, 0x409810, 0xb00095c8);
nv_wr32(dev, 0x409800, 0x00000000);
nv_wr32(dev, 0x409500, 0x00000001);
nv_wr32(dev, 0x409504, 0x00000031);
if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
return -EBUSY;
}
nv_wr32(dev, 0x409810, 0x00080420);
nv_wr32(dev, 0x409800, 0x00000000);
nv_wr32(dev, 0x409500, 0x00000001);
nv_wr32(dev, 0x409504, 0x00000032);
if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
return -EBUSY;
}
nv_wr32(dev, 0x409614, 0x00000070);
nv_wr32(dev, 0x409614, 0x00000770);
nv_wr32(dev, 0x40802c, 0x00000001);
if (priv->data == NULL) {
int ret = nve0_grctx_generate(dev);
if (ret) {
NV_ERROR(dev, "PGRAPH: failed to construct context\n");
return ret;
}
return 1;
}
return 0;
}
static int
nve0_graph_init(struct drm_device *dev, int engine)
{
int ret;
reset:
nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
nve0_graph_init_obj418880(dev);
nve0_graph_init_regs(dev);
nve0_graph_init_gpc_0(dev);
nv_wr32(dev, 0x400500, 0x00010001);
nv_wr32(dev, 0x400100, 0xffffffff);
nv_wr32(dev, 0x40013c, 0xffffffff);
nve0_graph_init_units(dev);
nve0_graph_init_gpc_1(dev);
nve0_graph_init_rop(dev);
nv_wr32(dev, 0x400108, 0xffffffff);
nv_wr32(dev, 0x400138, 0xffffffff);
nv_wr32(dev, 0x400118, 0xffffffff);
nv_wr32(dev, 0x400130, 0xffffffff);
nv_wr32(dev, 0x40011c, 0xffffffff);
nv_wr32(dev, 0x400134, 0xffffffff);
nv_wr32(dev, 0x400054, 0x34ce3464);
ret = nve0_graph_init_ctxctl(dev);
if (ret) {
if (ret == 1)
goto reset;
return ret;
}
return 0;
}
int
nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
{
struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
unsigned long flags;
int i;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (i = 0; i < pfifo->channels; i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->ramin)
continue;
if (inst == chan->ramin->addr)
break;
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
return i;
}
static void
nve0_graph_ctxctl_isr(struct drm_device *dev)
{
u32 ustat = nv_rd32(dev, 0x409c18);
u32 ustat = nv_rd32(priv, 0x409c18);
if (ustat & 0x00000001)
NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
nv_error(priv, "CTXCTRL ucode error\n");
if (ustat & 0x00080000)
NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
nv_error(priv, "CTXCTRL watchdog timeout\n");
if (ustat & ~0x00080001)
NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
nv_error(priv, "CTXCTRL 0x%08x\n", ustat);
nve0_graph_ctxctl_debug(dev);
nv_wr32(dev, 0x409c20, ustat);
nvc0_graph_ctxctl_debug(priv);
nv_wr32(priv, 0x409c20, ustat);
}
static void
nve0_graph_trap_isr(struct drm_device *dev, int chid)
nve0_graph_trap_isr(struct nvc0_graph_priv *priv, u64 inst)
{
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
u32 trap = nv_rd32(dev, 0x400108);
u32 trap = nv_rd32(priv, 0x400108);
int rop;
if (trap & 0x00000001) {
u32 stat = nv_rd32(dev, 0x404000);
NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
nv_wr32(dev, 0x404000, 0xc0000000);
nv_wr32(dev, 0x400108, 0x00000001);
u32 stat = nv_rd32(priv, 0x404000);
nv_error(priv, "DISPATCH ch 0x%010llx 0x%08x\n", inst, stat);
nv_wr32(priv, 0x404000, 0xc0000000);
nv_wr32(priv, 0x400108, 0x00000001);
trap &= ~0x00000001;
}
if (trap & 0x00000010) {
u32 stat = nv_rd32(dev, 0x405840);
NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
nv_wr32(dev, 0x405840, 0xc0000000);
nv_wr32(dev, 0x400108, 0x00000010);
u32 stat = nv_rd32(priv, 0x405840);
nv_error(priv, "SHADER ch 0x%010llx 0x%08x\n", inst, stat);
nv_wr32(priv, 0x405840, 0xc0000000);
nv_wr32(priv, 0x400108, 0x00000010);
trap &= ~0x00000010;
}
if (trap & 0x02000000) {
for (rop = 0; rop < priv->rop_nr; rop++) {
u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
rop, chid, statz, statc);
nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
nv_error(priv, "ROP%d ch 0x%010llx 0x%08x 0x%08x\n",
rop, inst, statz, statc);
nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
}
nv_wr32(dev, 0x400108, 0x02000000);
nv_wr32(priv, 0x400108, 0x02000000);
trap &= ~0x02000000;
}
if (trap) {
NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
nv_wr32(dev, 0x400108, trap);
nv_error(priv, "TRAP ch 0x%010llx 0x%08x\n", inst, trap);
nv_wr32(priv, 0x400108, trap);
}
}
static void
nve0_graph_isr(struct drm_device *dev)
nve0_graph_intr(struct nouveau_subdev *subdev)
{
u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
u32 chid = nve0_graph_isr_chid(dev, inst);
u32 stat = nv_rd32(dev, 0x400100);
u32 addr = nv_rd32(dev, 0x400704);
struct nvc0_graph_priv *priv = (void *)subdev;
struct nouveau_engine *engine = nv_engine(subdev);
struct nouveau_handle *handle = NULL;
u64 inst = (u64)(nv_rd32(priv, 0x409b00) & 0x0fffffff) << 12;
u32 stat = nv_rd32(priv, 0x400100);
u32 addr = nv_rd32(priv, 0x400704);
u32 mthd = (addr & 0x00003ffc);
u32 subc = (addr & 0x00070000) >> 16;
u32 data = nv_rd32(dev, 0x400708);
u32 code = nv_rd32(dev, 0x400110);
u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
u32 data = nv_rd32(priv, 0x400708);
u32 code = nv_rd32(priv, 0x400110);
u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
if (stat & 0x00000010) {
if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
handle = nouveau_engctx_lookup_class(engine, inst, class);
if (!handle || nv_call(handle->object, mthd, data)) {
nv_error(priv, "ILLEGAL_MTHD ch 0x%010llx "
"subc %d class 0x%04x mthd 0x%04x "
"data 0x%08x\n",
chid, inst, subc, class, mthd, data);
inst, subc, class, mthd, data);
}
nv_wr32(dev, 0x400100, 0x00000010);
nouveau_engctx_handle_put(handle);
nv_wr32(priv, 0x400100, 0x00000010);
stat &= ~0x00000010;
}
if (stat & 0x00000020) {
NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
nv_error(priv, "ILLEGAL_CLASS ch 0x%010llx subc %d "
"class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, inst, subc, class, mthd, data);
nv_wr32(dev, 0x400100, 0x00000020);
inst, subc, class, mthd, data);
nv_wr32(priv, 0x400100, 0x00000020);
stat &= ~0x00000020;
}
if (stat & 0x00100000) {
NV_INFO(dev, "PGRAPH: DATA_ERROR [");
nv_error(priv, "DATA_ERROR [");
nouveau_enum_print(nv50_data_error_names, code);
printk("] ch %d [0x%010llx] subc %d class 0x%04x "
printk("] ch 0x%010llx subc %d class 0x%04x "
"mthd 0x%04x data 0x%08x\n",
chid, inst, subc, class, mthd, data);
nv_wr32(dev, 0x400100, 0x00100000);
inst, subc, class, mthd, data);
nv_wr32(priv, 0x400100, 0x00100000);
stat &= ~0x00100000;
}
if (stat & 0x00200000) {
nve0_graph_trap_isr(dev, chid);
nv_wr32(dev, 0x400100, 0x00200000);
nve0_graph_trap_isr(priv, inst);
nv_wr32(priv, 0x400100, 0x00200000);
stat &= ~0x00200000;
}
if (stat & 0x00080000) {
nve0_graph_ctxctl_isr(dev);
nv_wr32(dev, 0x400100, 0x00080000);
nve0_graph_ctxctl_isr(priv);
nv_wr32(priv, 0x400100, 0x00080000);
stat &= ~0x00080000;
}
if (stat) {
NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
nv_wr32(dev, 0x400100, stat);
nv_error(priv, "unknown stat 0x%08x\n", stat);
nv_wr32(priv, 0x400100, stat);
}
nv_wr32(dev, 0x400500, 0x00010001);
nv_wr32(priv, 0x400500, 0x00010001);
}
static int
nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
struct nvc0_graph_fuc *fuc)
nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
const struct firmware *fw;
char f[32];
int ret;
struct nvc0_graph_priv *priv;
int ret, i;
snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
ret = request_firmware(&fw, f, &dev->pdev->dev);
ret = nouveau_graph_create(parent, engine, oclass, false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
fuc->size = fw->size;
fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
release_firmware(fw);
return (fuc->data != NULL) ? 0 : -ENOMEM;
}
nv_subdev(priv)->unit = 0x18001000;
nv_subdev(priv)->intr = nve0_graph_intr;
nv_engine(priv)->cclass = &nve0_graph_cclass;
nv_engine(priv)->sclass = nve0_graph_sclass;
static void
nve0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
{
if (fuc->data) {
kfree(fuc->data);
fuc->data = NULL;
}
}
static void
nve0_graph_destroy(struct drm_device *dev, int engine)
{
struct nvc0_graph_priv *priv = nv_engine(dev, engine);
nve0_graph_destroy_fw(&priv->fuc409c);
nve0_graph_destroy_fw(&priv->fuc409d);
nve0_graph_destroy_fw(&priv->fuc41ac);
nve0_graph_destroy_fw(&priv->fuc41ad);
nouveau_irq_unregister(dev, 12);
nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
if (priv->data)
kfree(priv->data);
NVOBJ_ENGINE_DEL(dev, GR);
kfree(priv);
}
int
nve0_graph_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_graph_priv *priv;
int ret, gpc, i;
u32 kepler;
kepler = nvc0_graph_class(dev);
if (!kepler) {
NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
return 0;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base.destroy = nve0_graph_destroy;
priv->base.init = nve0_graph_init;
priv->base.fini = nve0_graph_fini;
priv->base.context_new = nvc0_graph_context_new;
priv->base.context_del = nvc0_graph_context_del;
priv->base.object_new = nve0_graph_object_new;
NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
nouveau_irq_register(dev, 12, nve0_graph_isr);
NV_INFO(dev, "PGRAPH: using external firmware\n");
if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
ret = 0;
goto error;
}
nv_info(priv, "using external firmware\n");
if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
return -EINVAL;
priv->firmware = true;
ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
if (ret)
goto error;
return ret;
ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
if (ret)
goto error;
return ret;
for (i = 0; i < 0x1000; i += 4) {
nv_wo32(priv->unk4188b4, i, 0x00000010);
nv_wo32(priv->unk4188b8, i, 0x00000010);
}
priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
priv->tpc_total += priv->tpc_nr[gpc];
priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f;
priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
for (i = 0; i < priv->gpc_nr; i++) {
priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
priv->tpc_total += priv->tpc_nr[i];
}
switch (dev_priv->chipset) {
switch (nv_device(priv)->chipset) {
case 0xe4:
if (priv->tpc_total == 8)
priv->magic_not_rop_nr = 3;
@ -616,21 +243,275 @@ nve0_graph_create(struct drm_device *dev)
break;
}
if (!priv->magic_not_rop_nr) {
NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
priv->tpc_nr[3], priv->rop_nr);
priv->magic_not_rop_nr = 0x00;
return 0;
}
/* Point the broadcast 4188xx "bundle" registers at the two scratch
 * objects allocated during construction, clearing the remaining
 * slots first.  Write order matches the hardware init sequence.
 */
static void
nve0_graph_init_obj418880(struct nvc0_graph_priv *priv)
{
	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);

	/* four consecutive slots at 0x0888..0x0894, all cleared */
	nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
	nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);

	/* addresses are programmed shifted right by 8 bits */
	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
}
static void
nve0_graph_init_regs(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x400080, 0x003083c2);
nv_wr32(priv, 0x400088, 0x0001ffe7);
nv_wr32(priv, 0x40008c, 0x00000000);
nv_wr32(priv, 0x400090, 0x00000030);
nv_wr32(priv, 0x40013c, 0x003901f7);
nv_wr32(priv, 0x400140, 0x00000100);
nv_wr32(priv, 0x400144, 0x00000000);
nv_wr32(priv, 0x400148, 0x00000110);
nv_wr32(priv, 0x400138, 0x00000000);
nv_wr32(priv, 0x400130, 0x00000000);
nv_wr32(priv, 0x400134, 0x00000000);
nv_wr32(priv, 0x400124, 0x00000002);
}
/* Program per-unit PGRAPH control registers.
 *
 * The 0xc0000000 writes follow the pattern nouveau uses elsewhere when
 * enabling unit error reporting; the registers themselves are not
 * publicly documented.  NOTE(review): per-register semantics inferred
 * from the surrounding driver style -- confirm against rnndb/envytools.
 */
static void
nve0_graph_init_units(struct nvc0_graph_priv *priv)
{
	/* CTXCTL-area registers (0x409xxx) */
	nv_wr32(priv, 0x409ffc, 0x00000000);
	nv_wr32(priv, 0x409c14, 0x00003e3e);
	nv_wr32(priv, 0x409c24, 0x000f0000);

	nv_wr32(priv, 0x404000, 0xc0000000);
	nv_wr32(priv, 0x404600, 0xc0000000);
	nv_wr32(priv, 0x408030, 0xc0000000);
	nv_wr32(priv, 0x404490, 0xc0000000);
	nv_wr32(priv, 0x406018, 0xc0000000);
	nv_wr32(priv, 0x407020, 0xc0000000);
	nv_wr32(priv, 0x405840, 0xc0000000);
	nv_wr32(priv, 0x405844, 0x00ffffff);

	/* read-modify-write: set a single bit in each of two registers */
	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
}
/* Distribute TPCs across GPCs and program the per-GPC configuration.
 *
 * Fix: this block contained interleaved remnants of the pre-port code
 * (NVOBJ_CLASS registrations, a stray "return 0" and an orphaned
 * "error:" label) spliced into the middle of the function, which made
 * it uncompilable.  The residue has been removed; the remaining logic
 * is unchanged.
 */
static void
nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
{
	/* number of "primitives" assigned per TPC, rounded up */
	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
	u32 data[TPC_MAX / 8];
	u8  tpcnr[GPC_MAX];
	int i, gpc, tpc;

	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);

	/* Round-robin the global TPC indices over the GPCs that still
	 * have unassigned TPCs, packing one 4-bit TPC id per nibble.
	 */
	memset(data, 0x00, sizeof(data));
	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
		do {
			gpc = (gpc + 1) % priv->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}

	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);

	/* per-GPC topology registers: TPC count, total, and the magic ratio */
	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
						     priv->tpc_nr[gpc]);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
	}

	nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
}
/* Second-stage per-GPC/per-TPC init: acknowledge/unmask interrupt and
 * error registers for every GPC and every TPC inside it.  Register
 * write order is preserved from the hardware init sequence.
 */
static void
nve0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
{
	int gpc, tpc;

	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
		/* per-GPC control registers (0xc0000000 pattern matches the
		 * error-reporting enables used throughout nouveau) */
		nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
			/* per-TPC interrupt/error state */
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
		}
		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
	}
}
static void
nve0_graph_init_rop(struct nvc0_graph_priv *priv)
{
int rop;
for (rop = 0; rop < priv->rop_nr; rop++) {
nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
}
}
/* Upload the ctxctl (fuc) microcode to both the HUB (0x409xxx) and GPC
 * (0x41axxx) falcons, start them, and run the boot-time request
 * handshake.  Returns 0 on success, 1 if the caller must re-run the
 * whole init sequence (golden context was just generated), or a
 * negative errno on handshake timeout.
 *
 * NOTE(review): the request numbers (0x10/0x16/0x25/0x30/0x31/0x32) and
 * magic values come from reverse engineering; meanings below are
 * inferred from how the results are used.
 */
static int
nve0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
{
	u32 r000260;

	/* load fuc microcode (ROM shadowing disabled around the upload,
	 * then restored) */
	r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
	nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, &priv->fuc409d);
	nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
	nv_wr32(priv, 0x000260, r000260);

	/* start both of them running */
	nv_wr32(priv, 0x409840, 0xffffffff);
	nv_wr32(priv, 0x41a10c, 0x00000000);
	nv_wr32(priv, 0x40910c, 0x00000000);
	nv_wr32(priv, 0x41a100, 0x00000002);
	nv_wr32(priv, 0x409100, 0x00000002);
	if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
		nv_error(priv, "0x409800 wait failed\n");

	nv_wr32(priv, 0x409840, 0xffffffff);
	nv_wr32(priv, 0x409500, 0x7fffffff);
	nv_wr32(priv, 0x409504, 0x00000021);

	/* request 0x10: result read back into priv->size (context size) */
	nv_wr32(priv, 0x409840, 0xffffffff);
	nv_wr32(priv, 0x409500, 0x00000000);
	nv_wr32(priv, 0x409504, 0x00000010);
	if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
		nv_error(priv, "fuc09 req 0x10 timeout\n");
		return -EBUSY;
	}
	priv->size = nv_rd32(priv, 0x409800);

	/* request 0x16: no result consumed, completion only */
	nv_wr32(priv, 0x409840, 0xffffffff);
	nv_wr32(priv, 0x409500, 0x00000000);
	nv_wr32(priv, 0x409504, 0x00000016);
	if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
		nv_error(priv, "fuc09 req 0x16 timeout\n");
		return -EBUSY;
	}

	/* request 0x25: completion only */
	nv_wr32(priv, 0x409840, 0xffffffff);
	nv_wr32(priv, 0x409500, 0x00000000);
	nv_wr32(priv, 0x409504, 0x00000025);
	if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
		nv_error(priv, "fuc09 req 0x25 timeout\n");
		return -EBUSY;
	}

	/* requests 0x30/0x31/0x32: each takes a magic parameter via
	 * 0x409810 (0x30 clears it first) */
	nv_wr32(priv, 0x409800, 0x00000000);
	nv_wr32(priv, 0x409500, 0x00000001);
	nv_wr32(priv, 0x409504, 0x00000030);
	if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
		nv_error(priv, "fuc09 req 0x30 timeout\n");
		return -EBUSY;
	}

	nv_wr32(priv, 0x409810, 0xb00095c8);
	nv_wr32(priv, 0x409800, 0x00000000);
	nv_wr32(priv, 0x409500, 0x00000001);
	nv_wr32(priv, 0x409504, 0x00000031);
	if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
		nv_error(priv, "fuc09 req 0x31 timeout\n");
		return -EBUSY;
	}

	nv_wr32(priv, 0x409810, 0x00080420);
	nv_wr32(priv, 0x409800, 0x00000000);
	nv_wr32(priv, 0x409500, 0x00000001);
	nv_wr32(priv, 0x409504, 0x00000032);
	if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
		nv_error(priv, "fuc09 req 0x32 timeout\n");
		return -EBUSY;
	}

	nv_wr32(priv, 0x409614, 0x00000070);
	nv_wr32(priv, 0x409614, 0x00000770);
	nv_wr32(priv, 0x40802c, 0x00000001);

	/* first init with no template context: build the golden context
	 * image now, then ask the caller to reset and re-init */
	if (priv->data == NULL) {
		int ret = nve0_grctx_generate(priv);
		if (ret) {
			nv_error(priv, "failed to construct context\n");
			return ret;
		}
		return 1;
	}

	return 0;
}
/* Engine init entry point (nouveau_ofuncs.init).
 *
 * Runs the full PGRAPH bring-up sequence and then the ctxctl firmware
 * handshake.  If the handshake returns 1, the golden context was just
 * generated and the entire init must be re-run, hence the "reset" loop.
 * Returns 0 on success or a negative errno.
 */
static int
nve0_graph_init(struct nouveau_object *object)
{
	struct nvc0_graph_priv *priv = (void *)object;
	int ret;

reset:
	ret = nouveau_graph_init(&priv->base);
	if (ret)
		return ret;

	nve0_graph_init_obj418880(priv);
	nve0_graph_init_regs(priv);
	nve0_graph_init_gpc_0(priv);

	/* enable fifo access and clear pending interrupts */
	nv_wr32(priv, 0x400500, 0x00010001);
	nv_wr32(priv, 0x400100, 0xffffffff);
	nv_wr32(priv, 0x40013c, 0xffffffff);

	nve0_graph_init_units(priv);
	nve0_graph_init_gpc_1(priv);
	nve0_graph_init_rop(priv);

	/* acknowledge/unmask the remaining global interrupt sources */
	nv_wr32(priv, 0x400108, 0xffffffff);
	nv_wr32(priv, 0x400138, 0xffffffff);
	nv_wr32(priv, 0x400118, 0xffffffff);
	nv_wr32(priv, 0x400130, 0xffffffff);
	nv_wr32(priv, 0x40011c, 0xffffffff);
	nv_wr32(priv, 0x400134, 0xffffffff);
	nv_wr32(priv, 0x400054, 0x34ce3464);

	ret = nve0_graph_init_ctxctl(priv);
	if (ret) {
		if (ret == 1)
			goto reset;	/* golden context built; re-init */
		return ret;
	}

	return 0;
}
/* Engine class descriptor for NVE0 (Kepler) PGRAPH.  The ctor/init are
 * the NVE0-specific routines above; dtor is shared with the NVC0
 * (Fermi) implementation and fini is the generic graph engine fini.
 */
struct nouveau_oclass
nve0_graph_oclass = {
	.handle = NV_ENGINE(GR, 0xe0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_graph_ctor,
		.dtor = nvc0_graph_dtor,
		.init = nve0_graph_init,
		.fini = _nouveau_graph_fini,
	},
};

View File

@ -0,0 +1,269 @@
#ifndef __NOUVEAU_GRAPH_REGS_H__
#define __NOUVEAU_GRAPH_REGS_H__

/* PGRAPH register definitions for the pre-NV50 graphics engines.
 * Prefixes (NV03/NV04/NV10/NV20/NV40/NV47/NV50) indicate the earliest
 * chipset generation a definition applies to; some offsets are reused
 * with different meanings between generations.
 */

#define NV04_PGRAPH_DEBUG_0 0x00400080
#define NV04_PGRAPH_DEBUG_1 0x00400084
#define NV04_PGRAPH_DEBUG_2 0x00400088
#define NV04_PGRAPH_DEBUG_3 0x0040008c
#define NV10_PGRAPH_DEBUG_4 0x00400090
#define NV03_PGRAPH_INTR 0x00400100
/* NSTATUS/NSOURCE: interrupt status and error-source bitfields */
#define NV03_PGRAPH_NSTATUS 0x00400104
# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11)
# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12)
# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13)
# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14)
# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
#define NV03_PGRAPH_NSOURCE 0x00400108
# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0)
# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1)
# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2)
# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3)
# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4)
# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5)
# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6)
# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7)
# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8)
# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9)
# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
/* interrupt enable masks */
#define NV03_PGRAPH_INTR_EN 0x00400140
#define NV40_PGRAPH_INTR_EN 0x0040013C
# define NV_PGRAPH_INTR_NOTIFY (1<<0)
# define NV_PGRAPH_INTR_MISSING_HW (1<<4)
# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
# define NV_PGRAPH_INTR_ERROR (1<<20)
/* context-switch state and caches */
#define NV10_PGRAPH_CTX_CONTROL 0x00400144
#define NV10_PGRAPH_CTX_USER 0x00400148
#define NV10_PGRAPH_CTX_SWITCH(i) (0x0040014C + 0x4*(i))
#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
#define NV10_PGRAPH_CTX_CACHE(i, j) (0x00400160 \
+ 0x4*(i) + 0x20*(j))
#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
#define NV04_PGRAPH_CTX_CONTROL 0x00400170
#define NV04_PGRAPH_CTX_USER 0x00400174
#define NV04_PGRAPH_CTX_CACHE1 0x00400180
#define NV03_PGRAPH_CTX_CONTROL 0x00400190
#define NV03_PGRAPH_CTX_USER 0x00400194
#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
/* NV40+/NV50 ctxprog (CTXCTL) microcode interface */
#define NV40_PGRAPH_CTXCTL_0304 0x00400304
#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
#define NV40_PGRAPH_CTXCTL_0310 0x00400310
#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF
#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330
#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff
#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c
#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000
#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff
#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330
#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff
/* 2D state, clipping and combiner registers */
#define NV03_PGRAPH_ABS_X_RAM 0x00400400
#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
#define NV03_PGRAPH_X_MISC 0x00400500
#define NV03_PGRAPH_Y_MISC 0x00400504
#define NV04_PGRAPH_VALID1 0x00400508
#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C
#define NV04_PGRAPH_MISC24_0 0x00400510
#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514
#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518
#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C
#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520
#define NV03_PGRAPH_CLIPX_0 0x00400524
#define NV03_PGRAPH_CLIPX_1 0x00400528
#define NV03_PGRAPH_CLIPY_0 0x0040052C
#define NV03_PGRAPH_CLIPY_1 0x00400530
#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534
#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538
#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540
#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544
#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548
#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560
#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564
#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568
#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C
#define NV04_PGRAPH_MISC24_1 0x00400570
#define NV04_PGRAPH_MISC24_2 0x00400574
#define NV04_PGRAPH_VALID2 0x00400578
#define NV04_PGRAPH_PASSTHRU_0 0x0040057C
#define NV04_PGRAPH_PASSTHRU_1 0x00400580
#define NV04_PGRAPH_PASSTHRU_2 0x00400584
#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588
#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C
#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590
#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594
#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598
#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C
#define NV04_PGRAPH_FORMAT_0 0x004005A8
#define NV04_PGRAPH_FORMAT_1 0x004005AC
#define NV04_PGRAPH_FILTER_0 0x004005B0
#define NV04_PGRAPH_FILTER_1 0x004005B4
#define NV03_PGRAPH_MONO_COLOR0 0x00400600
#define NV04_PGRAPH_ROP3 0x00400604
#define NV04_PGRAPH_BETA_AND 0x00400608
#define NV04_PGRAPH_BETA_PREMULT 0x0040060C
#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610
#define NV04_PGRAPH_FORMATS 0x00400618
#define NV10_PGRAPH_DEBUG_2 0x00400620
/* buffer offset/base/pitch/limit banks */
#define NV04_PGRAPH_BOFFSET0 0x00400640
#define NV04_PGRAPH_BOFFSET1 0x00400644
#define NV04_PGRAPH_BOFFSET2 0x00400648
#define NV04_PGRAPH_BOFFSET3 0x0040064C
#define NV04_PGRAPH_BOFFSET4 0x00400650
#define NV04_PGRAPH_BOFFSET5 0x00400654
#define NV04_PGRAPH_BBASE0 0x00400658
#define NV04_PGRAPH_BBASE1 0x0040065C
#define NV04_PGRAPH_BBASE2 0x00400660
#define NV04_PGRAPH_BBASE3 0x00400664
#define NV04_PGRAPH_BBASE4 0x00400668
#define NV04_PGRAPH_BBASE5 0x0040066C
#define NV04_PGRAPH_BPITCH0 0x00400670
#define NV04_PGRAPH_BPITCH1 0x00400674
#define NV04_PGRAPH_BPITCH2 0x00400678
#define NV04_PGRAPH_BPITCH3 0x0040067C
#define NV04_PGRAPH_BPITCH4 0x00400680
#define NV04_PGRAPH_BLIMIT0 0x00400684
#define NV04_PGRAPH_BLIMIT1 0x00400688
#define NV04_PGRAPH_BLIMIT2 0x0040068C
#define NV04_PGRAPH_BLIMIT3 0x00400690
#define NV04_PGRAPH_BLIMIT4 0x00400694
#define NV04_PGRAPH_BLIMIT5 0x00400698
#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
/* engine status, trap info, fifo interface */
#define NV03_PGRAPH_STATUS 0x004006B0
#define NV04_PGRAPH_STATUS 0x00400700
# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000
#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
#define NV04_PGRAPH_SURFACE 0x0040070C
#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
#define NV04_PGRAPH_STATE 0x00400710
#define NV10_PGRAPH_SURFACE 0x00400710
#define NV04_PGRAPH_NOTIFY 0x00400714
#define NV10_PGRAPH_STATE 0x00400714
#define NV10_PGRAPH_NOTIFY 0x00400718
#define NV04_PGRAPH_FIFO 0x00400720
#define NV04_PGRAPH_BPIXEL 0x00400724
#define NV10_PGRAPH_RDI_INDEX 0x00400750
#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
#define NV10_PGRAPH_RDI_DATA 0x00400754
#define NV04_PGRAPH_DMA_PITCH 0x00400760
#define NV10_PGRAPH_FFINTFC_FIFO_PTR 0x00400760
#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
#define NV10_PGRAPH_FFINTFC_ST2_DL 0x00400768
#define NV10_PGRAPH_FFINTFC_ST2_DH 0x0040076c
#define NV10_PGRAPH_DMA_PITCH 0x00400770
#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
/* NV20+ per-channel context table */
#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
#define NV04_PGRAPH_PATT_COLOR0 0x00400800
#define NV04_PGRAPH_PATT_COLOR1 0x00400804
#define NV04_PGRAPH_PATTERN 0x00400808
#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810
#define NV04_PGRAPH_CHROMA 0x00400814
#define NV04_PGRAPH_CONTROL0 0x00400818
#define NV04_PGRAPH_CONTROL1 0x0040081C
#define NV04_PGRAPH_CONTROL2 0x00400820
#define NV04_PGRAPH_BLEND 0x00400824
#define NV04_PGRAPH_STORED_FMT 0x00400830
#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
/* tiled-memory region banks (location varies by generation) */
#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
#define NV04_PGRAPH_U_RAM 0x00400D00
#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
/* NV10 register combiners and transform pipeline access */
#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
#define NV10_PGRAPH_XFMODE0 0x00400F40
#define NV10_PGRAPH_XFMODE1 0x00400F44
#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48
#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C
#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50
#define NV10_PGRAPH_PIPE_DATA 0x00400F54
/* DMA object state */
#define NV04_PGRAPH_DMA_START_0 0x00401000
#define NV04_PGRAPH_DMA_START_1 0x00401004
#define NV04_PGRAPH_DMA_LENGTH 0x00401008
#define NV04_PGRAPH_DMA_MISC 0x0040100C
#define NV04_PGRAPH_DMA_DATA_0 0x00401020
#define NV04_PGRAPH_DMA_DATA_1 0x00401024
#define NV04_PGRAPH_DMA_RM 0x00401030
#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040
#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044
#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048
#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C
#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050
#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054
#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058
#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C
#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060
#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080
#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084
#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088
#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C
#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090
#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094
#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))

#endif /* __NOUVEAU_GRAPH_REGS_H__ */

View File

@ -1,5 +1,5 @@
/*
* Copyright 2011 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,159 +22,62 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include <engine/fifo.h>
#include <core/ramht.h>
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/engine/graph/nv40.h>
struct nv31_mpeg_engine {
struct nouveau_exec_engine base;
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <subdev/instmem.h>
#include <engine/mpeg.h>
/* Engine-private state for NV31-family PMPEG. */
struct nv31_mpeg_priv {
	struct nouveau_mpeg base;
	/* PMPEG here has only one hardware context: refcount gates channel
	 * creation via atomic_add_unless(.., 1, 1) in the context ctor. */
	atomic_t refcount;
};

/* Per-channel context object; carries no state beyond the base object. */
struct nv31_mpeg_chan {
	struct nouveau_object base;
};
/*******************************************************************************
* MPEG object classes
******************************************************************************/
static int
nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
nv31_mpeg_object_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
return -EBUSY;
chan->engctx[engine] = (void *)0xdeadcafe;
return 0;
}
static void
nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
atomic_dec(&pmpeg->refcount);
chan->engctx[engine] = NULL;
}
static int
nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ctx = NULL;
unsigned long flags;
struct nouveau_gpuobj *obj;
int ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &ctx);
ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
20, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
nv_wo32(ctx, 0x78, 0x02001ec1);
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
nv_wr32(dev, 0x00330c, ctx->addr >> 4);
nv_wo32(chan->ramfc, 0x54, ctx->addr >> 4);
nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
chan->engctx[engine] = ctx;
return 0;
}
static void
nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_gpuobj *ctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
unsigned long flags;
u32 inst = 0x80000000 | (ctx->addr >> 4);
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
if (nv_rd32(dev, 0x00b318) == inst)
nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
nouveau_gpuobj_ref(NULL, &ctx);
chan->engctx[engine] = NULL;
}
static int
nv31_mpeg_object_new(struct nouveau_channel *chan, int engine,
u32 handle, u16 class)
{
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *obj = NULL;
int ret;
ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &obj);
if (ret)
return ret;
obj->engine = 2;
obj->class = class;
nv_wo32(obj, 0x00, class);
ret = nouveau_ramht_insert(chan, handle, obj);
nouveau_gpuobj_ref(NULL, &obj);
return ret;
}
static int
nv31_mpeg_init(struct drm_device *dev, int engine)
{
struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
int i;
/* VPE init */
nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
for (i = 0; i < nvfb_tile_nr(dev); i++)
pmpeg->base.set_tile_region(dev, i);
/* PMPEG init */
nv_wr32(dev, 0x00b32c, 0x00000000);
nv_wr32(dev, 0x00b314, 0x00000100);
nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
nv_wr32(dev, 0x00b300, 0x02001ec1);
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
nv_wr32(dev, 0x00b100, 0xffffffff);
nv_wr32(dev, 0x00b140, 0xffffffff);
if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
return -EBUSY;
}
nv_wo32(obj, 0x00, nv_mclass(obj));
nv_wo32(obj, 0x04, 0x00000000);
nv_wo32(obj, 0x08, 0x00000000);
nv_wo32(obj, 0x0c, 0x00000000);
return 0;
}
static int
nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
{
/*XXX: context save? */
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
nv_wr32(dev, 0x00b140, 0x00000000);
return 0;
}
static int
nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
struct drm_device *dev = chan->dev;
u32 inst = data << 4;
u32 dma0 = nv_ri32(dev, inst + 0);
u32 dma1 = nv_ri32(dev, inst + 4);
u32 dma2 = nv_ri32(dev, inst + 8);
struct nouveau_instmem *imem = nouveau_instmem(object);
struct nv31_mpeg_priv *priv = (void *)object->engine;
u32 inst = *(u32 *)arg << 4;
u32 dma0 = nv_ro32(imem, inst + 0);
u32 dma1 = nv_ro32(imem, inst + 4);
u32 dma2 = nv_ro32(imem, inst + 8);
u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
u32 size = dma1 + 1;
@ -184,160 +87,215 @@ nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
if (mthd == 0x0190) {
/* DMA_CMD */
nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000));
nv_wr32(dev, 0x00b334, base);
nv_wr32(dev, 0x00b324, size);
nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
nv_wr32(priv, 0x00b334, base);
nv_wr32(priv, 0x00b324, size);
} else
if (mthd == 0x01a0) {
/* DMA_DATA */
nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
nv_wr32(dev, 0x00b360, base);
nv_wr32(dev, 0x00b364, size);
nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
nv_wr32(priv, 0x00b360, base);
nv_wr32(priv, 0x00b364, size);
} else {
/* DMA_IMAGE, VRAM only */
if (dma0 & 0x000c0000)
return -EINVAL;
nv_wr32(dev, 0x00b370, base);
nv_wr32(dev, 0x00b374, size);
nv_wr32(priv, 0x00b370, base);
nv_wr32(priv, 0x00b374, size);
}
return 0;
}
/* Object functions for NV31 PMPEG class objects: custom ctor above,
 * everything else delegated to the generic gpuobj implementations.
 */
struct nouveau_ofuncs
nv31_mpeg_ofuncs = {
	.ctor = nv31_mpeg_object_ctor,
	.dtor = _nouveau_gpuobj_dtor,
	.init = _nouveau_gpuobj_init,
	.fini = _nouveau_gpuobj_fini,
	.rd32 = _nouveau_gpuobj_rd32,
	.wr32 = _nouveau_gpuobj_wr32,
};
/* Software methods: the three DMA-object bind methods (CMD/DATA/IMAGE)
 * are all routed to nv31_mpeg_mthd_dma, which dispatches on the method
 * offset.  Terminated by an empty entry.
 */
struct nouveau_omthds
nv31_mpeg_omthds[] = {
	{ 0x0190, nv31_mpeg_mthd_dma },
	{ 0x01a0, nv31_mpeg_mthd_dma },
	{ 0x01b0, nv31_mpeg_mthd_dma },
	{}
};
/* Object classes exposed by this engine: a single MPEG class (0x3174)
 * using the ofuncs/omthds above.  Terminated by an empty entry.
 */
struct nouveau_oclass
nv31_mpeg_sclass[] = {
	{ 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
	{}
};
/*******************************************************************************
* PMPEG context
******************************************************************************/
/* Create a PMPEG channel context.
 *
 * Fix: this block contained the body of the removed pre-port helper
 * nv31_mpeg_isr_chid (pfifo channel scan) spliced into the middle of
 * the new constructor, which made it uncompilable.  The residue has
 * been removed; the remaining logic is unchanged.
 *
 * PMPEG on these chips has a single hardware context, so only one
 * channel may own the engine at a time: refuse with -EBUSY otherwise.
 * NOTE(review): if nouveau_object_create() fails after the refcount
 * was taken, the count is not released here -- confirm the dtor path.
 */
static int
nv31_mpeg_context_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nv31_mpeg_priv *priv = (void *)engine;
	struct nv31_mpeg_chan *chan;
	int ret;

	/* only one context can exist at a time */
	if (!atomic_add_unless(&priv->refcount, 1, 1))
		return -EBUSY;

	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	return 0;
}
static void
nv31_vpe_set_tile_region(struct drm_device *dev, int i)
nv31_mpeg_context_dtor(struct nouveau_object *object)
{
struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch);
nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit);
nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr);
struct nv31_mpeg_priv *priv = (void *)object->engine;
struct nv31_mpeg_chan *chan = (void *)object;
atomic_dec(&priv->refcount);
nouveau_object_destroy(&chan->base);
}
static void
nv31_mpeg_isr(struct drm_device *dev)
static struct nouveau_oclass
nv31_mpeg_cclass = {
.handle = NV_ENGCTX(MPEG, 0x31),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv31_mpeg_context_ctor,
.dtor = nv31_mpeg_context_dtor,
.init = nouveau_object_init,
.fini = nouveau_object_fini,
},
};
/*******************************************************************************
* PMPEG engine/subdev functions
******************************************************************************/
/* Program one VPE tiled-memory region from the FB subdev's tile state.
 *
 * Fix: local variable declarations from the removed nv31_mpeg_isr()
 * were spliced into this function (diff residue); they have been
 * removed and the register writes are unchanged.
 */
void
nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
{
	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
	struct nv31_mpeg_priv *priv = (void *)engine;

	nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
	nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
	nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
}
/* PMPEG interrupt handler (shared with nv40): decode instance/type/
 * method/data from MMIO, dispatch class-0x3174 methods to the bound
 * context object, ack the interrupt, and log anything unhandled.
 */
void
nv31_mpeg_intr(struct nouveau_subdev *subdev)
{
	struct nv31_mpeg_priv *priv = (void *)subdev;
	struct nouveau_engine *engine = nv_engine(subdev);
	struct nouveau_handle *handle = NULL;
	u32 inst = (nv_rd32(priv, 0x00b318) & 0x000fffff) << 4;
	u32 stat = nv_rd32(priv, 0x00b100);
	u32 type = nv_rd32(priv, 0x00b230);
	u32 mthd = nv_rd32(priv, 0x00b234);
	u32 data = nv_rd32(priv, 0x00b238);
	u32 show = stat;

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
			show &= ~0x01000000;
		}

		if (type == 0x00000010) {
			handle = nouveau_engctx_lookup_class(engine, inst, 0x3174);
			if (handle) {
				if (!nv_call(handle->object, mthd, data))
					show &= ~0x01000000;
				/* drop the lookup reference even when the
				 * method call fails — previously leaked on
				 * the failure path
				 */
				nouveau_engctx_handle_put(handle);
			}
		}
	}

	nv_wr32(priv, 0x00b100, stat);
	nv_wr32(priv, 0x00b230, 0x00000001);

	if (show) {
		/* five conversions, five arguments: the old string carried a
		 * stray "ch %d" with no matching argument (undefined
		 * behaviour per C11 fprintf)
		 */
		nv_error(priv, "[0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 inst, stat, type, mthd, data);
	}
}
/* Construct the NV31 PMPEG engine object and hook up its interrupt
 * handler, PMC enable bit and classes.  (Interleaved fragments of the
 * removed nv31_vpe_isr()/nv31_mpeg_destroy() deleted.)
 */
static int
nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv31_mpeg_priv *priv;
	int ret;

	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00000002;	/* PMC_ENABLE bit for PMPEG */
	nv_subdev(priv)->intr = nv31_mpeg_intr;
	nv_engine(priv)->cclass = &nv31_mpeg_cclass;
	nv_engine(priv)->sclass = nv31_mpeg_sclass;
	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
	return 0;
}
int
nv31_mpeg_create(struct drm_device *dev)
nv31_mpeg_init(struct nouveau_object *object)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv31_mpeg_engine *pmpeg;
struct nouveau_engine *engine = nv_engine(object->engine);
struct nv31_mpeg_priv *priv = (void *)engine;
struct nouveau_fb *pfb = nouveau_fb(object);
int ret, i;
pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
if (!pmpeg)
return -ENOMEM;
atomic_set(&pmpeg->refcount, 0);
ret = nouveau_mpeg_init(&priv->base);
if (ret)
return ret;
pmpeg->base.destroy = nv31_mpeg_destroy;
pmpeg->base.init = nv31_mpeg_init;
pmpeg->base.fini = nv31_mpeg_fini;
if (dev_priv->card_type < NV_40) {
pmpeg->base.context_new = nv31_mpeg_context_new;
pmpeg->base.context_del = nv31_mpeg_context_del;
} else {
pmpeg->base.context_new = nv40_mpeg_context_new;
pmpeg->base.context_del = nv40_mpeg_context_del;
/* VPE init */
nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
for (i = 0; i < pfb->tile.regions; i++)
engine->tile_prog(engine, i);
/* PMPEG init */
nv_wr32(priv, 0x00b32c, 0x00000000);
nv_wr32(priv, 0x00b314, 0x00000100);
nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
nv_wr32(priv, 0x00b300, 0x02001ec1);
nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
nv_wr32(priv, 0x00b100, 0xffffffff);
nv_wr32(priv, 0x00b140, 0xffffffff);
if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
return -EBUSY;
}
pmpeg->base.object_new = nv31_mpeg_object_new;
/* ISR vector, PMC_ENABLE bit, and TILE regs are shared between
* all VPE engines, for this driver's purposes the PMPEG engine
* will be treated as the "master" and handle the global VPE
* bits too
*/
pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
nouveau_irq_register(dev, 0, nv31_vpe_isr);
NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
NVOBJ_CLASS(dev, 0x3174, MPEG);
NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);
#if 0
NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
NVOBJ_CLASS(dev, 0x4075, ME);
#endif
return 0;
}
/* NV31 PMPEG engine class: custom ctor/init, generic mpeg dtor/fini. */
struct nouveau_oclass
nv31_mpeg_oclass = {
.handle = NV_ENGINE(MPEG, 0x31),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv31_mpeg_ctor,
.dtor = _nouveau_mpeg_dtor,
.init = nv31_mpeg_init,
.fini = _nouveau_mpeg_fini,
},
};

View File

@ -0,0 +1,144 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <subdev/instmem.h>
#include <engine/mpeg.h>
#include <engine/graph/nv40.h>
/* Engine (subdev) instance state for NV40 PMPEG. */
struct nv40_mpeg_priv {
	struct nouveau_mpeg base;
};

/* Per-channel PMPEG context.  Embeds nouveau_mpeg_chan (not the engine
 * type) to match nouveau_mpeg_context_create() in the context ctor and
 * the sibling nv50/nv84/nv98 chan structs.
 */
struct nv40_mpeg_chan {
	struct nouveau_mpeg_chan base;
};
/*******************************************************************************
* PMPEG context
******************************************************************************/
/* Allocate a 264-dword, 16-byte-aligned, zeroed PMPEG context for one
 * channel.  No register values are preloaded on NV40.
 */
static int
nv40_mpeg_context_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nv40_mpeg_chan *mpegch;
	int ret;

	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
					  264 * 4, 16,
					  NVOBJ_FLAG_ZERO_ALLOC, &mpegch);
	*pobject = nv_object(mpegch);
	return ret;
}
/* Detach this context from PMPEG on channel teardown/suspend.
 * NOTE(review): 0xb32c bit 0 appears to gate engine execution and
 * 0xb318 holds the current context instance — confirm against hw docs.
 */
static int
nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend)
{
struct nv40_mpeg_priv *priv = (void *)object->engine;
struct nv40_mpeg_chan *chan = (void *)object;
/* instance address of this context, tagged valid (bit 31) */
u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
/* only clear the current-context register if it still points at us */
if (nv_rd32(priv, 0x00b318) == inst)
nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
return 0;
}
/* NV40 PMPEG context class: generic gpuobj-backed context with a custom
 * fini that detaches the context from the hardware first.
 */
static struct nouveau_oclass
nv40_mpeg_cclass = {
.handle = NV_ENGCTX(MPEG, 0x40),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv40_mpeg_context_ctor,
.dtor = _nouveau_mpeg_context_dtor,
.init = _nouveau_mpeg_context_init,
.fini = nv40_mpeg_context_fini,
.rd32 = _nouveau_mpeg_context_rd32,
.wr32 = _nouveau_mpeg_context_wr32,
},
};
/*******************************************************************************
* PMPEG engine/subdev functions
******************************************************************************/
/* NV40 interrupt handler: PMPEG interrupts are delegated to the shared
 * nv31 handler; PMSRCH status is logged and acked here.
 */
static void
nv40_mpeg_intr(struct nouveau_subdev *subdev)
{
	struct nv40_mpeg_priv *priv = (void *)subdev;
	u32 stat;

	stat = nv_rd32(priv, 0x00b100);
	if (stat)
		nv31_mpeg_intr(subdev);

	stat = nv_rd32(priv, 0x00b800);
	if (stat) {
		nv_error(priv, "PMSRCH 0x%08x\n", stat);
		nv_wr32(priv, 0x00b800, stat);
	}
}
/* Construct the NV40 PMPEG engine: nv40-specific interrupt handler and
 * context class, sclass/tile_prog shared with nv31.
 */
static int
nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv40_mpeg_priv *priv;
int ret;
ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
/* PMC_ENABLE bit for PMPEG */
nv_subdev(priv)->unit = 0x00000002;
nv_subdev(priv)->intr = nv40_mpeg_intr;
nv_engine(priv)->cclass = &nv40_mpeg_cclass;
nv_engine(priv)->sclass = nv31_mpeg_sclass;
nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
return 0;
}
/* NV40 PMPEG engine class; init is shared with nv31. */
struct nouveau_oclass
nv40_mpeg_oclass = {
.handle = NV_ENGINE(MPEG, 0x40),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv40_mpeg_ctor,
.dtor = _nouveau_mpeg_dtor,
.init = nv31_mpeg_init,
.fini = _nouveau_mpeg_fini,
},
};

View File

@ -1,5 +1,5 @@
/*
* Copyright 2011 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,218 +22,219 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include <core/ramht.h>
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
struct nv50_mpeg_engine {
struct nouveau_exec_engine base;
#include <subdev/vm.h>
#include <subdev/bar.h>
#include <subdev/timer.h>
#include <engine/mpeg.h>
/* Engine (subdev) instance state for NV50 PMPEG. */
struct nv50_mpeg_priv {
	struct nouveau_mpeg base;
};

/* Per-channel PMPEG context state.  (Interleaved fragment of the removed
 * legacy CTX_PTR() helper, which depended on the old drm_device API,
 * has been deleted.)
 */
struct nv50_mpeg_chan {
	struct nouveau_mpeg_chan base;
};
/*******************************************************************************
* MPEG object classes
******************************************************************************/
/* Construct a PMPEG engine object (class instance): a small 16-byte
 * gpuobj initialised with the object's class id.  (Interleaved bodies of
 * the removed nv50_mpeg_context_new()/context_del()/object_new(), which
 * used the old nouveau_channel/ramht API, have been deleted.)
 */
static int
nv50_mpeg_object_ctor(struct nouveau_object *parent,
		      struct nouveau_object *engine,
		      struct nouveau_oclass *oclass, void *data, u32 size,
		      struct nouveau_object **pobject)
{
	struct nouveau_gpuobj *obj;
	int ret;

	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
				    16, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, nv_mclass(obj));
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	return 0;
}
/* Object funcs for NV50 PMPEG engine objects: standard gpuobj behaviour
 * plus the custom ctor above.  Exported for reuse by nv84.
 * (Trailing remnants of the removed ramht-based object_new() deleted.)
 */
struct nouveau_ofuncs
nv50_mpeg_ofuncs = {
	.ctor = nv50_mpeg_object_ctor,
	.dtor = _nouveau_gpuobj_dtor,
	.init = _nouveau_gpuobj_init,
	.fini = _nouveau_gpuobj_fini,
	.rd32 = _nouveau_gpuobj_rd32,
	.wr32 = _nouveau_gpuobj_wr32,
};
/* User object classes exposed by NV50 PMPEG: only 0x3174. */
static struct nouveau_oclass
nv50_mpeg_sclass[] = {
{ 0x3174, &nv50_mpeg_ofuncs },
{}
};
/*******************************************************************************
* PMPEG context
******************************************************************************/
/* Allocate a 128-dword zeroed PMPEG context, preload the two non-zero
 * words at 0x70/0x7c (magic init values, purpose undocumented), and
 * flush the writes through BAR.  Exported for reuse by nv84.
 */
int
nv50_mpeg_context_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, void *data, u32 size,
		       struct nouveau_object **pobject)
{
	struct nouveau_bar *bar = nouveau_bar(parent);
	struct nv50_mpeg_chan *ectx;
	int ret;

	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
					  0, NVOBJ_FLAG_ZERO_ALLOC, &ectx);
	*pobject = nv_object(ectx);
	if (ret)
		return ret;

	nv_wo32(ectx, 0x0070, 0x00801ec1);
	nv_wo32(ectx, 0x007c, 0x0000037c);
	bar->flush(bar);
	return 0;
}
/* NV50 PMPEG context class: generic gpuobj-backed context lifecycle
 * around the custom ctor above.
 */
static struct nouveau_oclass
nv50_mpeg_cclass = {
.handle = NV_ENGCTX(MPEG, 0x50),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_mpeg_context_ctor,
.dtor = _nouveau_mpeg_context_dtor,
.init = _nouveau_mpeg_context_init,
.fini = _nouveau_mpeg_context_fini,
.rd32 = _nouveau_mpeg_context_rd32,
.wr32 = _nouveau_mpeg_context_wr32,
},
};
/*******************************************************************************
* PMPEG engine/subdev functions
******************************************************************************/
/* Flush PMPEG's VM TLB (engine id 0x08 in the NV50 VM flush register).
 * Exported for reuse by nv84.
 */
int
nv50_mpeg_tlb_flush(struct nouveau_engine *engine)
{
nv50_vm_flush_engine(&engine->base, 0x08);
return 0;
}
/* NV50 PMPEG interrupt handler (also used by nv84): decode the pending
 * status, silently handle the known initial-bind case, log the rest,
 * then ack the interrupt and poke the FB trap handler.
 */
void
nv50_mpeg_intr(struct nouveau_subdev *subdev)
{
struct nv50_mpeg_priv *priv = (void *)subdev;
u32 stat = nv_rd32(priv, 0x00b100);
u32 type = nv_rd32(priv, 0x00b230);
u32 mthd = nv_rd32(priv, 0x00b234);
u32 data = nv_rd32(priv, 0x00b238);
u32 show = stat;
if (stat & 0x01000000) {
/* happens on initial binding of the object */
if (type == 0x00000020 && mthd == 0x0000) {
nv_wr32(priv, 0x00b308, 0x00000100);
show &= ~0x01000000;
}
}
if (show) {
nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n",
stat, type, mthd, data);
}
/* ack: write status back, then signal interrupt handled */
nv_wr32(priv, 0x00b100, stat);
nv_wr32(priv, 0x00b230, 0x00000001);
nv50_fb_trap(nouveau_fb(priv), 1);
}
static void
nv50_mpeg_tlb_flush(struct drm_device *dev, int engine)
nv50_vpe_intr(struct nouveau_subdev *subdev)
{
nv50_vm_flush_engine(dev, 0x08);
struct nv50_mpeg_priv *priv = (void *)subdev;
if (nv_rd32(priv, 0x00b100))
nv50_mpeg_intr(subdev);
if (nv_rd32(priv, 0x00b800)) {
u32 stat = nv_rd32(priv, 0x00b800);
nv_info(priv, "PMSRCH: 0x%08x\n", stat);
nv_wr32(priv, 0xb800, stat);
}
}
static int
nv50_mpeg_init(struct drm_device *dev, int engine)
nv50_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
nv_wr32(dev, 0x00b32c, 0x00000000);
nv_wr32(dev, 0x00b314, 0x00000100);
nv_wr32(dev, 0x00b0e0, 0x0000001a);
struct nv50_mpeg_priv *priv;
int ret;
nv_wr32(dev, 0x00b220, 0x00000044);
nv_wr32(dev, 0x00b300, 0x00801ec1);
nv_wr32(dev, 0x00b390, 0x00000000);
nv_wr32(dev, 0x00b394, 0x00000000);
nv_wr32(dev, 0x00b398, 0x00000000);
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_wr32(dev, 0x00b100, 0xffffffff);
nv_wr32(dev, 0x00b140, 0xffffffff);
nv_subdev(priv)->unit = 0x00400002;
nv_subdev(priv)->intr = nv50_vpe_intr;
nv_engine(priv)->cclass = &nv50_mpeg_cclass;
nv_engine(priv)->sclass = nv50_mpeg_sclass;
nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
return 0;
}
if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
int
nv50_mpeg_init(struct nouveau_object *object)
{
struct nv50_mpeg_priv *priv = (void *)object;
int ret;
ret = nouveau_mpeg_init(&priv->base);
if (ret)
return ret;
nv_wr32(priv, 0x00b32c, 0x00000000);
nv_wr32(priv, 0x00b314, 0x00000100);
nv_wr32(priv, 0x00b0e0, 0x0000001a);
nv_wr32(priv, 0x00b220, 0x00000044);
nv_wr32(priv, 0x00b300, 0x00801ec1);
nv_wr32(priv, 0x00b390, 0x00000000);
nv_wr32(priv, 0x00b394, 0x00000000);
nv_wr32(priv, 0x00b398, 0x00000000);
nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
nv_wr32(priv, 0x00b100, 0xffffffff);
nv_wr32(priv, 0x00b140, 0xffffffff);
if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
return -EBUSY;
}
return 0;
}
static int
nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
nv_wr32(dev, 0x00b140, 0x00000000);
return 0;
}
static void
nv50_mpeg_isr(struct drm_device *dev)
{
u32 stat = nv_rd32(dev, 0x00b100);
u32 type = nv_rd32(dev, 0x00b230);
u32 mthd = nv_rd32(dev, 0x00b234);
u32 data = nv_rd32(dev, 0x00b238);
u32 show = stat;
if (stat & 0x01000000) {
/* happens on initial binding of the object */
if (type == 0x00000020 && mthd == 0x0000) {
nv_wr32(dev, 0x00b308, 0x00000100);
show &= ~0x01000000;
}
}
if (show && nouveau_ratelimit()) {
NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n",
stat, type, mthd, data);
}
nv_wr32(dev, 0x00b100, stat);
nv_wr32(dev, 0x00b230, 0x00000001);
nv50_fb_vm_trap(dev, 1);
}
static void
nv50_vpe_isr(struct drm_device *dev)
{
if (nv_rd32(dev, 0x00b100))
nv50_mpeg_isr(dev);
if (nv_rd32(dev, 0x00b800)) {
u32 stat = nv_rd32(dev, 0x00b800);
NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
nv_wr32(dev, 0xb800, stat);
}
}
static void
nv50_mpeg_destroy(struct drm_device *dev, int engine)
{
struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine);
nouveau_irq_unregister(dev, 0);
NVOBJ_ENGINE_DEL(dev, MPEG);
kfree(pmpeg);
}
int
nv50_mpeg_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_mpeg_engine *pmpeg;
pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
if (!pmpeg)
return -ENOMEM;
pmpeg->base.destroy = nv50_mpeg_destroy;
pmpeg->base.init = nv50_mpeg_init;
pmpeg->base.fini = nv50_mpeg_fini;
pmpeg->base.context_new = nv50_mpeg_context_new;
pmpeg->base.context_del = nv50_mpeg_context_del;
pmpeg->base.object_new = nv50_mpeg_object_new;
pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush;
if (dev_priv->chipset == 0x50) {
nouveau_irq_register(dev, 0, nv50_vpe_isr);
NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
NVOBJ_CLASS(dev, 0x3174, MPEG);
#if 0
NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
NVOBJ_CLASS(dev, 0x4075, ME);
#endif
} else {
nouveau_irq_register(dev, 0, nv50_mpeg_isr);
NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
NVOBJ_CLASS(dev, 0x8274, MPEG);
}
return 0;
}
/* NV50 PMPEG engine class: custom ctor/init, generic mpeg dtor/fini. */
struct nouveau_oclass
nv50_mpeg_oclass = {
.handle = NV_ENGINE(MPEG, 0x50),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_mpeg_ctor,
.dtor = _nouveau_mpeg_dtor,
.init = nv50_mpeg_init,
.fini = _nouveau_mpeg_fini,
},
};

View File

@ -0,0 +1,104 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
#include <subdev/timer.h>
#include <engine/mpeg.h>
/* Engine (subdev) instance state for NV84 PMPEG. */
struct nv84_mpeg_priv {
struct nouveau_mpeg base;
};
/* Per-channel PMPEG context state. */
struct nv84_mpeg_chan {
struct nouveau_mpeg_chan base;
};
/*******************************************************************************
* MPEG object classes
******************************************************************************/
/* NV84 exposes class 0x8274 instead of NV50's 0x3174; object behaviour
 * is otherwise identical, so nv50's ofuncs are reused.
 */
static struct nouveau_oclass
nv84_mpeg_sclass[] = {
{ 0x8274, &nv50_mpeg_ofuncs },
{}
};
/*******************************************************************************
* PMPEG context
******************************************************************************/
/* NV84 PMPEG context class: fully shared with nv50 except the handle. */
static struct nouveau_oclass
nv84_mpeg_cclass = {
.handle = NV_ENGCTX(MPEG, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_mpeg_context_ctor,
.dtor = _nouveau_mpeg_context_dtor,
.init = _nouveau_mpeg_context_init,
.fini = _nouveau_mpeg_context_fini,
.rd32 = _nouveau_mpeg_context_rd32,
.wr32 = _nouveau_mpeg_context_wr32,
},
};
/*******************************************************************************
* PMPEG engine/subdev functions
******************************************************************************/
/* Construct the NV84 PMPEG engine: nv84 classes, nv50 interrupt handler
 * and TLB flush.
 */
static int
nv84_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv84_mpeg_priv *mpeg;
	int ret;

	ret = nouveau_mpeg_create(parent, engine, oclass, &mpeg);
	*pobject = nv_object(mpeg);
	if (ret)
		return ret;

	nv_subdev(mpeg)->unit = 0x00000002;	/* PMC_ENABLE bit for PMPEG */
	nv_subdev(mpeg)->intr = nv50_mpeg_intr;
	nv_engine(mpeg)->cclass = &nv84_mpeg_cclass;
	nv_engine(mpeg)->sclass = nv84_mpeg_sclass;
	nv_engine(mpeg)->tlb_flush = nv50_mpeg_tlb_flush;
	return 0;
}
/* NV84 PMPEG engine class; init is shared with nv50. */
struct nouveau_oclass
nv84_mpeg_oclass = {
.handle = NV_ENGINE(MPEG, 0x84),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv84_mpeg_ctor,
.dtor = _nouveau_mpeg_dtor,
.init = nv50_mpeg_init,
.fini = _nouveau_mpeg_fini,
},
};

View File

@ -1,5 +1,5 @@
/*
* Copyright 2011 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,56 +22,154 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <core/ramht.h>
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
struct nv98_ppp_engine {
struct nouveau_exec_engine base;
#include <engine/ppp.h>
/* Engine (subdev) instance state for NV98 PPP. */
struct nv98_ppp_priv {
	struct nouveau_ppp base;
};

/* Per-channel PPP context state.  (Interleaved body of the removed
 * drm_device-based nv98_ppp_fini() deleted.)
 */
struct nv98_ppp_chan {
	struct nouveau_ppp_chan base;
};
/*******************************************************************************
* PPP object classes
******************************************************************************/
/* No user object classes are exposed yet; empty list terminator only. */
static struct nouveau_oclass
nv98_ppp_sclass[] = {
{},
};
/*******************************************************************************
* PPPP context
******************************************************************************/
static int
nv98_ppp_init(struct drm_device *dev, int engine)
nv98_ppp_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
struct nv98_ppp_chan *priv;
int ret;
ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
0, 0, 0, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
return 0;
}
static void
nv98_ppp_destroy(struct drm_device *dev, int engine)
nv98_ppp_context_dtor(struct nouveau_object *object)
{
struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
NVOBJ_ENGINE_DEL(dev, PPP);
kfree(pppp);
struct nv98_ppp_chan *priv = (void *)object;
nouveau_ppp_context_destroy(&priv->base);
}
int
nv98_ppp_create(struct drm_device *dev)
static int
nv98_ppp_context_init(struct nouveau_object *object)
{
struct nv98_ppp_engine *pppp;
struct nv98_ppp_chan *priv = (void *)object;
int ret;
pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
if (!pppp)
return -ENOMEM;
ret = nouveau_ppp_context_init(&priv->base);
if (ret)
return ret;
pppp->base.destroy = nv98_ppp_destroy;
pppp->base.init = nv98_ppp_init;
pppp->base.fini = nv98_ppp_fini;
NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
return 0;
}
/* Tear down the per-channel PPP context via the common helper. */
static int
nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
{
	struct nv98_ppp_chan *chan = (void *)object;

	return nouveau_ppp_context_fini(&chan->base, suspend);
}
/* NV98 PPP context class: thin wrappers over the common ppp context
 * helpers.
 */
static struct nouveau_oclass
nv98_ppp_cclass = {
.handle = NV_ENGCTX(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_ppp_context_ctor,
.dtor = nv98_ppp_context_dtor,
.init = nv98_ppp_context_init,
.fini = nv98_ppp_context_fini,
.rd32 = _nouveau_ppp_context_rd32,
.wr32 = _nouveau_ppp_context_wr32,
},
};
/*******************************************************************************
* PPPP engine/subdev functions
******************************************************************************/
/* Interrupt stub: PPP interrupts are not decoded/handled yet. */
static void
nv98_ppp_intr(struct nouveau_subdev *subdev)
{
}
/* Construct the NV98 PPP engine object and register its classes and
 * (stub) interrupt handler.
 */
static int
nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nv98_ppp_priv *ppp;
	int ret;

	ret = nouveau_ppp_create(parent, engine, oclass, &ppp);
	*pobject = nv_object(ppp);
	if (ret)
		return ret;

	nv_subdev(ppp)->unit = 0x00400002;	/* PMC_ENABLE bits for PPP */
	nv_subdev(ppp)->intr = nv98_ppp_intr;
	nv_engine(ppp)->cclass = &nv98_ppp_cclass;
	nv_engine(ppp)->sclass = nv98_ppp_sclass;
	return 0;
}
/* Destroy the PPP engine object via the common helper. */
static void
nv98_ppp_dtor(struct nouveau_object *object)
{
struct nv98_ppp_priv *priv = (void *)object;
nouveau_ppp_destroy(&priv->base);
}
/* Engine init: nothing chip-specific yet, just the common ppp init. */
static int
nv98_ppp_init(struct nouveau_object *object)
{
struct nv98_ppp_priv *priv = (void *)object;
int ret;
ret = nouveau_ppp_init(&priv->base);
if (ret)
return ret;
return 0;
}
/* Engine fini: delegate to the common ppp fini. */
static int
nv98_ppp_fini(struct nouveau_object *object, bool suspend)
{
struct nv98_ppp_priv *priv = (void *)object;
return nouveau_ppp_fini(&priv->base, suspend);
}
/* NV98 PPP engine class. */
struct nouveau_oclass
nv98_ppp_oclass = {
.handle = NV_ENGINE(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_ppp_ctor,
.dtor = nv98_ppp_dtor,
.init = nv98_ppp_init,
.fini = nv98_ppp_fini,
},
};

View File

@ -0,0 +1,140 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <engine/software.h>
#include <engine/fifo.h>
/* Engine instance state for the NV04 software (fake) engine. */
struct nv04_software_priv {
struct nouveau_software base;
};
/* Per-channel software-engine context. */
struct nv04_software_chan {
struct nouveau_software_chan base;
};
/*******************************************************************************
* software object classes
******************************************************************************/
/* SW method 0x0150: store a new reference-counter value on the channel.
 * NOTE(review): 'size' is not validated before the 4-byte read of
 * 'data' — confirm the method dispatcher guarantees at least 4 bytes.
 */
static int
nv04_software_set_ref(struct nouveau_object *object, u32 mthd,
void *data, u32 size)
{
struct nouveau_object *channel = (void *)nv_engctx(object->parent);
struct nouveau_fifo_chan *fifo = (void *)channel->parent;
atomic_set(&fifo->refcnt, *(u32*)data);
return 0;
}
/* SW method 0x0500: forward to the page-flip callback installed on the
 * software context, if any; -EINVAL when no callback is registered.
 */
static int
nv04_software_flip(struct nouveau_object *object, u32 mthd,
		   void *args, u32 size)
{
	struct nv04_software_chan *swch = (void *)nv_engctx(object->parent);

	if (!swch->base.flip)
		return -EINVAL;
	return swch->base.flip(swch->base.flip_data);
}
/* Method table for the NV04 software class (0x006e). */
static struct nouveau_omthds
nv04_software_omthds[] = {
{ 0x0150, nv04_software_set_ref },
{ 0x0500, nv04_software_flip },
{}
};
/* Single software class 0x006e with generic object funcs. */
static struct nouveau_oclass
nv04_software_sclass[] = {
{ 0x006e, &nouveau_object_ofuncs, nv04_software_omthds },
{}
};
/*******************************************************************************
* software context
******************************************************************************/
/* Allocate the per-channel software context; no hardware state. */
static int
nv04_software_context_ctor(struct nouveau_object *parent,
			   struct nouveau_object *engine,
			   struct nouveau_oclass *oclass, void *data, u32 size,
			   struct nouveau_object **pobject)
{
	struct nv04_software_chan *swch;
	int ret;

	ret = nouveau_software_context_create(parent, engine, oclass, &swch);
	*pobject = nv_object(swch);
	return ret;
}
/* NV04 software context class: generic lifecycle around the ctor. */
static struct nouveau_oclass
nv04_software_cclass = {
.handle = NV_ENGCTX(SW, 0x04),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_software_context_ctor,
.dtor = _nouveau_software_context_dtor,
.init = _nouveau_software_context_init,
.fini = _nouveau_software_context_fini,
},
};
/*******************************************************************************
* software engine/subdev functions
******************************************************************************/
/* Construct the NV04 software engine; no interrupt or hw state. */
static int
nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nv04_software_priv *priv;
int ret;
ret = nouveau_software_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_engine(priv)->cclass = &nv04_software_cclass;
nv_engine(priv)->sclass = nv04_software_sclass;
return 0;
}
/* NV04 software engine class. */
struct nouveau_oclass
nv04_software_oclass = {
.handle = NV_ENGINE(SW, 0x04),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv04_software_ctor,
.dtor = _nouveau_software_dtor,
.init = _nouveau_software_init,
.fini = _nouveau_software_fini,
},
};

View File

@ -0,0 +1,128 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <engine/software.h>
/* Engine instance state for the NV10 software (fake) engine. */
struct nv10_software_priv {
struct nouveau_software base;
};
/* Per-channel software-engine context. */
struct nv10_software_chan {
struct nouveau_software_chan base;
};
/*******************************************************************************
* software object classes
******************************************************************************/
/* SW method 0x0500: forward to the page-flip callback installed on the
 * software context, if any; -EINVAL when no callback is registered.
 */
static int
nv10_software_flip(struct nouveau_object *object, u32 mthd,
		   void *args, u32 size)
{
	struct nv10_software_chan *swch = (void *)nv_engctx(object->parent);

	if (!swch->base.flip)
		return -EINVAL;
	return swch->base.flip(swch->base.flip_data);
}
/* Software methods of class 0x016e (0x0500 = page flip). */
static struct nouveau_omthds
nv10_software_omthds[] = {
	{ 0x0500, nv10_software_flip },
	{}
};

/* User object classes exposed by the NV10 SW engine. */
static struct nouveau_oclass
nv10_software_sclass[] = {
	{ 0x016e, &nouveau_object_ofuncs, nv10_software_omthds },
	{}
};
/*******************************************************************************
* software context
******************************************************************************/
/*
 * Create the per-channel NV10 software engine context.
 *
 * NOTE: *pobject is assigned before the error check so the core can
 * tear down a partially-constructed object when the create fails.
 */
static int
nv10_software_context_ctor(struct nouveau_object *parent,
			   struct nouveau_object *engine,
			   struct nouveau_oclass *oclass, void *data, u32 size,
			   struct nouveau_object **pobject)
{
	struct nv10_software_chan *chan;
	int ret;

	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	return 0;
}
/*
 * Context class for the NV10 SW engine.
 *
 * Fix: the handle was NV_ENGCTX(SW, 0x04), copy-pasted from the NV04
 * implementation; 0x10 matches nv10_software_oclass below and the
 * chipset-numbered convention used by the nv50/nvc0 variants.
 */
static struct nouveau_oclass
nv10_software_cclass = {
	.handle = NV_ENGCTX(SW, 0x10),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv10_software_context_ctor,
		.dtor = _nouveau_software_context_dtor,
		.init = _nouveau_software_context_init,
		.fini = _nouveau_software_context_fini,
	},
};
/*******************************************************************************
* software engine/subdev functions
******************************************************************************/
/*
 * Construct the NV10 SW engine and hook up its context and user
 * object classes.  *pobject is assigned before the error check so the
 * core can destroy a partially-constructed engine on failure.
 */
static int
nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		   struct nouveau_oclass *oclass, void *data, u32 size,
		   struct nouveau_object **pobject)
{
	struct nv10_software_priv *priv;
	int ret;

	ret = nouveau_software_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->cclass = &nv10_software_cclass;
	nv_engine(priv)->sclass = nv10_software_sclass;
	return 0;
}

/* Engine class definition for the NV10 software engine. */
struct nouveau_oclass
nv10_software_oclass = {
	.handle = NV_ENGINE(SW, 0x10),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv10_software_ctor,
		.dtor = _nouveau_software_dtor,
		.init = _nouveau_software_init,
		.fini = _nouveau_software_fini,
	},
};

View File

@ -0,0 +1,198 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <core/namedb.h>
#include <core/handle.h>
#include <core/gpuobj.h>
#include <engine/software.h>
#include <engine/disp.h>
/* Per-device state of the NV50 software (SW) engine. */
struct nv50_software_priv {
	struct nouveau_software base;
};

/* Per-channel context of the NV50 software engine. */
struct nv50_software_chan {
	struct nouveau_software_chan base;
};
/*******************************************************************************
* software object classes
******************************************************************************/
/*
 * SW method 0x018c: bind the DMA object the vblank semaphore will be
 * written through.  The handle is looked up in the owning FIFO
 * channel's namedb; only gpuobj-backed handles carry an instance we
 * can point the ctxdma at (offset stored in 16-byte units).
 */
static int
nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
			      void *args, u32 size)
{
	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
	struct nouveau_fifo_chan *fifo = (void *)nv_object(chan)->parent;
	struct nouveau_handle *handle;
	int ret = -EINVAL;

	handle = nouveau_namedb_get(nv_namedb(fifo), *(u32 *)args);
	if (!handle)
		return -ENOENT;

	if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
		struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
		chan->base.vblank.ctxdma = gpuobj->node->offset >> 4;
		ret = 0;
	}
	/* drop the reference taken by nouveau_namedb_get() */
	nouveau_namedb_put(handle);
	return ret;
}
/* SW method 0x0400: offset the vblank semaphore is written at. */
static int
nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
				 void *args, u32 size)
{
	struct nv50_software_chan *sw = (void *)nv_engctx(object->parent);

	sw->base.vblank.offset = *(u32 *)args;
	return 0;
}

/* SW method 0x0404: value the vblank semaphore is released with. */
static int
nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
				void *args, u32 size)
{
	struct nv50_software_chan *sw = (void *)nv_engctx(object->parent);

	sw->base.vblank.value = *(u32 *)args;
	return 0;
}
/*
 * SW method 0x0408: release the vblank semaphore on the next vblank of
 * the requested CRTC.  Enables vblank reporting for that CRTC and
 * queues this channel on the display engine's vblank list.
 */
static int
nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
				  void *args, u32 size)
{
	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
	struct nouveau_disp *disp = nouveau_disp(object);
	unsigned long flags;
	u32 crtc = *(u32 *)args;

	/* NV50-class display exposes two CRTCs (0 and 1) */
	if (crtc > 1)
		return -EINVAL;

	disp->vblank.get(disp->vblank.data, crtc);

	/* vblank list is walked from the display IRQ handler */
	spin_lock_irqsave(&disp->vblank.lock, flags);
	list_add(&chan->base.vblank.head, &disp->vblank.list);
	chan->base.vblank.crtc = crtc;
	spin_unlock_irqrestore(&disp->vblank.lock, flags);
	return 0;
}
/*
 * SW method 0x0500: complete a queued page flip via the flip callback
 * registered on the software context.
 */
static int
nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
			void *args, u32 size)
{
	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);

	/* no flip handler registered on this channel */
	if (!chan->base.flip)
		return -EINVAL;

	return chan->base.flip(chan->base.flip_data);
}
/* Software methods of class 0x506e (vblank semaphore + page flip). */
static struct nouveau_omthds
nv50_software_omthds[] = {
	{ 0x018c, nv50_software_mthd_dma_vblsem },
	{ 0x0400, nv50_software_mthd_vblsem_offset },
	{ 0x0404, nv50_software_mthd_vblsem_value },
	{ 0x0408, nv50_software_mthd_vblsem_release },
	{ 0x0500, nv50_software_mthd_flip },
	{}
};

/* User object classes exposed by the NV50 SW engine. */
static struct nouveau_oclass
nv50_software_sclass[] = {
	{ 0x506e, &nouveau_object_ofuncs, nv50_software_omthds },
	{}
};
/*******************************************************************************
* software context
******************************************************************************/
/*
 * Create the per-channel NV50 software engine context.  *pobject is
 * assigned before the error check so the core can tear down a
 * partially-constructed object when the create fails.
 */
static int
nv50_software_context_ctor(struct nouveau_object *parent,
			   struct nouveau_object *engine,
			   struct nouveau_oclass *oclass, void *data, u32 size,
			   struct nouveau_object **pobject)
{
	struct nv50_software_chan *chan;
	int ret;

	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* channel instance address, used to identify vblank requesters */
	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
	return 0;
}

/* Context class for the NV50 SW engine. */
static struct nouveau_oclass
nv50_software_cclass = {
	.handle = NV_ENGCTX(SW, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_software_context_ctor,
		.dtor = _nouveau_software_context_dtor,
		.init = _nouveau_software_context_init,
		.fini = _nouveau_software_context_fini,
	},
};
/*******************************************************************************
* software engine/subdev functions
******************************************************************************/
/*
 * Construct the NV50 SW engine and hook up its context and user
 * object classes.
 */
static int
nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		   struct nouveau_oclass *oclass, void *data, u32 size,
		   struct nouveau_object **pobject)
{
	struct nv50_software_priv *priv;
	int ret;

	ret = nouveau_software_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->cclass = &nv50_software_cclass;
	nv_engine(priv)->sclass = nv50_software_sclass;
	return 0;
}

/* Engine class definition for the NV50 software engine. */
struct nouveau_oclass
nv50_software_oclass = {
	.handle = NV_ENGINE(SW, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_software_ctor,
		.dtor = _nouveau_software_dtor,
		.init = _nouveau_software_init,
		.fini = _nouveau_software_fini,
	},
};

View File

@ -0,0 +1,180 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
#include <engine/software.h>
#include <engine/disp.h>
/* Per-device state of the NVC0 software (SW) engine. */
struct nvc0_software_priv {
	struct nouveau_software base;
};

/* Per-channel context of the NVC0 software engine. */
struct nvc0_software_chan {
	struct nouveau_software_chan base;
};
/*******************************************************************************
* software object classes
******************************************************************************/
/*
 * SW methods 0x0400/0x0404: set the high/low 32 bits of the address
 * the vblank semaphore is written at (0x0400 = high, 0x0404 = low).
 *
 * NOTE(review): the high-half mask (0xff00000000) treats the offset as
 * a 40-bit address — confirm against the 906e class documentation.
 */
static int
nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
				 void *args, u32 size)
{
	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
	u64 data = *(u32 *)args;

	if (mthd == 0x0400) {
		/* keep low 32 bits, replace the high part */
		chan->base.vblank.offset &= 0x00ffffffffULL;
		chan->base.vblank.offset |= data << 32;
	} else {
		/* keep high part, replace the low 32 bits */
		chan->base.vblank.offset &= 0xff00000000ULL;
		chan->base.vblank.offset |= data;
	}
	return 0;
}
/* SW method 0x0408: value the vblank semaphore is released with. */
static int
nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
				void *args, u32 size)
{
	struct nvc0_software_chan *sw = (void *)nv_engctx(object->parent);

	sw->base.vblank.value = *(u32 *)args;
	return 0;
}
/*
 * SW method 0x040c: release the vblank semaphore on the next vblank of
 * the requested CRTC.  Pre-NVE0 chipsets accept CRTCs 0-1, NVE0 and
 * later accept 0-3.
 */
static int
nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
				  void *args, u32 size)
{
	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
	struct nouveau_disp *disp = nouveau_disp(object);
	unsigned long flags;
	u32 crtc = *(u32 *)args;

	if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
		return -EINVAL;

	disp->vblank.get(disp->vblank.data, crtc);

	/* vblank list is walked from the display IRQ handler */
	spin_lock_irqsave(&disp->vblank.lock, flags);
	list_add(&chan->base.vblank.head, &disp->vblank.list);
	chan->base.vblank.crtc = crtc;
	spin_unlock_irqrestore(&disp->vblank.lock, flags);
	return 0;
}
/*
 * SW method 0x0500: complete a queued page flip via the flip callback
 * registered on the software context.
 */
static int
nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
			void *args, u32 size)
{
	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);

	/* no flip handler registered on this channel */
	if (!chan->base.flip)
		return -EINVAL;

	return chan->base.flip(chan->base.flip_data);
}
/* Software methods of class 0x906e (vblank semaphore + page flip). */
static struct nouveau_omthds
nvc0_software_omthds[] = {
	{ 0x0400, nvc0_software_mthd_vblsem_offset },
	{ 0x0404, nvc0_software_mthd_vblsem_offset },
	{ 0x0408, nvc0_software_mthd_vblsem_value },
	{ 0x040c, nvc0_software_mthd_vblsem_release },
	{ 0x0500, nvc0_software_mthd_flip },
	{}
};

/* User object classes exposed by the NVC0 SW engine. */
static struct nouveau_oclass
nvc0_software_sclass[] = {
	{ 0x906e, &nouveau_object_ofuncs, nvc0_software_omthds },
	{}
};
/*******************************************************************************
* software context
******************************************************************************/
/*
 * Create the per-channel NVC0 software engine context.  *pobject is
 * assigned before the error check so the core can tear down a
 * partially-constructed object when the create fails.
 */
static int
nvc0_software_context_ctor(struct nouveau_object *parent,
			   struct nouveau_object *engine,
			   struct nouveau_oclass *oclass, void *data, u32 size,
			   struct nouveau_object **pobject)
{
	struct nvc0_software_chan *chan;
	int ret;

	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* channel instance address, used to identify vblank requesters */
	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
	return 0;
}

/* Context class for the NVC0 SW engine. */
static struct nouveau_oclass
nvc0_software_cclass = {
	.handle = NV_ENGCTX(SW, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_software_context_ctor,
		.dtor = _nouveau_software_context_dtor,
		.init = _nouveau_software_context_init,
		.fini = _nouveau_software_context_fini,
	},
};
/*******************************************************************************
* software engine/subdev functions
******************************************************************************/
/*
 * Construct the NVC0 SW engine and hook up its context and user
 * object classes.
 */
static int
nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		   struct nouveau_oclass *oclass, void *data, u32 size,
		   struct nouveau_object **pobject)
{
	struct nvc0_software_priv *priv;
	int ret;

	ret = nouveau_software_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->cclass = &nvc0_software_cclass;
	nv_engine(priv)->sclass = nvc0_software_sclass;
	return 0;
}

/* Engine class definition for the NVC0 software engine. */
struct nouveau_oclass
nvc0_software_oclass = {
	.handle = NV_ENGINE(SW, 0xc0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvc0_software_ctor,
		.dtor = _nouveau_software_dtor,
		.init = _nouveau_software_init,
		.fini = _nouveau_software_fini,
	},
};

View File

@ -1,5 +1,5 @@
/*
* Copyright 2011 Red Hat Inc.
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -22,61 +22,154 @@
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include <core/ramht.h>
#include <core/os.h>
#include <core/class.h>
#include <core/engctx.h>
/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
* more than just an enable/disable stub this needs to be split out to
* nv98_vp.c...
*/
#include <engine/vp.h>
struct nv84_vp_engine {
struct nouveau_exec_engine base;
struct nv84_vp_priv {
struct nouveau_vp base;
};
static int
nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
{
if (!(nv_rd32(dev, 0x000200) & 0x00020000))
return 0;
struct nv84_vp_chan {
struct nouveau_vp_chan base;
};
nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
return 0;
}
/*******************************************************************************
* VP object classes
******************************************************************************/
/* No user object classes yet: PVP is only an enable/disable stub. */
static struct nouveau_oclass
nv84_vp_sclass[] = {
	{},
};
/*******************************************************************************
* PVP context
******************************************************************************/
static int
nv84_vp_init(struct drm_device *dev, int engine)
nv84_vp_context_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
struct nv84_vp_chan *priv;
int ret;
ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
0, 0, 0, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
return 0;
}
static void
nv84_vp_destroy(struct drm_device *dev, int engine)
nv84_vp_context_dtor(struct nouveau_object *object)
{
struct nv84_vp_engine *pvp = nv_engine(dev, engine);
NVOBJ_ENGINE_DEL(dev, VP);
kfree(pvp);
struct nv84_vp_chan *priv = (void *)object;
nouveau_vp_context_destroy(&priv->base);
}
int
nv84_vp_create(struct drm_device *dev)
static int
nv84_vp_context_init(struct nouveau_object *object)
{
struct nv84_vp_engine *pvp;
struct nv84_vp_chan *priv = (void *)object;
int ret;
pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
if (!pvp)
return -ENOMEM;
ret = nouveau_vp_context_init(&priv->base);
if (ret)
return ret;
pvp->base.destroy = nv84_vp_destroy;
pvp->base.init = nv84_vp_init;
pvp->base.fini = nv84_vp_fini;
NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
return 0;
}
static int
nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
{
struct nv84_vp_chan *priv = (void *)object;
return nouveau_vp_context_fini(&priv->base, suspend);
}
/* Context class for the NV84 PVP (video processor) engine. */
static struct nouveau_oclass
nv84_vp_cclass = {
	.handle = NV_ENGCTX(VP, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv84_vp_context_ctor,
		.dtor = nv84_vp_context_dtor,
		.init = nv84_vp_context_init,
		.fini = nv84_vp_context_fini,
		.rd32 = _nouveau_vp_context_rd32,
		.wr32 = _nouveau_vp_context_wr32,
	},
};
/*******************************************************************************
* PVP engine/subdev functions
******************************************************************************/
/* PVP interrupt handler: intentionally empty, IRQs are not decoded yet. */
static void
nv84_vp_intr(struct nouveau_subdev *subdev)
{
}
/*
 * Construct the NV84 PVP engine and hook up its classes and IRQ stub.
 * unit is the PMC enable/intr mask for PVP — presumably bits 17 and 24;
 * TODO confirm against hardware documentation.
 */
static int
nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	     struct nouveau_oclass *oclass, void *data, u32 size,
	     struct nouveau_object **pobject)
{
	struct nv84_vp_priv *priv;
	int ret;

	ret = nouveau_vp_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x01020000;
	nv_subdev(priv)->intr = nv84_vp_intr;
	nv_engine(priv)->cclass = &nv84_vp_cclass;
	nv_engine(priv)->sclass = nv84_vp_sclass;
	return 0;
}

/* Destroy the PVP engine via the common VP destructor. */
static void
nv84_vp_dtor(struct nouveau_object *object)
{
	struct nv84_vp_priv *priv = (void *)object;
	nouveau_vp_destroy(&priv->base);
}
/* Start PVP: nothing chipset-specific beyond the common VP engine init. */
static int
nv84_vp_init(struct nouveau_object *object)
{
	struct nv84_vp_priv *priv = (void *)object;

	return nouveau_vp_init(&priv->base);
}
/* Halt PVP via the common VP engine fini. */
static int
nv84_vp_fini(struct nouveau_object *object, bool suspend)
{
	struct nv84_vp_priv *priv = (void *)object;
	return nouveau_vp_fini(&priv->base, suspend);
}

/* Engine class definition for the NV84 PVP engine. */
struct nouveau_oclass
nv84_vp_oclass = {
	.handle = NV_ENGINE(VP, 0x84),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv84_vp_ctor,
		.dtor = nv84_vp_dtor,
		.init = nv84_vp_init,
		.fini = nv84_vp_fini,
	},
};

View File

@ -1,55 +1,23 @@
/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#ifndef __NOUVEAU_RAMHT_H__
#define __NOUVEAU_RAMHT_H__
struct nouveau_ramht_entry {
struct list_head head;
struct nouveau_channel *channel;
struct nouveau_gpuobj *gpuobj;
u32 handle;
};
#include <core/gpuobj.h>
struct nouveau_ramht {
struct drm_device *dev;
struct kref refcount;
spinlock_t lock;
struct nouveau_gpuobj *gpuobj;
struct list_head entries;
struct nouveau_gpuobj base;
int bits;
};
extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
struct nouveau_ramht **);
extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
struct nouveau_channel *unref_channel);
int nouveau_ramht_insert(struct nouveau_ramht *, int chid,
u32 handle, u32 context);
void nouveau_ramht_remove(struct nouveau_ramht *, int cookie);
int nouveau_ramht_new(struct nouveau_object *, struct nouveau_object *,
u32 size, u32 align, struct nouveau_ramht **);
extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
struct nouveau_gpuobj *);
extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
extern struct nouveau_gpuobj *
nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
/* Take/drop a RAMHT reference via the embedded gpuobj's refcount. */
static inline void
nouveau_ramht_ref(struct nouveau_ramht *obj, struct nouveau_ramht **ref)
{
	nouveau_gpuobj_ref(&obj->base, (struct nouveau_gpuobj **)ref);
}
#endif

View File

@ -0,0 +1,45 @@
#ifndef __NOUVEAU_BSP_H__
#define __NOUVEAU_BSP_H__

#include <core/engine.h>
#include <core/engctx.h>

/* Per-channel context of the BSP (bitstream processor) engine. */
struct nouveau_bsp_chan {
	struct nouveau_engctx base;
};

/* BSP context ops are thin wrappers over the generic engctx helpers. */
#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \
	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
#define nouveau_bsp_context_destroy(d) \
	nouveau_engctx_destroy(&(d)->base)
#define nouveau_bsp_context_init(d) \
	nouveau_engctx_init(&(d)->base)
#define nouveau_bsp_context_fini(d,s) \
	nouveau_engctx_fini(&(d)->base, (s))

#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
#define _nouveau_bsp_context_init _nouveau_engctx_init
#define _nouveau_bsp_context_fini _nouveau_engctx_fini
#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32

/* The BSP engine itself; no state beyond the generic engine. */
struct nouveau_bsp {
	struct nouveau_engine base;
};

#define nouveau_bsp_create(p,e,c,d) \
	nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
#define nouveau_bsp_destroy(d) \
	nouveau_engine_destroy(&(d)->base)
#define nouveau_bsp_init(d) \
	nouveau_engine_init(&(d)->base)
#define nouveau_bsp_fini(d,s) \
	nouveau_engine_fini(&(d)->base, (s))

#define _nouveau_bsp_dtor _nouveau_engine_dtor
#define _nouveau_bsp_init _nouveau_engine_init
#define _nouveau_bsp_fini _nouveau_engine_fini

/* Chipset implementations. */
extern struct nouveau_oclass nv84_bsp_oclass;

#endif

View File

@ -0,0 +1,47 @@
#ifndef __NOUVEAU_COPY_H__
#define __NOUVEAU_COPY_H__

#include <core/engine.h>
#include <core/engctx.h>

/* Per-channel context of a copy (PCE) engine. */
struct nouveau_copy_chan {
	struct nouveau_engctx base;
};

/* Copy context ops are thin wrappers over the generic engctx helpers. */
#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \
	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
#define nouveau_copy_context_destroy(d) \
	nouveau_engctx_destroy(&(d)->base)
#define nouveau_copy_context_init(d) \
	nouveau_engctx_init(&(d)->base)
#define nouveau_copy_context_fini(d,s) \
	nouveau_engctx_fini(&(d)->base, (s))

#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
#define _nouveau_copy_context_init _nouveau_engctx_init
#define _nouveau_copy_context_fini _nouveau_engctx_fini
#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32

/* A copy engine instance; chips may have more than one (i = index). */
struct nouveau_copy {
	struct nouveau_engine base;
};

#define nouveau_copy_create(p,e,c,y,i,d) \
	nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
#define nouveau_copy_destroy(d) \
	nouveau_engine_destroy(&(d)->base)
#define nouveau_copy_init(d) \
	nouveau_engine_init(&(d)->base)
#define nouveau_copy_fini(d,s) \
	nouveau_engine_fini(&(d)->base, (s))

#define _nouveau_copy_dtor _nouveau_engine_dtor
#define _nouveau_copy_init _nouveau_engine_init
#define _nouveau_copy_fini _nouveau_engine_fini

/* Chipset implementations. */
extern struct nouveau_oclass nva3_copy_oclass;
extern struct nouveau_oclass nvc0_copy0_oclass;
extern struct nouveau_oclass nvc0_copy1_oclass;

#endif

View File

@ -0,0 +1,46 @@
#ifndef __NOUVEAU_CRYPT_H__
#define __NOUVEAU_CRYPT_H__

#include <core/engine.h>
#include <core/engctx.h>

/* Per-channel context of the crypt (PCRYPT) engine. */
struct nouveau_crypt_chan {
	struct nouveau_engctx base;
};

/* Crypt context ops are thin wrappers over the generic engctx helpers. */
#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \
	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
#define nouveau_crypt_context_destroy(d) \
	nouveau_engctx_destroy(&(d)->base)
#define nouveau_crypt_context_init(d) \
	nouveau_engctx_init(&(d)->base)
#define nouveau_crypt_context_fini(d,s) \
	nouveau_engctx_fini(&(d)->base, (s))

#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
#define _nouveau_crypt_context_init _nouveau_engctx_init
#define _nouveau_crypt_context_fini _nouveau_engctx_fini
#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32

/* The crypt engine itself; no state beyond the generic engine. */
struct nouveau_crypt {
	struct nouveau_engine base;
};

#define nouveau_crypt_create(p,e,c,d) \
	nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
#define nouveau_crypt_destroy(d) \
	nouveau_engine_destroy(&(d)->base)
#define nouveau_crypt_init(d) \
	nouveau_engine_init(&(d)->base)
#define nouveau_crypt_fini(d,s) \
	nouveau_engine_fini(&(d)->base, (s))

#define _nouveau_crypt_dtor _nouveau_engine_dtor
#define _nouveau_crypt_init _nouveau_engine_init
#define _nouveau_crypt_fini _nouveau_engine_fini

/* Chipset implementations. */
extern struct nouveau_oclass nv84_crypt_oclass;
extern struct nouveau_oclass nv98_crypt_oclass;

#endif

View File

@ -0,0 +1,44 @@
#ifndef __NOUVEAU_DISP_H__
#define __NOUVEAU_DISP_H__

#include <core/object.h>
#include <core/engine.h>
#include <core/device.h>

/* Display engine with shared vblank tracking state. */
struct nouveau_disp {
	struct nouveau_engine base;

	struct {
		struct list_head list;	/* channels awaiting a vblank */
		spinlock_t lock;	/* protects list */
		void (*notify)(void *, int);	/* deliver vblank event */
		void (*get)(void *, int);	/* enable vblank irq for crtc */
		void (*put)(void *, int);	/* disable vblank irq for crtc */
		void *data;		/* opaque arg to the callbacks */
	} vblank;
};

/* Fetch the device's display engine from any object on that device. */
static inline struct nouveau_disp *
nouveau_disp(void *obj)
{
	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
}

#define nouveau_disp_create(p,e,c,i,x,d) \
	nouveau_engine_create((p), (e), (c), true, (i), (x), (d))
#define nouveau_disp_destroy(d) \
	nouveau_engine_destroy(&(d)->base)
#define nouveau_disp_init(d) \
	nouveau_engine_init(&(d)->base)
#define nouveau_disp_fini(d,s) \
	nouveau_engine_fini(&(d)->base, (s))

#define _nouveau_disp_dtor _nouveau_engine_dtor
#define _nouveau_disp_init _nouveau_engine_init
#define _nouveau_disp_fini _nouveau_engine_fini

/* Chipset implementations. */
extern struct nouveau_oclass nv04_disp_oclass;
extern struct nouveau_oclass nv50_disp_oclass;
extern struct nouveau_oclass nvd0_disp_oclass;

#endif

View File

@ -0,0 +1,57 @@
#ifndef __NOUVEAU_DMAOBJ_H__
#define __NOUVEAU_DMAOBJ_H__

#include <core/object.h>
#include <core/engine.h>

struct nouveau_gpuobj;

/* A DMA object: a window [start,limit] into some memory target. */
struct nouveau_dmaobj {
	struct nouveau_object base;
	u32 target;	/* memory target (vram/gart/...) */
	u32 access;	/* read/write access mode */
	u64 start;	/* base address of the window */
	u64 limit;	/* last addressable byte */
};

#define nouveau_dmaobj_create(p,e,c,a,s,d) \
	nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
#define nouveau_dmaobj_destroy(p) \
	nouveau_object_destroy(&(p)->base)
#define nouveau_dmaobj_init(p) \
	nouveau_object_init(&(p)->base)
#define nouveau_dmaobj_fini(p,s) \
	nouveau_object_fini(&(p)->base, (s))

int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
			   struct nouveau_oclass *, void *data, u32 size,
			   int length, void **);

#define _nouveau_dmaobj_dtor nouveau_object_destroy
#define _nouveau_dmaobj_init nouveau_object_init
#define _nouveau_dmaobj_fini nouveau_object_fini

/* DMA engine: translates dmaobjs into chipset-specific ctxdma gpuobjs. */
struct nouveau_dmaeng {
	struct nouveau_engine base;

	/* instantiate a hardware ctxdma for dmaobj under parent */
	int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
		    struct nouveau_dmaobj *, struct nouveau_gpuobj **);
};

#define nouveau_dmaeng_create(p,e,c,d) \
	nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d))
#define nouveau_dmaeng_destroy(p) \
	nouveau_engine_destroy(&(p)->base)
#define nouveau_dmaeng_init(p) \
	nouveau_engine_init(&(p)->base)
#define nouveau_dmaeng_fini(p,s) \
	nouveau_engine_fini(&(p)->base, (s))

#define _nouveau_dmaeng_dtor _nouveau_engine_dtor
#define _nouveau_dmaeng_init _nouveau_engine_init
#define _nouveau_dmaeng_fini _nouveau_engine_fini

/* Chipset implementations. */
extern struct nouveau_oclass nv04_dmaeng_oclass;
extern struct nouveau_oclass nv50_dmaeng_oclass;
extern struct nouveau_oclass nvc0_dmaeng_oclass;

#endif

View File

@ -1,32 +1,109 @@
#ifndef __NOUVEAU_FIFO_H__
#define __NOUVEAU_FIFO_H__
struct nouveau_fifo_priv {
struct nouveau_exec_engine base;
u32 channels;
};
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engine.h>
struct nouveau_fifo_chan {
struct nouveau_namedb base;
struct nouveau_dmaobj *pushdma;
struct nouveau_gpuobj *pushgpu;
void __iomem *user;
u32 size;
u16 chid;
atomic_t refcnt; /* NV04_NVSW_SET_REF */
};
bool nv04_fifo_cache_pull(struct drm_device *, bool);
void nv04_fifo_context_del(struct nouveau_channel *, int);
int nv04_fifo_fini(struct drm_device *, int, bool);
int nv04_fifo_init(struct drm_device *, int);
void nv04_fifo_isr(struct drm_device *);
void nv04_fifo_destroy(struct drm_device *, int);
/* Cast any object within a FIFO channel to its nouveau_fifo_chan. */
static inline struct nouveau_fifo_chan *
nouveau_fifo_chan(void *obj)
{
	return (void *)nv_namedb(obj);
}
void nv50_fifo_playlist_update(struct drm_device *);
void nv50_fifo_destroy(struct drm_device *, int);
void nv50_fifo_tlb_flush(struct drm_device *, int);
#define nouveau_fifo_channel_create(p,e,c,b,a,s,n,m,d) \
nouveau_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \
(m), sizeof(**d), (void **)d)
#define nouveau_fifo_channel_init(p) \
nouveau_namedb_init(&(p)->base)
#define nouveau_fifo_channel_fini(p,s) \
nouveau_namedb_fini(&(p)->base, (s))
int nv04_fifo_create(struct drm_device *);
int nv10_fifo_create(struct drm_device *);
int nv17_fifo_create(struct drm_device *);
int nv40_fifo_create(struct drm_device *);
int nv50_fifo_create(struct drm_device *);
int nv84_fifo_create(struct drm_device *);
int nvc0_fifo_create(struct drm_device *);
int nve0_fifo_create(struct drm_device *);
int nouveau_fifo_channel_create_(struct nouveau_object *,
struct nouveau_object *,
struct nouveau_oclass *,
int bar, u32 addr, u32 size, u32 push,
u32 engmask, int len, void **);
void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
#define _nouveau_fifo_channel_init _nouveau_namedb_init
#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
void _nouveau_fifo_channel_dtor(struct nouveau_object *);
u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
struct nouveau_fifo_base {
struct nouveau_gpuobj base;
};
#define nouveau_fifo_context_create(p,e,c,g,s,a,f,d) \
nouveau_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
#define nouveau_fifo_context_destroy(p) \
nouveau_gpuobj_destroy(&(p)->base)
#define nouveau_fifo_context_init(p) \
nouveau_gpuobj_init(&(p)->base)
#define nouveau_fifo_context_fini(p,s) \
nouveau_gpuobj_fini(&(p)->base, (s))
#define _nouveau_fifo_context_dtor _nouveau_gpuobj_dtor
#define _nouveau_fifo_context_init _nouveau_gpuobj_init
#define _nouveau_fifo_context_fini _nouveau_gpuobj_fini
#define _nouveau_fifo_context_rd32 _nouveau_gpuobj_rd32
#define _nouveau_fifo_context_wr32 _nouveau_gpuobj_wr32
struct nouveau_fifo {
struct nouveau_engine base;
struct nouveau_object **channel;
spinlock_t lock;
u16 min;
u16 max;
void (*pause)(struct nouveau_fifo *, unsigned long *);
void (*start)(struct nouveau_fifo *, unsigned long *);
};
/* Fetch the device's FIFO engine from any object on that device. */
static inline struct nouveau_fifo *
nouveau_fifo(void *obj)
{
	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_FIFO];
}
#define nouveau_fifo_create(o,e,c,fc,lc,d) \
nouveau_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
#define nouveau_fifo_init(p) \
nouveau_engine_init(&(p)->base)
#define nouveau_fifo_fini(p,s) \
nouveau_engine_fini(&(p)->base, (s))
int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, int min, int max,
int size, void **);
void nouveau_fifo_destroy(struct nouveau_fifo *);
#define _nouveau_fifo_init _nouveau_engine_init
#define _nouveau_fifo_fini _nouveau_engine_fini
extern struct nouveau_oclass nv04_fifo_oclass;
extern struct nouveau_oclass nv10_fifo_oclass;
extern struct nouveau_oclass nv17_fifo_oclass;
extern struct nouveau_oclass nv40_fifo_oclass;
extern struct nouveau_oclass nv50_fifo_oclass;
extern struct nouveau_oclass nv84_fifo_oclass;
extern struct nouveau_oclass nvc0_fifo_oclass;
extern struct nouveau_oclass nve0_fifo_oclass;
void nv04_fifo_intr(struct nouveau_subdev *);
#endif

View File

@ -0,0 +1,72 @@
#ifndef __NOUVEAU_GRAPH_H__
#define __NOUVEAU_GRAPH_H__
#include <core/engine.h>
#include <core/engctx.h>
#include <core/enum.h>
/* Per-channel context of the graphics (PGRAPH) engine. */
struct nouveau_graph_chan {
	struct nouveau_engctx base;
};

/* Graph context ops are thin wrappers over the generic engctx helpers. */
#define nouveau_graph_context_create(p,e,c,g,s,a,f,d) \
	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
#define nouveau_graph_context_destroy(d) \
	nouveau_engctx_destroy(&(d)->base)
#define nouveau_graph_context_init(d) \
	nouveau_engctx_init(&(d)->base)
#define nouveau_graph_context_fini(d,s) \
	nouveau_engctx_fini(&(d)->base, (s))

#define _nouveau_graph_context_dtor _nouveau_engctx_dtor
#define _nouveau_graph_context_init _nouveau_engctx_init
#define _nouveau_graph_context_fini _nouveau_engctx_fini
#define _nouveau_graph_context_rd32 _nouveau_engctx_rd32
#define _nouveau_graph_context_wr32 _nouveau_engctx_wr32

/* The graphics engine itself; no state beyond the generic engine. */
struct nouveau_graph {
	struct nouveau_engine base;
};

/* Fetch the device's PGRAPH engine from any object on that device. */
static inline struct nouveau_graph *
nouveau_graph(void *obj)
{
	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_GR];
}

#define nouveau_graph_create(p,e,c,y,d) \
	nouveau_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
#define nouveau_graph_destroy(d) \
	nouveau_engine_destroy(&(d)->base)
#define nouveau_graph_init(d) \
	nouveau_engine_init(&(d)->base)
#define nouveau_graph_fini(d,s) \
	nouveau_engine_fini(&(d)->base, (s))

#define _nouveau_graph_dtor _nouveau_engine_dtor
#define _nouveau_graph_init _nouveau_engine_init
#define _nouveau_graph_fini _nouveau_engine_fini

/* Chipset implementations. */
extern struct nouveau_oclass nv04_graph_oclass;
extern struct nouveau_oclass nv10_graph_oclass;
extern struct nouveau_oclass nv20_graph_oclass;
extern struct nouveau_oclass nv25_graph_oclass;
extern struct nouveau_oclass nv2a_graph_oclass;
extern struct nouveau_oclass nv30_graph_oclass;
extern struct nouveau_oclass nv34_graph_oclass;
extern struct nouveau_oclass nv35_graph_oclass;
extern struct nouveau_oclass nv40_graph_oclass;
extern struct nouveau_oclass nv50_graph_oclass;
extern struct nouveau_oclass nvc0_graph_oclass;
extern struct nouveau_oclass nve0_graph_oclass;

/* Shared decode tables and helpers used across chipset PGRAPHs. */
extern struct nouveau_bitfield nv04_graph_nsource[];
extern struct nouveau_ofuncs nv04_graph_ofuncs;
bool nv04_graph_idle(void *obj);

extern struct nouveau_bitfield nv10_graph_intr_name[];
extern struct nouveau_bitfield nv10_graph_nstatus[];

extern struct nouveau_enum nv50_data_error_names[];
#endif

View File

@ -0,0 +1,61 @@
/* Interface for the PMPEG (mpeg decode) engine in the modular engine
 * framework; same pattern as the other engine headers: context wrapper,
 * engine wrapper, forwarding macros, per-chipset oclass declarations. */
#ifndef __NOUVEAU_MPEG_H__
#define __NOUVEAU_MPEG_H__
#include <core/engine.h>
#include <core/engctx.h>
/* Per-channel mpeg context; wraps the generic engine context. */
struct nouveau_mpeg_chan {
struct nouveau_engctx base;
};
/* Context lifecycle: thin forwards to the generic engctx helpers. */
#define nouveau_mpeg_context_create(p,e,c,g,s,a,f,d) \
nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
#define nouveau_mpeg_context_destroy(d) \
nouveau_engctx_destroy(&(d)->base)
#define nouveau_mpeg_context_init(d) \
nouveau_engctx_init(&(d)->base)
#define nouveau_mpeg_context_fini(d,s) \
nouveau_engctx_fini(&(d)->base, (s))
/* Default context ofuncs, aliased to the generic implementations. */
#define _nouveau_mpeg_context_dtor _nouveau_engctx_dtor
#define _nouveau_mpeg_context_init _nouveau_engctx_init
#define _nouveau_mpeg_context_fini _nouveau_engctx_fini
#define _nouveau_mpeg_context_rd32 _nouveau_engctx_rd32
#define _nouveau_mpeg_context_wr32 _nouveau_engctx_wr32
/* The mpeg engine itself; wraps the generic engine base. */
struct nouveau_mpeg {
struct nouveau_engine base;
};
/* Engine lifecycle: forwards to the generic engine helpers, registering
 * under the "PMPEG"/"mpeg" names (always enabled: 'true'). */
#define nouveau_mpeg_create(p,e,c,d) \
nouveau_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
#define nouveau_mpeg_destroy(d) \
nouveau_engine_destroy(&(d)->base)
#define nouveau_mpeg_init(d) \
nouveau_engine_init(&(d)->base)
#define nouveau_mpeg_fini(d,s) \
nouveau_engine_fini(&(d)->base, (s))
#define _nouveau_mpeg_dtor _nouveau_engine_dtor
#define _nouveau_mpeg_init _nouveau_engine_init
#define _nouveau_mpeg_fini _nouveau_engine_fini
/* Per-chipset mpeg engine implementations. */
extern struct nouveau_oclass nv31_mpeg_oclass;
extern struct nouveau_oclass nv40_mpeg_oclass;
extern struct nouveau_oclass nv50_mpeg_oclass;
extern struct nouveau_oclass nv84_mpeg_oclass;
/* Helpers exported for reuse by later-chipset implementations —
 * NOTE(review): sharing assumed from naming; confirm against users. */
extern struct nouveau_oclass nv31_mpeg_sclass[];
void nv31_mpeg_intr(struct nouveau_subdev *);
void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
int nv31_mpeg_init(struct nouveau_object *);
extern struct nouveau_ofuncs nv50_mpeg_ofuncs;
int nv50_mpeg_context_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *, u32,
struct nouveau_object **);
int nv50_mpeg_tlb_flush(struct nouveau_engine *);
void nv50_mpeg_intr(struct nouveau_subdev *);
int nv50_mpeg_init(struct nouveau_object *);
#endif

View File

@ -0,0 +1,45 @@
/* Interface for the PPPP (video post-processing) engine in the modular
 * engine framework; same pattern as the other engine headers —
 * NOTE(review): "post-processing" inferred from the "ppp" name; confirm. */
#ifndef __NOUVEAU_PPP_H__
#define __NOUVEAU_PPP_H__
#include <core/engine.h>
#include <core/engctx.h>
/* Per-channel ppp context; wraps the generic engine context. */
struct nouveau_ppp_chan {
struct nouveau_engctx base;
};
/* Context lifecycle: thin forwards to the generic engctx helpers. */
#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \
nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
#define nouveau_ppp_context_destroy(d) \
nouveau_engctx_destroy(&(d)->base)
#define nouveau_ppp_context_init(d) \
nouveau_engctx_init(&(d)->base)
#define nouveau_ppp_context_fini(d,s) \
nouveau_engctx_fini(&(d)->base, (s))
/* Default context ofuncs, aliased to the generic implementations. */
#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
#define _nouveau_ppp_context_init _nouveau_engctx_init
#define _nouveau_ppp_context_fini _nouveau_engctx_fini
#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
/* The ppp engine itself; wraps the generic engine base. */
struct nouveau_ppp {
struct nouveau_engine base;
};
/* Engine lifecycle: forwards to the generic engine helpers, registering
 * under the "PPPP"/"ppp" names (always enabled: 'true'). */
#define nouveau_ppp_create(p,e,c,d) \
nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
#define nouveau_ppp_destroy(d) \
nouveau_engine_destroy(&(d)->base)
#define nouveau_ppp_init(d) \
nouveau_engine_init(&(d)->base)
#define nouveau_ppp_fini(d,s) \
nouveau_engine_fini(&(d)->base, (s))
#define _nouveau_ppp_dtor _nouveau_engine_dtor
#define _nouveau_ppp_init _nouveau_engine_init
#define _nouveau_ppp_fini _nouveau_engine_fini
/* Sole chipset implementation declared so far. */
extern struct nouveau_oclass nv98_ppp_oclass;
#endif

View File

@ -0,0 +1,58 @@
/* Interface for the SW (software) engine: a CPU-implemented engine in the
 * modular framework; its per-channel context additionally carries vblank
 * and page-flip callback state. */
#ifndef __NOUVEAU_SOFTWARE_H__
#define __NOUVEAU_SOFTWARE_H__
#include <core/engine.h>
#include <core/engctx.h>
/* Per-channel software context; wraps the generic engine context and adds
 * software-method state. */
struct nouveau_software_chan {
struct nouveau_engctx base;
struct {
/* List linkage for this channel's vblank entry. */
struct list_head head;
/* Channel/ctxdma/offset/value identify where to write on vblank,
 * and crtc selects which head — NOTE(review): semantics inferred
 * from field names; confirm against the implementations. */
u32 channel;
u32 ctxdma;
u64 offset;
u32 value;
u32 crtc;
} vblank;
/* Page-flip completion callback and its opaque argument. */
int (*flip)(void *);
void *flip_data;
};
/* Context lifecycle: forwards to the generic engctx helpers; unlike the
 * hardware engines, no pushbuf/size/align arguments are taken (0, 0, 0). */
#define nouveau_software_context_create(p,e,c,d) \
nouveau_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
#define nouveau_software_context_destroy(d) \
nouveau_engctx_destroy(&(d)->base)
#define nouveau_software_context_init(d) \
nouveau_engctx_init(&(d)->base)
#define nouveau_software_context_fini(d,s) \
nouveau_engctx_fini(&(d)->base, (s))
/* Default context ofuncs, aliased to the generic implementations. */
#define _nouveau_software_context_dtor _nouveau_engctx_dtor
#define _nouveau_software_context_init _nouveau_engctx_init
#define _nouveau_software_context_fini _nouveau_engctx_fini
/* The software engine itself; wraps the generic engine base. */
struct nouveau_software {
struct nouveau_engine base;
};
/* Engine lifecycle: forwards to the generic engine helpers, registering
 * under the "SW"/"software" names (always enabled: 'true'). */
#define nouveau_software_create(p,e,c,d) \
nouveau_engine_create((p), (e), (c), true, "SW", "software", (d))
#define nouveau_software_destroy(d) \
nouveau_engine_destroy(&(d)->base)
#define nouveau_software_init(d) \
nouveau_engine_init(&(d)->base)
#define nouveau_software_fini(d,s) \
nouveau_engine_fini(&(d)->base, (s))
#define _nouveau_software_dtor _nouveau_engine_dtor
#define _nouveau_software_init _nouveau_engine_init
#define _nouveau_software_fini _nouveau_engine_fini
/* Per-generation software engine implementations. */
extern struct nouveau_oclass nv04_software_oclass;
extern struct nouveau_oclass nv10_software_oclass;
extern struct nouveau_oclass nv50_software_oclass;
extern struct nouveau_oclass nvc0_software_oclass;
#endif

View File

@ -0,0 +1,45 @@
/* Interface for the PVP (video processor) engine in the modular engine
 * framework; same pattern as the other engine headers —
 * NOTE(review): "video processor" inferred from the "vp" name; confirm. */
#ifndef __NOUVEAU_VP_H__
#define __NOUVEAU_VP_H__
#include <core/engine.h>
#include <core/engctx.h>
/* Per-channel vp context; wraps the generic engine context. */
struct nouveau_vp_chan {
struct nouveau_engctx base;
};
/* Context lifecycle: thin forwards to the generic engctx helpers. */
#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \
nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
#define nouveau_vp_context_destroy(d) \
nouveau_engctx_destroy(&(d)->base)
#define nouveau_vp_context_init(d) \
nouveau_engctx_init(&(d)->base)
#define nouveau_vp_context_fini(d,s) \
nouveau_engctx_fini(&(d)->base, (s))
/* Default context ofuncs, aliased to the generic implementations. */
#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
#define _nouveau_vp_context_init _nouveau_engctx_init
#define _nouveau_vp_context_fini _nouveau_engctx_fini
#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
/* The vp engine itself; wraps the generic engine base. */
struct nouveau_vp {
struct nouveau_engine base;
};
/* Engine lifecycle: forwards to the generic engine helpers, registering
 * under the "PVP"/"vp" names (always enabled: 'true'). */
#define nouveau_vp_create(p,e,c,d) \
nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
#define nouveau_vp_destroy(d) \
nouveau_engine_destroy(&(d)->base)
#define nouveau_vp_init(d) \
nouveau_engine_init(&(d)->base)
#define nouveau_vp_fini(d,s) \
nouveau_engine_fini(&(d)->base, (s))
#define _nouveau_vp_dtor _nouveau_engine_dtor
#define _nouveau_vp_init _nouveau_engine_init
#define _nouveau_vp_fini _nouveau_engine_fini
/* Sole chipset implementation declared so far. */
extern struct nouveau_oclass nv84_vp_oclass;
#endif

View File

@ -8,7 +8,6 @@
int nouveau_device_create_(struct pci_dev *, u64 name, const char *sname,
const char *cfg, const char *dbg, int, void **);
void nouveau_device_destroy(struct nouveau_device **);
int nv04_identify(struct nouveau_device *);
int nv10_identify(struct nouveau_device *);

View File

@ -8,7 +8,6 @@
struct nouveau_instobj {
struct nouveau_object base;
struct list_head head;
struct nouveau_mm heap;
u32 *suspend;
u64 addr;
u32 size;

View File

@ -73,6 +73,7 @@ struct nouveau_vm {
struct nouveau_vmmgr {
struct nouveau_subdev base;
u64 limit;
u32 pgt_bits;
u8 spg_shift;
u8 lpg_shift;

View File

@ -241,6 +241,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
if (!device->subdev[i]) {
ret = nouveau_object_ctor(nv_object(device), NULL,
oclass, NULL, i, &subdev);
if (ret == -ENODEV)
continue;
if (ret)
return ret;
@ -404,10 +406,26 @@ nouveau_device_sclass[] = {
{}
};
static void
nouveau_device_dtor(struct nouveau_object *object)
{
struct nouveau_device *device = (void *)object;
mutex_lock(&nv_devices_mutex);
list_del(&device->head);
mutex_unlock(&nv_devices_mutex);
if (device->base.mmio)
iounmap(device->base.mmio);
nouveau_subdev_destroy(&device->base);
}
static struct nouveau_oclass
nouveau_device_oclass = {
.handle = NV_SUBDEV(DEVICE, 0x00),
.ofuncs = &(struct nouveau_ofuncs) {
.dtor = nouveau_device_dtor,
},
};
@ -444,18 +462,3 @@ done:
mutex_unlock(&nv_devices_mutex);
return ret;
}
void
nouveau_device_destroy(struct nouveau_device **pdevice)
{
struct nouveau_device *device = *pdevice;
if (device) {
mutex_lock(&nv_devices_mutex);
list_del(&device->head);
mutex_unlock(&nv_devices_mutex);
if (device->base.mmio)
iounmap(device->base.mmio);
nouveau_subdev_destroy(&device->base);
}
*pdevice = NULL;
}

View File

@ -33,6 +33,12 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
#include <engine/graph.h>
#include <engine/disp.h>
int
nv04_identify(struct nouveau_device *device)
{
@ -47,6 +53,11 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x05:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -58,6 +69,11 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown RIVA chipset\n");

View File

@ -34,6 +34,12 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
#include <engine/graph.h>
#include <engine/disp.h>
int
nv10_identify(struct nouveau_device *device)
{
@ -49,6 +55,9 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x15:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -61,6 +70,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x16:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -73,6 +87,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x1a:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -85,6 +104,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x11:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -97,6 +121,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x17:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -109,6 +138,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x1f:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -121,6 +155,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x18:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -133,6 +172,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown Celsius chipset\n");

View File

@ -34,6 +34,12 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
#include <engine/graph.h>
#include <engine/disp.h>
int
nv20_identify(struct nouveau_device *device)
{
@ -49,6 +55,11 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x25:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -61,6 +72,11 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x28:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -73,6 +89,11 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x2a:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -85,6 +106,11 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown Kelvin chipset\n");

View File

@ -34,6 +34,13 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
#include <engine/graph.h>
#include <engine/mpeg.h>
#include <engine/disp.h>
int
nv30_identify(struct nouveau_device *device)
{
@ -49,6 +56,11 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x35:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -61,6 +73,11 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x31:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -73,6 +90,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x36:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -85,6 +108,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x34:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -97,6 +126,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown Rankine chipset\n");

View File

@ -34,6 +34,13 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
#include <engine/graph.h>
#include <engine/mpeg.h>
#include <engine/disp.h>
int
nv40_identify(struct nouveau_device *device)
{
@ -49,6 +56,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x41:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -61,6 +74,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x42:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -73,6 +92,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x43:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -85,6 +110,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x45:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -97,6 +128,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x47:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -109,6 +146,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x49:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -121,6 +164,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x4b:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -133,6 +182,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x44:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -145,6 +200,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x46:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -157,6 +218,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x4a:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -169,6 +236,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x4c:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -181,6 +254,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x4e:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -193,6 +272,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x63:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -205,6 +290,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x67:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -217,6 +308,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x68:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -229,6 +326,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown Curie chipset\n");

View File

@ -35,6 +35,18 @@
#include <subdev/vm.h>
#include <subdev/bar.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
#include <engine/graph.h>
#include <engine/mpeg.h>
#include <engine/vp.h>
#include <engine/crypt.h>
#include <engine/bsp.h>
#include <engine/ppp.h>
#include <engine/copy.h>
#include <engine/disp.h>
int
nv50_identify(struct nouveau_device *device)
{
@ -51,6 +63,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv50_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x84:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -64,6 +82,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x86:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -77,6 +104,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x92:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -90,6 +126,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x94:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -103,6 +148,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x96:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -116,6 +170,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x98:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -129,6 +192,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xa0:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -142,6 +214,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xaa:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -155,6 +236,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xac:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -168,6 +258,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xa3:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -181,6 +280,16 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xa5:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -194,6 +303,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xa8:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -207,6 +325,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xaf:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -220,6 +347,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
default:
nv_fatal(device, "unknown Tesla chipset\n");

View File

@ -36,6 +36,16 @@
#include <subdev/vm.h>
#include <subdev/bar.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
#include <engine/graph.h>
#include <engine/vp.h>
#include <engine/bsp.h>
#include <engine/ppp.h>
#include <engine/copy.h>
#include <engine/disp.h>
int
nvc0_identify(struct nouveau_device *device)
{
@ -53,6 +63,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xc4:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -67,6 +87,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xc3:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -81,6 +111,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xce:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -95,6 +135,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xcf:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -109,6 +159,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xc1:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -123,6 +183,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xc8:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -137,6 +207,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xd9:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -151,6 +231,15 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
break;
default:
nv_fatal(device, "unknown Fermi chipset\n");

View File

@ -36,6 +36,12 @@
#include <subdev/vm.h>
#include <subdev/bar.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
#include <engine/software.h>
#include <engine/graph.h>
#include <engine/disp.h>
int
nve0_identify(struct nouveau_device *device)
{
@ -53,6 +59,11 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
break;
case 0xe7:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@ -67,6 +78,11 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
break;
default:
nv_fatal(device, "unknown Kepler chipset\n");

View File

@ -139,8 +139,7 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
if (ret)
return ret;
@ -165,7 +164,7 @@ nv04_instmem_dtor(struct nouveau_object *object)
struct nv04_instmem_priv *priv = (void *)object;
nouveau_gpuobj_ref(NULL, &priv->ramfc);
nouveau_gpuobj_ref(NULL, &priv->ramro);
nouveau_gpuobj_ref(NULL, &priv->ramht);
nouveau_ramht_ref(NULL, &priv->ramht);
nouveau_gpuobj_ref(NULL, &priv->vbios);
nouveau_mm_fini(&priv->heap);
if (priv->iomem)

View File

@ -2,6 +2,7 @@
#define __NV04_INSTMEM_H__
#include <core/gpuobj.h>
#include <core/ramht.h>
#include <core/mm.h>
#include <subdev/instmem.h>
@ -14,11 +15,17 @@ struct nv04_instmem_priv {
struct nouveau_mm heap;
struct nouveau_gpuobj *vbios;
struct nouveau_gpuobj *ramht;
struct nouveau_ramht *ramht;
struct nouveau_gpuobj *ramro;
struct nouveau_gpuobj *ramfc;
};
static inline struct nv04_instmem_priv *
nv04_instmem(void *obj)
{
return (void *)nouveau_instmem(obj);
}
struct nv04_instobj_priv {
struct nouveau_instobj base;
struct nouveau_mm_node *mem;

View File

@ -87,8 +87,7 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0,
NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
if (ret)
return ret;

View File

@ -299,6 +299,7 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
fpde = (vma->node->offset >> vmm->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
for (pde = fpde; pde <= lpde; pde++) {
struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
int big = (vma->node->type != vmm->spg_shift);

View File

@ -96,6 +96,7 @@ nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.create = nv04_vm_create;
priv->base.limit = NV04_PDMA_SIZE;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;

View File

@ -10,4 +10,10 @@ struct nv04_vmmgr_priv {
dma_addr_t null;
};
static inline struct nv04_vmmgr_priv *
nv04_vmmgr(void *obj)
{
return (void *)nouveau_vmmgr(obj);
}
#endif

View File

@ -97,6 +97,7 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.create = nv04_vm_create;
priv->base.limit = NV41_GART_SIZE;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;

View File

@ -178,6 +178,7 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.create = nv04_vm_create;
priv->base.limit = NV44_GART_SIZE;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;

View File

@ -154,7 +154,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
struct nouveau_engine *engine;
int i;
#if 0
for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
if (atomic_read(&vm->engref[i])) {
engine = nouveau_engine(vm->vmm, i);
@ -162,11 +161,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
engine->tlb_flush(engine);
}
}
#else
nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x06); /* bar */
nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x05); /* fifo */
nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x00); /* gr */
#endif
}
void
@ -206,6 +200,7 @@ nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
priv->base.limit = 1ULL << 40;
priv->base.pgt_bits = 29 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 16;

View File

@ -162,6 +162,7 @@ nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
priv->base.limit = 1ULL << 40;
priv->base.pgt_bits = 27 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 17;

View File

@ -21,23 +21,153 @@
*
*/
#include "drmP.h"
#include <core/object.h>
#include <core/client.h>
#include <core/device.h>
#include <core/class.h>
#include <core/mm.h>
#include "nouveau_drv.h"
#include <subdev/fb.h>
#include <subdev/timer.h>
#include <subdev/instmem.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include <core/ramht.h>
#include "nouveau_software.h"
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
mutex_lock(&cli->mutex);
if (!cli->abi16) {
struct nouveau_abi16 *abi16;
cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
if (cli->abi16) {
INIT_LIST_HEAD(&abi16->channels);
abi16->client = nv_object(cli);
/* allocate device object targeting client's default
* device (ie. the one that belongs to the fd it
* opened)
*/
if (nouveau_object_new(abi16->client, NVDRM_CLIENT,
NVDRM_DEVICE, 0x0080,
&(struct nv_device_class) {
.device = ~0ULL,
},
sizeof(struct nv_device_class),
&abi16->device) == 0)
return cli->abi16;
kfree(cli->abi16);
cli->abi16 = NULL;
}
mutex_unlock(&cli->mutex);
}
return cli->abi16;
}
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
struct nouveau_cli *cli = (void *)abi16->client;
mutex_unlock(&cli->mutex);
return ret;
}
u16
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
switch (nv_device(drm->device)->card_type) {
case NV_04:
return 0x006e;
case NV_10:
case NV_20:
case NV_30:
case NV_40:
return 0x016e;
case NV_50:
return 0x506e;
case NV_C0:
case NV_D0:
case NV_E0:
return 0x906e;
}
return 0x0000;
}
static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
struct nouveau_abi16_ntfy *ntfy)
{
nouveau_mm_free(&chan->heap, &ntfy->node);
list_del(&ntfy->head);
kfree(ntfy);
}
static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
struct nouveau_abi16_chan *chan)
{
struct nouveau_abi16_ntfy *ntfy, *temp;
/* cleanup notifier state */
list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
nouveau_abi16_ntfy_fini(chan, ntfy);
}
if (chan->ntfy) {
nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
drm_gem_object_unreference_unlocked(chan->ntfy->gem);
}
if (chan->heap.block_size)
nouveau_mm_fini(&chan->heap);
/* destroy channel object, all children will be killed too */
if (chan->chan) {
abi16->handles &= ~(1 << (chan->chan->handle & 0xffff));
nouveau_channel_del(&chan->chan);
}
list_del(&chan->head);
kfree(chan);
}
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
struct nouveau_cli *cli = (void *)abi16->client;
struct nouveau_abi16_chan *chan, *temp;
/* cleanup channels */
list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
nouveau_abi16_chan_fini(abi16, chan);
}
/* destroy the device object */
nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE);
kfree(cli->abi16);
cli->abi16 = NULL;
}
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nv_device(drm->device);
struct nouveau_timer *ptimer = nouveau_timer(device);
struct drm_nouveau_getparam *getparam = data;
switch (getparam->param) {
case NOUVEAU_GETPARAM_CHIPSET_ID:
getparam->value = dev_priv->chipset;
getparam->value = device->chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
getparam->value = dev->pci_vendor;
@ -55,16 +185,16 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = 2;
break;
case NOUVEAU_GETPARAM_FB_SIZE:
getparam->value = dev_priv->fb_available_size;
getparam->value = drm->gem.vram_available;
break;
case NOUVEAU_GETPARAM_AGP_SIZE:
getparam->value = dev_priv->gart_info.aper_size;
getparam->value = drm->gem.gart_available;
break;
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
getparam->value = 0; /* deprecated */
break;
case NOUVEAU_GETPARAM_PTIMER_TIME:
getparam->value = nv_timer_read(dev);
getparam->value = ptimer->read(ptimer);
break;
case NOUVEAU_GETPARAM_HAS_BO_USAGE:
getparam->value = 1;
@ -76,13 +206,13 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
/* NV40 and NV50 versions are quite different, but register
* address is the same. User is supposed to know the card
* family anyway... */
if (dev_priv->chipset >= 0x40) {
getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
if (device->chipset >= 0x40) {
getparam->value = nv_rd32(device, 0x001540);
break;
}
/* FALLTHRU */
default:
NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
nv_debug(device, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
}
@ -98,148 +228,247 @@ nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_channel_alloc *init = data;
struct nouveau_channel *chan;
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_abi16_chan *chan;
struct nouveau_client *client;
struct nouveau_device *device;
struct nouveau_instmem *imem;
struct nouveau_fb *pfb;
int ret;
if (!dev_priv->eng[NVOBJ_ENGINE_GR])
return -ENODEV;
if (unlikely(!abi16))
return -ENOMEM;
client = nv_client(abi16->client);
if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
return -EINVAL;
return nouveau_abi16_put(abi16, -EINVAL);
ret = nouveau_channel_alloc(dev, &chan, file_priv,
init->fb_ctxdma_handle,
init->tt_ctxdma_handle);
device = nv_device(abi16->device);
imem = nouveau_instmem(device);
pfb = nouveau_fb(device);
/* allocate "abi16 channel" data and make up a handle for it */
init->channel = ffsll(~abi16->handles);
if (!init->channel--)
return nouveau_abi16_put(abi16, -ENOSPC);
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
return nouveau_abi16_put(abi16, -ENOMEM);
INIT_LIST_HEAD(&chan->notifiers);
list_add(&chan->head, &abi16->channels);
abi16->handles |= (1 << init->channel);
/* create channel object and initialise dma and fence management */
ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
init->channel, init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
if (ret)
return ret;
init->channel = chan->id;
goto done;
if (nouveau_vram_pushbuf == 0) {
if (chan->dma.ib_max)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
} else {
if (device->card_type >= NV_50)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
else
if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
}
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
if (dev_priv->card_type < NV_C0) {
if (device->card_type < NV_C0) {
init->subchan[0].handle = 0x00000000;
init->subchan[0].grclass = 0x0000;
init->subchan[1].handle = NvSw;
init->subchan[1].grclass = NV_SW;
init->subchan[1].grclass = 0x506e;
init->nr_subchan = 2;
}
/* Named memory object area */
ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
&init->notifier_handle);
ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
if (ret == 0)
atomic_inc(&chan->users); /* userspace reference */
nouveau_channel_put(&chan);
return ret;
ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
if (ret)
goto done;
if (device->card_type >= NV_50) {
ret = nouveau_bo_vma_add(chan->ntfy, client->vm,
&chan->ntfy_vma);
if (ret)
goto done;
}
ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
&init->notifier_handle);
if (ret)
goto done;
ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
done:
if (ret)
nouveau_abi16_chan_fini(abi16, chan);
return nouveau_abi16_put(abi16, ret);
}
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_channel_free *req = data;
struct nouveau_channel *chan;
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_abi16_chan *chan;
int ret = -ENOENT;
chan = nouveau_channel_get(file_priv, req->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
if (unlikely(!abi16))
return -ENOMEM;
list_del(&chan->list);
atomic_dec(&chan->users);
nouveau_channel_put(&chan);
return 0;
list_for_each_entry(chan, &abi16->channels, head) {
if (chan->chan->handle == (NVDRM_CHAN | req->channel)) {
nouveau_abi16_chan_fini(abi16, chan);
return nouveau_abi16_put(abi16, 0);
}
}
return nouveau_abi16_put(abi16, ret);
}
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_grobj_alloc *init = data;
struct nouveau_channel *chan;
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_object *object;
int ret;
if (unlikely(!abi16))
return -ENOMEM;
if (init->handle == ~0)
return -EINVAL;
return nouveau_abi16_put(abi16, -EINVAL);
/* compatibility with userspace that assumes 506e for all chipsets */
if (init->class == 0x506e) {
init->class = nouveau_software_class(dev);
init->class = nouveau_abi16_swclass(drm);
if (init->class == 0x906e)
return 0;
} else
if (init->class == 0x906e) {
NV_DEBUG(dev, "906e not supported yet\n");
return -EINVAL;
return nouveau_abi16_put(abi16, 0);
}
chan = nouveau_channel_get(file_priv, init->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
if (nouveau_ramht_find(chan, init->handle)) {
ret = -EEXIST;
goto out;
}
ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
if (ret) {
NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
ret, init->channel, init->handle);
}
out:
nouveau_channel_put(&chan);
return ret;
ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel,
init->handle, init->class, NULL, 0, &object);
return nouveau_abi16_put(abi16, ret);
}
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_notifierobj_alloc *na = data;
struct nouveau_channel *chan;
struct drm_nouveau_notifierobj_alloc *info = data;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nv_device(drm->device);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_abi16_chan *chan, *temp;
struct nouveau_abi16_ntfy *ntfy;
struct nouveau_object *object;
struct nv_dma_class args;
int ret;
if (unlikely(!abi16))
return -ENOMEM;
/* completely unnecessary for these chipsets... */
if (unlikely(dev_priv->card_type >= NV_C0))
return -EINVAL;
if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
return nouveau_abi16_put(abi16, -EINVAL);
chan = nouveau_channel_get(file_priv, na->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
if (chan->chan->handle == (NVDRM_CHAN | info->channel))
break;
chan = NULL;
}
ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
&na->offset);
nouveau_channel_put(&chan);
return ret;
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
if (!ntfy)
return nouveau_abi16_put(abi16, -ENOMEM);
list_add(&ntfy->head, &chan->notifiers);
ntfy->handle = info->handle;
ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1,
&ntfy->node);
if (ret)
goto done;
args.start = ntfy->node->offset;
args.limit = ntfy->node->offset + ntfy->node->length - 1;
if (device->card_type >= NV_50) {
args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
args.start += chan->ntfy_vma.offset;
args.limit += chan->ntfy_vma.offset;
} else
if (drm->agp.stat == ENABLED) {
args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
args.start += drm->agp.base + chan->ntfy->bo.offset;
args.limit += drm->agp.base + chan->ntfy->bo.offset;
} else {
args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
args.start += chan->ntfy->bo.offset;
args.limit += chan->ntfy->bo.offset;
}
ret = nouveau_object_new(abi16->client, chan->chan->handle,
ntfy->handle, 0x003d, &args,
sizeof(args), &object);
if (ret)
goto done;
done:
if (ret)
nouveau_abi16_ntfy_fini(chan, ntfy);
return nouveau_abi16_put(abi16, ret);
}
int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_gpuobj_free *objfree = data;
struct nouveau_channel *chan;
struct drm_nouveau_gpuobj_free *fini = data;
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_abi16_chan *chan, *temp;
struct nouveau_abi16_ntfy *ntfy;
int ret;
chan = nouveau_channel_get(file_priv, objfree->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
if (unlikely(!abi16))
return -ENOMEM;
/* Synchronize with the user channel */
nouveau_channel_idle(chan);
list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
if (chan->chan->handle == (NVDRM_CHAN | fini->channel))
break;
chan = NULL;
}
ret = nouveau_ramht_remove(chan, objfree->handle);
nouveau_channel_put(&chan);
return ret;
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
/* synchronize with the user channel and destroy the gpu object */
nouveau_channel_idle(chan->chan);
ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle);
if (ret)
return nouveau_abi16_put(abi16, ret);
/* cleanup extra state if this object was a notifier */
list_for_each_entry(ntfy, &chan->notifiers, head) {
if (ntfy->handle == fini->handle) {
nouveau_mm_free(&chan->heap, &ntfy->node);
list_del(&ntfy->head);
break;
}
}
return nouveau_abi16_put(abi16, 0);
}

View File

@ -3,6 +3,7 @@
#define ABI16_IOCTL_ARGS \
struct drm_device *dev, void *data, struct drm_file *file_priv
int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS);
@ -11,6 +12,37 @@ int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
struct nouveau_abi16_ntfy {
struct list_head head;
struct nouveau_mm_node *node;
u32 handle;
};
struct nouveau_abi16_chan {
struct list_head head;
struct nouveau_channel *chan;
struct list_head notifiers;
struct nouveau_bo *ntfy;
struct nouveau_vma ntfy_vma;
struct nouveau_mm heap;
};
struct nouveau_abi16 {
struct nouveau_object *client;
struct nouveau_object *device;
struct list_head channels;
u64 handles;
};
struct nouveau_drm;
struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
int nouveau_abi16_put(struct nouveau_abi16 *, int);
void nouveau_abi16_fini(struct nouveau_abi16 *);
u16 nouveau_abi16_swclass(struct nouveau_drm *);
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
struct drm_nouveau_channel_alloc {
uint32_t fb_ctxdma_handle;
uint32_t tt_ctxdma_handle;

View File

@ -13,7 +13,6 @@
#include "drm_crtc_helper.h"
#include "nouveau_drv.h"
#include <nouveau_drm.h>
#include "nv50_display.h"
#include "nouveau_connector.h"
#include <linux/vga_switcheroo.h>

View File

@ -27,66 +27,57 @@
* Jeremy Kolb <jkolb@brandeis.edu>
*/
#include "drmP.h"
#include "ttm/ttm_page_alloc.h"
#include <core/engine.h>
#include <nouveau_drm.h>
#include "nouveau_drv.h"
#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include <core/mm.h>
#include "nouveau_fence.h"
#include <core/ramht.h>
#include <engine/fifo.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
/*
* NV10-NV40 tiling helpers
*/
static void
nv10_bo_update_tile_region(struct drm_device *dev,
struct nouveau_tile_reg *tilereg, uint32_t addr,
uint32_t size, uint32_t pitch, uint32_t flags)
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
u32 addr, u32 size, u32 pitch, u32 flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i = tilereg - dev_priv->tile.reg, j;
struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
unsigned long save;
struct nouveau_drm *drm = nouveau_newpriv(dev);
int i = reg - drm->tile.reg;
struct nouveau_fb *pfb = nouveau_fb(drm->device);
struct nouveau_fb_tile *tile = &pfb->tile.region[i];
struct nouveau_engine *engine;
nouveau_fence_unref(&tilereg->fence);
nouveau_fence_unref(&reg->fence);
if (tile->pitch)
nvfb_tile_fini(dev, i);
pfb->tile.fini(pfb, i, tile);
if (pitch)
nvfb_tile_init(dev, i, addr, size, pitch, flags);
pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
spin_lock_irqsave(&dev_priv->context_switch_lock, save);
nv_wr32(dev, NV03_PFIFO_CACHES, 0);
nv04_fifo_cache_pull(dev, false);
pfb->tile.prog(pfb, i, tile);
nouveau_wait_for_idle(dev);
nvfb_tile_prog(dev, i);
for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
dev_priv->eng[j]->set_tile_region(dev, i);
}
nv04_fifo_cache_pull(dev, true);
nv_wr32(dev, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
engine->tile_prog(engine, i);
if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
engine->tile_prog(engine, i);
}
static struct nouveau_tile_reg *
static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nouveau_drm_tile *tile = &drm->tile.reg[i];
spin_lock(&dev_priv->tile.lock);
spin_lock(&drm->tile.lock);
if (!tile->used &&
(!tile->fence || nouveau_fence_done(tile->fence)))
@ -94,18 +85,18 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
else
tile = NULL;
spin_unlock(&dev_priv->tile.lock);
spin_unlock(&drm->tile.lock);
return tile;
}
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
struct nouveau_fence *fence)
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
struct nouveau_fence *fence)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_drm *drm = nouveau_newpriv(dev);
if (tile) {
spin_lock(&dev_priv->tile.lock);
spin_lock(&drm->tile.lock);
if (fence) {
/* Mark it as pending. */
tile->fence = fence;
@ -113,25 +104,27 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
}
tile->used = false;
spin_unlock(&dev_priv->tile.lock);
spin_unlock(&drm->tile.lock);
}
}
static struct nouveau_tile_reg *
nv10_bo_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
uint32_t pitch, uint32_t flags)
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 flags)
{
struct nouveau_tile_reg *tile, *found = NULL;
struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nouveau_fb *pfb = nouveau_fb(drm->device);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
for (i = 0; i < nvfb_tile_nr(dev); i++) {
for (i = 0; i < pfb->tile.regions; i++) {
tile = nv10_bo_get_tile_region(dev, i);
if (pitch && !found) {
found = tile;
continue;
} else if (tile && nvfb_tile(dev, i)->pitch) {
} else if (tile && pfb->tile.region[i].pitch) {
/* Kill an unused tile region. */
nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
}
@ -148,13 +141,12 @@ nv10_bo_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
kfree(nvbo);
}
@ -163,23 +155,24 @@ static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int *align, int *size)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nouveau_device *device = nv_device(drm->device);
if (dev_priv->card_type < NV_50) {
if (device->card_type < NV_50) {
if (nvbo->tile_mode) {
if (dev_priv->chipset >= 0x40) {
if (device->chipset >= 0x40) {
*align = 65536;
*size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x30) {
} else if (device->chipset >= 0x30) {
*align = 32768;
*size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x20) {
} else if (device->chipset >= 0x20) {
*align = 16384;
*size = roundup(*size, 64 * nvbo->tile_mode);
} else if (dev_priv->chipset >= 0x10) {
} else if (device->chipset >= 0x10) {
*align = 16384;
*size = roundup(*size, 32 * nvbo->tile_mode);
}
@ -198,7 +191,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
struct sg_table *sg,
struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nouveau_bo *nvbo;
size_t acc_size;
int ret;
@ -215,22 +208,22 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
INIT_LIST_HEAD(&nvbo->vma_list);
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
nvbo->bo.bdev = &drm->ttm.bdev;
nvbo->page_shift = 12;
if (dev_priv->chan_vm) {
if (drm->client.base.vm) {
if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
nvbo->page_shift = nvvm_lpg_shift(dev_priv->chan_vm);
nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
}
nouveau_bo_fixup_align(nvbo, flags, &align, &size);
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
sizeof(struct nouveau_bo));
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
type, &nvbo->placement,
align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
nouveau_bo_del_ttm);
@ -259,10 +252,11 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
int vram_pages = nvfb_vram_size(dev_priv->dev) >> PAGE_SHIFT;
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nouveau_fb *pfb = nouveau_fb(drm->device);
u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;
if (dev_priv->card_type == NV_10 &&
if (nv_device(drm->device)->card_type == NV_10 &&
nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
@ -302,13 +296,12 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
NV_ERROR(nouveau_bdev(bo->bdev)->dev,
"bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
1 << bo->mem.mem_type, memtype);
return -EINVAL;
}
@ -326,10 +319,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
dev_priv->fb_aper_free -= bo->mem.size;
drm->gem.vram_available -= bo->mem.size;
break;
case TTM_PL_TT:
dev_priv->gart_info.aper_free -= bo->mem.size;
drm->gem.gart_available -= bo->mem.size;
break;
default:
break;
@ -345,7 +338,7 @@ out:
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
@ -362,10 +355,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
dev_priv->fb_aper_free += bo->mem.size;
drm->gem.vram_available += bo->mem.size;
break;
case TTM_PL_TT:
dev_priv->gart_info.aper_free += bo->mem.size;
drm->gem.gart_available += bo->mem.size;
break;
default:
break;
@ -460,30 +453,18 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
}
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
uint32_t page_flags, struct page *dummy_read)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct drm_device *dev = drm->dev;
switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
case NOUVEAU_GART_AGP:
return ttm_agp_tt_create(bdev, dev->agp->bridge,
size, page_flags, dummy_read_page);
#endif
case NOUVEAU_GART_PDMA:
case NOUVEAU_GART_HW:
return nouveau_sgdma_create_ttm(bdev, size, page_flags,
dummy_read_page);
default:
NV_ERROR(dev, "Unknown GART type %d\n",
dev_priv->gart_info.type);
break;
if (drm->agp.stat == ENABLED) {
return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
page_flags, dummy_read);
}
return NULL;
return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}
static int
@ -497,8 +478,7 @@ static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_drm *drm = nouveau_bdev(bdev);
switch (type) {
case TTM_PL_SYSTEM:
@ -507,7 +487,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
if (dev_priv->card_type >= NV_50) {
if (nv_device(drm->device)->card_type >= NV_50) {
man->func = &nouveau_vram_manager;
man->io_reserve_fastpath = false;
man->use_io_reserve_lru = true;
@ -521,35 +501,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_TT:
if (dev_priv->card_type >= NV_50)
if (nv_device(drm->device)->card_type >= NV_50)
man->func = &nouveau_gart_manager;
else
if (dev_priv->gart_info.type != NOUVEAU_GART_AGP)
if (drm->agp.stat != ENABLED)
man->func = &nv04_gart_manager;
else
man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
if (drm->agp.stat == ENABLED) {
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
break;
case NOUVEAU_GART_PDMA:
case NOUVEAU_GART_HW:
} else {
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
default:
NV_ERROR(dev, "Unknown GART type: %d\n",
dev_priv->gart_info.type);
return -EINVAL;
}
break;
default:
NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
@ -783,20 +756,14 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
&chan->m2mf_ntfy);
int ret = RING_SPACE(chan, 6);
if (ret == 0) {
ret = RING_SPACE(chan, 6);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
OUT_RING (chan, handle);
BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
OUT_RING (chan, NvNotify0);
OUT_RING (chan, NvDmaFB);
OUT_RING (chan, NvDmaFB);
} else {
nouveau_ramht_remove(chan, NvNotify0);
}
BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
OUT_RING (chan, handle);
BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
OUT_RING (chan, NvNotify0);
OUT_RING (chan, NvDmaFB);
OUT_RING (chan, NvDmaFB);
}
return ret;
@ -895,16 +862,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
&chan->m2mf_ntfy);
int ret = RING_SPACE(chan, 4);
if (ret == 0) {
ret = RING_SPACE(chan, 4);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
OUT_RING (chan, handle);
BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
OUT_RING (chan, NvNotify0);
}
BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
OUT_RING (chan, handle);
BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
OUT_RING (chan, NvNotify0);
}
return ret;
@ -915,8 +878,8 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
if (mem->mem_type == TTM_PL_TT)
return chan->gart_handle;
return chan->vram_handle;
return NvDmaTT;
return NvDmaFB;
}
static int
@ -972,8 +935,9 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
struct nouveau_mem *node = mem->mm_node;
int ret;
ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
node->page_shift, NV_MEM_ACCESS_RO, vma);
ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
PAGE_SHIFT, node->page_shift,
NV_MEM_ACCESS_RW, vma);
if (ret)
return ret;
@ -990,19 +954,19 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = chan = dev_priv->channel;
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = chan = drm->channel;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
mutex_lock(&chan->cli->mutex);
/* create temporary vmas for the transfer and attach them to the
* old nouveau_mem node, these will get cleaned up after ttm has
* destroyed the ttm_mem_reg
*/
if (dev_priv->card_type >= NV_50) {
if (nv_device(drm->device)->card_type >= NV_50) {
struct nouveau_mem *node = old_mem->mm_node;
ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
@ -1014,7 +978,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
goto out;
}
ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
if (ret == 0) {
ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
no_wait_reserve,
@ -1022,14 +986,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
}
out:
mutex_unlock(&chan->mutex);
mutex_unlock(&chan->cli->mutex);
return ret;
}
void
nouveau_bo_move_init(struct nouveau_channel *chan)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_cli *cli = chan->cli;
struct nouveau_drm *drm = chan->drm;
static const struct {
const char *name;
int engine;
@ -1054,19 +1019,26 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
int ret;
do {
struct nouveau_object *object;
u32 handle = (mthd->engine << 16) | mthd->oclass;
ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
ret = nouveau_object_new(nv_object(cli), chan->handle, handle,
mthd->oclass, NULL, 0, &object);
if (ret == 0) {
ret = mthd->init(chan, handle);
if (ret == 0) {
dev_priv->ttm.move = mthd->exec;
name = mthd->name;
break;
if (ret) {
nouveau_object_del(nv_object(cli),
chan->handle, handle);
continue;
}
drm->ttm.move = mthd->exec;
name = mthd->name;
break;
}
} while ((++mthd)->exec);
NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
static int
@ -1151,7 +1123,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
nouveau_vm_map(vma, new_mem->mm_node);
} else
if (new_mem && new_mem->mem_type == TTM_PL_TT &&
nvbo->page_shift == nvvm_spg_shift(vma->vm)) {
nvbo->page_shift == vma->vm->vmm->spg_shift) {
if (((struct nouveau_mem *)new_mem->mm_node)->sg)
nouveau_vm_map_sg_table(vma, 0, new_mem->
num_pages << PAGE_SHIFT,
@ -1168,10 +1140,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct nouveau_tile_reg **new_tile)
struct nouveau_drm_tile **new_tile)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 offset = new_mem->start << PAGE_SHIFT;
@ -1179,7 +1151,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
if (new_mem->mem_type != TTM_PL_VRAM)
return 0;
if (dev_priv->card_type >= NV_10) {
if (nv_device(drm->device)->card_type >= NV_10) {
*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,
nvbo->tile_flags);
@ -1190,11 +1162,11 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
struct nouveau_tile_reg *new_tile,
struct nouveau_tile_reg **old_tile)
struct nouveau_drm_tile *new_tile,
struct nouveau_drm_tile **old_tile)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
*old_tile = new_tile;
@ -1205,13 +1177,13 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct ttm_mem_reg *old_mem = &bo->mem;
struct nouveau_tile_reg *new_tile = NULL;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
if (dev_priv->card_type < NV_50) {
if (nv_device(drm->device)->card_type < NV_50) {
ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
if (ret)
return ret;
@ -1226,7 +1198,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
}
/* CPU copy if we have no accelerated method available */
if (!dev_priv->ttm.move) {
if (!drm->ttm.move) {
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@ -1246,7 +1218,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (dev_priv->card_type < NV_50) {
if (nv_device(drm->device)->card_type < NV_50) {
if (ret)
nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
else
@ -1266,8 +1238,8 @@ static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct drm_device *dev = drm->dev;
int ret;
mem->bus.addr = NULL;
@ -1283,9 +1255,9 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return 0;
case TTM_PL_TT:
#if __OS_HAS_AGP
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
if (drm->agp.stat == ENABLED) {
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = dev_priv->gart_info.aper_base;
mem->bus.base = drm->agp.base;
mem->bus.is_iomem = true;
}
#endif
@ -1294,10 +1266,11 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
if (dev_priv->card_type >= NV_50) {
if (nv_device(drm->device)->card_type >= NV_50) {
struct nouveau_bar *bar = nouveau_bar(drm->device);
struct nouveau_mem *node = mem->mm_node;
ret = nvbar_map(dev, node, NV_MEM_ACCESS_RW,
ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
&node->bar_vma);
if (ret)
return ret;
@ -1314,40 +1287,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nouveau_bar *bar = nouveau_bar(drm->device);
struct nouveau_mem *node = mem->mm_node;
if (mem->mem_type != TTM_PL_VRAM)
return;
if (!node->bar_vma.node)
return;
nvbar_unmap(dev_priv->dev, &node->bar_vma);
bar->unmap(bar, &node->bar_vma);
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_device *device = nv_device(drm->device);
u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
*/
if (bo->mem.mem_type != TTM_PL_VRAM) {
if (dev_priv->card_type < NV_50 ||
if (nv_device(drm->device)->card_type < NV_50 ||
!nouveau_bo_tile_layout(nvbo))
return 0;
}
/* make sure bo is in mappable vram */
if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
if (bo->mem.start + bo->mem.num_pages < mappable)
return 0;
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
nvbo->placement.lpfn = mappable;
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
return nouveau_bo_validate(nvbo, false, true, false);
}
@ -1356,7 +1329,7 @@ static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct drm_nouveau_private *dev_priv;
struct nouveau_drm *drm;
struct drm_device *dev;
unsigned i;
int r;
@ -1373,11 +1346,11 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
}
dev_priv = nouveau_bdev(ttm->bdev);
dev = dev_priv->dev;
drm = nouveau_bdev(ttm->bdev);
dev = drm->dev;
#if __OS_HAS_AGP
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
if (drm->agp.stat == ENABLED) {
return ttm_agp_tt_populate(ttm);
}
#endif
@ -1414,7 +1387,7 @@ static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct drm_nouveau_private *dev_priv;
struct nouveau_drm *drm;
struct drm_device *dev;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@ -1422,11 +1395,11 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (slave)
return;
dev_priv = nouveau_bdev(ttm->bdev);
dev = dev_priv->dev;
drm = nouveau_bdev(ttm->bdev);
dev = drm->dev;
#if __OS_HAS_AGP
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
if (drm->agp.stat == ENABLED) {
ttm_agp_tt_unpopulate(ttm);
return;
}

View File

@ -2,13 +2,9 @@
#define __NOUVEAU_BO_H__
struct nouveau_channel;
struct nouveau_fence;
struct nouveau_vma;
struct nouveau_tile_reg {
bool used;
struct nouveau_fence *fence;
};
struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
@ -29,7 +25,7 @@ struct nouveau_bo {
u32 tile_mode;
u32 tile_flags;
struct nouveau_tile_reg *tile;
struct nouveau_drm_tile *tile;
struct drm_gem_object *gem;
int pin_refcnt;
@ -89,4 +85,15 @@ int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
struct nouveau_vma *);
void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
{
bool is_iomem;
void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
&nvbo->kmap, &is_iomem);
WARN_ON_ONCE(ioptr && !is_iomem);
return ioptr;
}
#endif

View File

@ -0,0 +1,387 @@
/*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/object.h>
#include <core/client.h>
#include <core/device.h>
#include <core/class.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/instmem.h>
#include <engine/software.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"
/* Module option: when non-zero, DMA push buffers are placed in VRAM rather
 * than system memory (GART).  Permissions 0400: readable by owner only,
 * not writable after load.
 */
MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
static int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
/* Wait for the channel to finish all previously submitted work.
 *
 * Emits a new fence on the channel and waits for it to signal (both wait
 * flags false — presumably non-lazy, non-interruptible; confirm against
 * nouveau_fence_wait()).  Logs an error if fence creation or the wait
 * fails.  Returns 0 on success or a negative error code.
 */
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct nouveau_fence *fence = NULL;
	int ret = nouveau_fence_new(chan, &fence);

	if (ret == 0) {
		ret = nouveau_fence_wait(fence, false, false);
		nouveau_fence_unref(&fence);
	}

	if (ret != 0)
		NV_ERROR(chan->drm, "failed to idle channel 0x%08x\n",
			 chan->handle);
	return ret;
}
/* Destroy a channel, releasing everything nouveau_channel_prep() and
 * nouveau_channel_init() set up: the fence context (after idling the
 * channel so outstanding work completes first), the channel and pushbuf
 * DMA objects, and the push buffer BO's vma/mapping/reference.
 * Tolerates *pchan == NULL; *pchan is always cleared on return.
 */
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	if (chan) {
		struct nouveau_object *client = nv_object(chan->cli);
		/* only idle if a fence context was actually created */
		if (chan->fence) {
			nouveau_channel_idle(chan);
			nouveau_fence(chan->drm)->context_del(chan);
		}
		nouveau_object_del(client, NVDRM_DEVICE, chan->handle);
		nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
		nouveau_bo_unmap(chan->push.buffer);
		nouveau_bo_ref(NULL, &chan->push.buffer);
		kfree(chan);
	}
	*pchan = NULL;
}
/* First stage of channel creation, shared by the ind and dma variants:
 * allocate the channel struct, create and map a push buffer BO of @size
 * bytes (VRAM if the vram_pushbuf option is set, GART otherwise), and
 * create a DMA object (class 0x0002) covering the whole memory space the
 * pushbuf lives in.  On any failure the partially-built channel is torn
 * down via nouveau_channel_del() and *pchan is cleared.
 *
 * @parent: handle of the parent object for the new DMA object
 * @handle: handle for the channel; its low 16 bits also form the
 *          pushbuf DMA object handle (NVDRM_PUSH | (handle & 0xffff))
 * Returns 0 on success or a negative error code.
 */
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
		     u32 parent, u32 handle, u32 size,
		     struct nouveau_channel **pchan)
{
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_instmem *imem = nouveau_instmem(device);
	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
	struct nouveau_fb *pfb = nouveau_fb(device);
	struct nouveau_client *client = &cli->base;
	struct nv_dma_class args = {};
	struct nouveau_channel *chan;
	struct nouveau_object *push;
	u32 target;
	int ret;

	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->cli = cli;
	chan->drm = drm;
	chan->handle = handle;

	/* allocate memory for dma push buffer */
	target = TTM_PL_FLAG_TT;
	if (nouveau_vram_pushbuf)
		target = TTM_PL_FLAG_VRAM;

	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
			     &chan->push.buffer);
	if (ret == 0) {
		ret = nouveau_bo_pin(chan->push.buffer, target);
		if (ret == 0)
			ret = nouveau_bo_map(chan->push.buffer);
	}

	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	/* create dma object covering the *entire* memory space that the
	 * pushbuf lives in, this is because the GEM code requires that
	 * we be able to call out to other (indirect) push buffers
	 */
	chan->push.vma.offset = chan->push.buffer->bo.offset;
	chan->push.handle = NVDRM_PUSH | (handle & 0xffff);

	if (device->card_type >= NV_50) {
		/* >=nv50: map the pushbuf into the client's VM and target
		 * the DMA object at the whole VM address space
		 */
		ret = nouveau_bo_vma_add(chan->push.buffer, client->vm,
					 &chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
		}

		args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
		args.start = 0;
		args.limit = client->vm->vmm->limit - 1;
	} else
	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
		/* pre-nv50, pushbuf in VRAM: cover all of VRAM minus the
		 * instmem-reserved area
		 */
		u64 limit = pfb->ram.size - imem->reserved - 1;
		if (device->card_type == NV_04) {
			/* nv04 vram pushbuf hack, retarget to its location in
			 * the framebuffer bar rather than direct vram access..
			 * nfi why this exists, it came from the -nv ddx.
			 */
			args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
			args.start = pci_resource_start(device->pdev, 1);
			args.limit = args.start + limit;
		} else {
			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = limit;
		}
	} else {
		/* pre-nv50, pushbuf in system memory: via AGP aperture when
		 * enabled, otherwise through the VM manager's range
		 */
		if (chan->drm->agp.stat == ENABLED) {
			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = vmm->limit - 1;
		}
	}

	ret = nouveau_object_new(nv_object(chan->cli), parent,
				 chan->push.handle, 0x0002,
				 &args, sizeof(args), &push);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}
/* Create an "indirect buffer" (ib) channel: a 0x12000-byte pushbuf whose
 * first 0x10000 bytes hold command data and whose last 0x2000 bytes hold
 * the ib ring (see ioffset/ilength below).  The supported channel classes
 * are tried newest-first; on total failure the channel is torn down and
 * *pchan cleared by nouveau_channel_del().
 */
int
nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
		    u32 parent, u32 handle, struct nouveau_channel **pchan)
{
	/* candidate fifo classes, newest hardware first; 0 terminates */
	static const u16 oclasses[] = { 0xa06f, 0x906f, 0x826f, 0x506f, 0 };
	const u16 *oclass = oclasses;
	struct nv_channel_ind_class args;
	struct nouveau_channel *chan;
	int ret;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	args.pushbuf = chan->push.handle;
	args.ioffset = 0x10000 + chan->push.vma.offset;
	args.ilength = 0x02000;

	do {
		ret = nouveau_object_new(nv_object(cli), parent, handle,
					 *oclass++, &args, sizeof(args),
					 &chan->object);
		if (ret == 0)
			return ret;
	} while (*oclass);

	nouveau_channel_del(pchan);
	return ret;
}
/* Create a DMA-mode (non-indirect) channel; fallback for hardware without
 * the ib fifo classes.  Allocates a 0x10000-byte pushbuf via
 * nouveau_channel_prep(), then tries each supported class (only 0x006e
 * here).  On total failure the channel is torn down and *pchan cleared.
 */
static int
nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
		    u32 parent, u32 handle, struct nouveau_channel **pchan)
{
	static const u16 oclasses[] = { 0x006e, 0 };
	const u16 *oclass = oclasses;
	struct nv_channel_dma_class args;
	struct nouveau_channel *chan;
	int ret;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	args.pushbuf = chan->push.handle;
	args.offset = chan->push.vma.offset;

	do {
		ret = nouveau_object_new(nv_object(cli), parent, handle,
					 *oclass++, &args, sizeof(args),
					 &chan->object);
		if (ret == 0)
			return ret;
	} while (*oclass);	/* ret is always non-zero here; the old
				 * "while (ret && *oclass)" condition was
				 * redundant and inconsistent with
				 * nouveau_channel_ind() */

	nouveau_channel_del(pchan);
	return ret;
}
/* Second stage of channel creation: create the vram/gart DMA objects
 * (pre-fermi only), set up the software-side dma tracking state to match
 * the channel class, pad the ring with NOUVEAU_DMA_SKIPS nops, bind the
 * software object class (fences on <=nv05, flip completion), and finally
 * create the fence context.  @vram and @gart are the object handles the
 * DMA objects are created under.  Returns 0 or a negative error code;
 * on error the caller (nouveau_channel_new) deletes the channel.
 */
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nouveau_client *client = nv_client(chan->cli);
	struct nouveau_device *device = nv_device(chan->drm->device);
	struct nouveau_instmem *imem = nouveau_instmem(device);
	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
	struct nouveau_fb *pfb = nouveau_fb(device);
	struct nouveau_software_chan *swch;
	struct nouveau_object *object;
	struct nv_dma_class args;
	int ret, i;

	chan->vram = vram;
	chan->gart = gart;

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->card_type < NV_C0) {
		/* vram object: whole client VM on >=nv50, otherwise all of
		 * VRAM minus the instmem-reserved area
		 */
		if (device->card_type >= NV_50) {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
			args.start = 0;
			args.limit = client->vm->vmm->limit - 1;
		} else {
			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = pfb->ram.size - imem->reserved - 1;
		}

		ret = nouveau_object_new(nv_object(client), chan->handle, vram,
					 0x003d, &args, sizeof(args), &object);
		if (ret)
			return ret;

		/* gart object: VM on >=nv50, AGP aperture when enabled,
		 * otherwise the VM manager's range
		 */
		if (device->card_type >= NV_50) {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
			args.start = 0;
			args.limit = client->vm->vmm->limit - 1;
		} else
		if (chan->drm->agp.stat == ENABLED) {
			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
			args.start = 0;
			args.limit = vmm->limit - 1;
		}

		ret = nouveau_object_new(nv_object(client), chan->handle, gart,
					 0x003d, &args, sizeof(args), &object);
		if (ret)
			return ret;
	}

	/* initialise dma tracking parameters */
	switch (nv_hclass(chan->object) & 0xffff) {
	case 0x006e:
		/* dma-mode channel: whole pushbuf is the ring */
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
	default:
		/* ib-mode channel: ring data in first 0x10000 bytes, ib
		 * entries (8 bytes each) in the 0x2000 bytes above it
		 */
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base = 0x10000 / 4;
		chan->dma.ib_max = (0x02000 / 8) - 1;
		chan->dma.ib_put = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	/* pad the start of the ring with nops */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0x00000000);

	/* allocate software object class (used for fences on <= nv05, and
	 * to signal flip completion), bind it to a subchannel.
	 */
	ret = nouveau_object_new(nv_object(client), chan->handle,
				 NvSw, nouveau_abi16_swclass(chan->drm),
				 NULL, 0, &object);
	if (ret)
		return ret;

	swch = (void *)object->parent;
	swch->flip = nouveau_flip_complete;
	swch->flip_data = chan;

	if (device->card_type < NV_C0) {
		/* bind the software object to subchannel NvSubSw */
		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
		OUT_RING (chan, NvSw);
		FIRE_RING (chan);
	}

	/* initialise synchronisation */
	return nouveau_fence(chan->drm)->context_new(chan);
}
/* Create and initialise a channel.  The indirect-buffer (ib) form is
 * preferred; the dma form is the fallback when no ib class is supported.
 * On init failure the channel is destroyed again and *pchan cleared.
 * Returns 0 on success or a negative error code.
 */
int
nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
		    u32 parent, u32 handle, u32 vram, u32 gart,
		    struct nouveau_channel **pchan)
{
	int ret = nouveau_channel_ind(drm, cli, parent, handle, pchan);

	if (ret) {
		NV_DEBUG(drm, "ib channel create, %d\n", ret);
		ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
	}
	if (ret) {
		NV_DEBUG(drm, "dma channel create, %d\n", ret);
		return ret;
	}

	ret = nouveau_channel_init(*pchan, vram, gart);
	if (ret == 0)
		return 0;

	NV_ERROR(drm, "channel failed to initialise, %d\n", ret);
	nouveau_channel_del(pchan);
	return ret;
}

View File

@ -0,0 +1,47 @@
#ifndef __NOUVEAU_CHAN_H__
#define __NOUVEAU_CHAN_H__

struct nouveau_cli;

/* DRM-side state for one fifo channel.  Created by nouveau_channel_new()
 * in nouveau_chan.c and destroyed by nouveau_channel_del().
 */
struct nouveau_channel {
	struct nouveau_cli *cli;	/* owning client */
	struct nouveau_drm *drm;
	u32 handle;			/* channel object handle */
	u32 vram;			/* handle of the vram dma object */
	u32 gart;			/* handle of the gart dma object */

	/* dma push buffer backing storage */
	struct {
		struct nouveau_bo *buffer;
		struct nouveau_vma vma;	/* pushbuf mapping in the client vm
					 * (only populated on >=nv50) */
		u32 handle;		/* dma object covering the pushbuf */
	} push;

	/* TODO: this will be reworked in the near future */
	bool accel_done;
	void *fence;

	/* software-side ring tracking, consumed by the dma helpers
	 * (RING_SPACE/OUT_RING etc.); ib_* fields are only used by
	 * indirect-buffer channels */
	struct {
		int max;
		int free;
		int cur;
		int put;
		int ib_base;
		int ib_max;
		int ib_free;
		int ib_put;
	} dma;

	/* offsets of the put/get registers in the channel's user area */
	u32 user_get_hi;
	u32 user_get;
	u32 user_put;

	struct nouveau_object *object;	/* the core channel object */
};

int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *,
			u32 parent, u32 handle, u32 vram, u32 gart,
			struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);

#endif

View File

@ -1,408 +0,0 @@
/*
* Copyright 2005-2006 Stephane Marchesin
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include <nouveau_drm.h>
#include "nouveau_dma.h"
#include <engine/fifo.h>
#include <core/ramht.h>
#include "nouveau_fence.h"
#include "nouveau_software.h"
/* Module option: force DMA push buffers into VRAM instead of GART.
 * Non-static here because other files reference it; perms 0400.
 */
MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
static int
nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
{
u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
/* allocate buffer object */
ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
if (ret)
goto out;
ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
if (ret)
goto out;
ret = nouveau_bo_map(chan->pushbuf_bo);
if (ret)
goto out;
/* create DMA object covering the entire memtype where the push
* buffer resides, userspace can submit its own push buffers from
* anywhere within the same memtype.
*/
chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
if (dev_priv->card_type >= NV_50) {
ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
&chan->pushbuf_vma);
if (ret)
goto out;
if (dev_priv->card_type < NV_C0) {
ret = nouveau_gpuobj_dma_new(chan,
NV_CLASS_DMA_IN_MEMORY, 0,
(1ULL << 40),
NV_MEM_ACCESS_RO,
NV_MEM_TARGET_VM,
&chan->pushbuf);
}
chan->pushbuf_base = chan->pushbuf_vma.offset;
} else
if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->gart_info.aper_size,
NV_MEM_ACCESS_RO,
NV_MEM_TARGET_GART,
&chan->pushbuf);
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
NV_MEM_ACCESS_RO,
NV_MEM_TARGET_VRAM,
&chan->pushbuf);
} else {
/* NV04 cmdbuf hack, from original ddx.. not sure of it's
* exact reason for existing :) PCI access to cmdbuf in
* VRAM.
*/
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
pci_resource_start(dev->pdev, 1),
dev_priv->fb_available_size,
NV_MEM_ACCESS_RO,
NV_MEM_TARGET_PCI,
&chan->pushbuf);
}
out:
if (ret) {
NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
nouveau_gpuobj_ref(NULL, &chan->pushbuf);
if (chan->pushbuf_bo) {
nouveau_bo_unmap(chan->pushbuf_bo);
nouveau_bo_ref(NULL, &chan->pushbuf_bo);
}
}
return 0;
}
/* allocates and initializes a fifo for user space consumption */
/* Full channel bring-up for the legacy path: pick a free hw channel id,
 * set up the channel's gpuobjs/vm, notifier block, push buffer, fifo
 * context, the initial ring nops, the software object, and the fence
 * context; finally add the channel to file_priv's list.  Returns with
 * chan->mutex held and one user reference on success.
 */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_fence_priv *fence = dev_priv->fence.func;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret, i;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		/* channel 0 is reserved on nv50 */
		if (dev_priv->card_type == NV_50 && chan->id == 0)
			continue;

		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);

	/* setup channel's memory and vm */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate DMA push buffer */
	ret = nouveau_channel_pushbuf_init(chan);
	if (ret) {
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_dma_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;
	if (dev_priv->card_type >= NV_50)
		chan->user_get_hi = 0x60;

	/* create fifo context */
	ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING (chan, 0x00000000);

	/* bind the software object class to the channel */
	ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	if (dev_priv->card_type < NV_C0) {
		ret = RING_SPACE(chan, 2);
		if (ret) {
			nouveau_channel_put(&chan);
			return ret;
		}

		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
		OUT_RING (chan, NvSw);
		FIRE_RING (chan);
	}

	FIRE_RING(chan);

	/* create fence context */
	ret = fence->context_new(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	if (fpriv) {
		spin_lock(&fpriv->lock);
		list_add(&chan->list, &fpriv->channels);
		spin_unlock(&fpriv->lock);
	}
	*chan_ret = chan;
	return 0;
}
/* Take an extra user reference on @ref if the channel is still live
 * (users count not yet zero).  Returns the channel with a new reference,
 * or NULL when @ref is NULL or already being torn down.
 */
struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
	struct nouveau_channel *chan = NULL;

	if (ref && likely(atomic_inc_not_zero(&ref->users)))
		nouveau_channel_ref(ref, &chan);
	return chan;
}
/* Look up channel @id among file_priv's channels, take a user reference
 * and lock its mutex.  Returns the locked channel, or ERR_PTR(-EINVAL)
 * when no such channel exists.  Pair with nouveau_channel_put().
 *
 * NOTE(review): nouveau_channel_get_unlocked() may return NULL for a
 * dying channel, in which case the mutex_lock below dereferences NULL —
 * looks like a latent race; confirm.
 */
struct nouveau_channel *
nouveau_channel_get(struct drm_file *file_priv, int id)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;

	spin_lock(&fpriv->lock);
	list_for_each_entry(chan, &fpriv->channels, list) {
		if (chan->id == id) {
			chan = nouveau_channel_get_unlocked(chan);
			spin_unlock(&fpriv->lock);
			mutex_lock(&chan->mutex);
			return chan;
		}
	}
	spin_unlock(&fpriv->lock);

	return ERR_PTR(-EINVAL);
}
/* Drop a user reference without holding the channel mutex.  When the last
 * user goes away the channel is shut down: idle it, destroy the engine
 * contexts and fence context, unlist it, then free its pushbuf, ramht,
 * notifier and gpuobj resources.  The struct itself is freed later by the
 * kref release (nouveau_channel_del) once all kref holders drop out.
 */
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fence_priv *fence = dev_priv->fence.func;
	unsigned long flags;
	int i;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it chance to idle */
	nouveau_channel_idle(chan);

	/* destroy the engine specific contexts */
	/* reverse order, fifo context (index 0) goes last */
	for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
		if (chan->engctx[i])
			dev_priv->eng[i]->context_del(chan, i);
	}

	if (chan->fence)
		fence->context_del(chan);

	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_ramht_ref(NULL, &chan->ramht, chan);
	nouveau_notifier_takedown_channel(chan);
	nouveau_gpuobj_channel_takedown(chan);

	nouveau_channel_ref(NULL, pchan);
}
/* Release a channel obtained with nouveau_channel_get(): drop its mutex,
 * then drop the user reference via nouveau_channel_put_unlocked().
 */
void
nouveau_channel_put(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;

	mutex_unlock(&chan->mutex);
	nouveau_channel_put_unlocked(pchan);
}
/* kref release callback: frees the channel structure itself once the
 * last reference taken via nouveau_channel_ref() is dropped.
 */
static void
nouveau_channel_del(struct kref *ref)
{
	kfree(container_of(ref, struct nouveau_channel, ref));
}
/* Update *pchan to reference @chan (either may be NULL), adjusting kref
 * counts.  The new reference is taken before the old one is dropped, so
 * passing the same channel in both is safe.
 */
void
nouveau_channel_ref(struct nouveau_channel *chan,
		    struct nouveau_channel **pchan)
{
	if (chan)
		kref_get(&chan->ref);

	if (*pchan)
		kref_put(&(*pchan)->ref, nouveau_channel_del);

	*pchan = chan;
}
/* Wait for the channel to finish all previously submitted work by
 * emitting a fence and waiting on it (both wait flags false).  Logs an
 * error on failure.  Returns 0 or a negative error code.
 */
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence);
	if (!ret) {
		ret = nouveau_fence_wait(fence, false, false);
		nouveau_fence_unref(&fence);
	}

	if (ret)
		NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	return ret;
}
/* cleans up all the fifos from file_priv */
/* Called on file close: for every hw channel id, look up whether it
 * belongs to this file and, if so, unlist it and drop the file's user
 * reference (the extra get/put pair around the loop body keeps the
 * channel alive while it is being removed).
 */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_channel *chan;
	int i;

	/* no fifo engine means no channels were ever created */
	if (!pfifo)
		return;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < pfifo->channels; i++) {
		chan = nouveau_channel_get(file_priv, i);
		if (IS_ERR(chan))
			continue;

		list_del(&chan->list);
		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}

View File

@ -1,4 +1,5 @@
#include "nouveau_drm.h"
#include "nouveau_chan.h"
#include "nouveau_compat.h"
#include <subdev/bios.h>
@ -14,8 +15,6 @@
#include <subdev/bar.h>
#include <subdev/vm.h>
void *nouveau_newpriv(struct drm_device *);
int
nvdrm_gart_init(struct drm_device *dev, u64 *base, u64 *size)
{
@ -583,3 +582,28 @@ nvvm_lpg_shift(struct nouveau_vm *vm)
{
return vm->vmm->lpg_shift;
}
/* Compat shim: return the instance address of a gpuobj-backed object. */
u64 nvgpuobj_addr(struct nouveau_object *object)
{
	return nv_gpuobj(object)->addr;
}
/* Compat shim: map a struct nouveau_drm pointer (passed as void *) back
 * to its owning drm_device.
 */
struct drm_device *
nouveau_drv(void *ptr)
{
	return ((struct nouveau_drm *)ptr)->dev;
}
/* Compat shim: return the kernel's own channel for a drm_device. */
struct nouveau_channel *
nvdrm_channel(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_newpriv(dev);
	return drm->channel;
}
/* Compat shim: the mutex serialising work on a channel is the owning
 * client's mutex.
 */
struct mutex *
nvchan_mutex(struct nouveau_channel *chan)
{
	return &chan->cli->mutex;
}

Some files were not shown because too many files have changed in this diff Show More