Merge branch 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (207 commits)
  drm/radeon/kms/pm/r600: select the mid clock mode for single head low profile
  drm/radeon: fix power supply kconfig interaction.
  drm/radeon/kms: record object that have been list reserved
  drm/radeon: AGP memory is only I/O if the aperture can be mapped by the CPU.
  drm/radeon/kms: don't default display priority to high on rs4xx
  drm/edid: fix typo in 1600x1200@75 mode
  drm/nouveau: fix i2c-related init table handlers
  drm/nouveau: support init table i2c device identifier 0x81
  drm/nouveau: ensure we've parsed i2c table entry for INIT_*I2C* handlers
  drm/nouveau: display error message for any failed init table opcode
  drm/nouveau: fix init table handlers to return proper error codes
  drm/nv50: support fractional feedback divider on newer chips
  drm/nv50: fix monitor detection on certain chipsets
  drm/nv50: store full dcb i2c entry from vbios
  drm/nv50: fix suspend/resume with DP outputs
  drm/nv50: output calculated crtc pll when debugging on
  drm/nouveau: dump pll limits entries when debugging is on
  drm/nouveau: bios parser fixes for eDP boards
  drm/nouveau: fix a nouveau_bo dereference after it's been destroyed
  drm/nv40: remove some completed ctxprog TODOs
  ...
Linus Torvalds 2010-05-21 11:14:52 -07:00
commit 59534f7298
169 changed files with 15177 additions and 7261 deletions

Documentation/DocBook/Makefile

@@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
mac80211.xml debugobjects.xml sh.xml regulator.xml \
alsa-driver-api.xml writing-an-alsa-driver.xml \
tracepoint.xml media.xml
tracepoint.xml media.xml drm.xml
###
# The build process is as follows (targets):

Documentation/DocBook/drm.tmpl (new file)

@@ -0,0 +1,839 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
<book id="drmDevelopersGuide">
<bookinfo>
<title>Linux DRM Developer's Guide</title>
<copyright>
<year>2008-2009</year>
<holder>
Intel Corporation (Jesse Barnes &lt;jesse.barnes@intel.com&gt;)
</holder>
</copyright>
<legalnotice>
<para>
The contents of this file may be used under the terms of the GNU
General Public License version 2 (the "GPL") as distributed in
the kernel source COPYING file.
</para>
</legalnotice>
</bookinfo>
<toc></toc>
<!-- Introduction -->
<chapter id="drmIntroduction">
<title>Introduction</title>
<para>
The Linux DRM layer contains code intended to support the needs
of complex graphics devices, usually containing programmable
pipelines well suited to 3D graphics acceleration. Graphics
drivers in the kernel can make use of DRM functions to make
tasks like memory management, interrupt handling and DMA easier,
and provide a uniform interface to applications.
</para>
<para>
A note on versions: this guide covers features found in the DRM
tree, including the TTM memory manager, output configuration and
mode setting, and the new vblank internals, in addition to all
the regular features found in current kernels.
</para>
<para>
[Insert diagram of typical DRM stack here]
</para>
</chapter>
<!-- Internals -->
<chapter id="drmInternals">
<title>DRM Internals</title>
<para>
This chapter documents DRM internals relevant to driver authors
and developers working to add support for the latest features to
existing drivers.
</para>
<para>
First, we'll go over some typical driver initialization
requirements, like setting up command buffers, creating an
initial output configuration, and initializing core services.
Subsequent sections will cover core internals in more detail,
providing implementation notes and examples.
</para>
<para>
The DRM layer provides several services to graphics drivers,
many of them driven by the application interfaces it provides
through libdrm, the library that wraps most of the DRM ioctls.
These include vblank event handling, memory
management, output management, framebuffer management, command
submission &amp; fencing, suspend/resume support, and DMA
services.
</para>
<para>
The core of every DRM driver is struct drm_driver. Drivers
will typically statically initialize a drm_driver structure,
then pass it to drm_init() at load time.
</para>
<!-- Internals: driver init -->
<sect1>
<title>Driver initialization</title>
<para>
Before calling the DRM initialization routines, the driver must
first create and fill out a struct drm_driver structure.
</para>
<programlisting>
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
* deal with them for intel hardware.
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET,
.load = i915_driver_load,
.unload = i915_driver_unload,
.firstopen = i915_driver_firstopen,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.save = i915_save,
.restore = i915_restore,
.device_is_agp = i915_driver_device_is_agp,
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
.irq_handler = i915_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
.fb_probe = intelfb_probe,
.fb_remove = intelfb_remove,
.fb_resize = intelfb_resize,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
#endif
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &amp;i915_gem_vm_ops,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
#endif
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
},
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
</programlisting>
<para>
In the example above, taken from the i915 DRM driver, the driver
sets several flags indicating what core features it supports.
We'll go over the individual callbacks in later sections. Since
flags indicate which features your driver supports to the DRM
core, you need to set most of them prior to calling drm_init(). Some,
like DRIVER_MODESET, can be set later based on user supplied parameters,
but that's the exception rather than the rule.
</para>
<variablelist>
<title>Driver flags</title>
<varlistentry>
<term>DRIVER_USE_AGP</term>
<listitem><para>
Driver uses the AGP interface.
</para></listitem>
</varlistentry>
<varlistentry>
<term>DRIVER_REQUIRE_AGP</term>
<listitem><para>
Driver needs AGP interface to function.
</para></listitem>
</varlistentry>
<varlistentry>
<term>DRIVER_USE_MTRR</term>
<listitem>
<para>
Driver uses MTRR interface for mapping memory. Deprecated.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>DRIVER_PCI_DMA</term>
<listitem><para>
Driver is capable of PCI DMA. Deprecated.
</para></listitem>
</varlistentry>
<varlistentry>
<term>DRIVER_SG</term>
<listitem><para>
Driver can perform scatter/gather DMA. Deprecated.
</para></listitem>
</varlistentry>
<varlistentry>
<term>DRIVER_HAVE_DMA</term>
<listitem><para>Driver supports DMA. Deprecated.</para></listitem>
</varlistentry>
<varlistentry>
<term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
<listitem>
<para>
DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
handler, DRIVER_IRQ_SHARED indicates whether the device &amp;
handler support shared IRQs (note that this is required of
PCI drivers).
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>DRIVER_DMA_QUEUE</term>
<listitem>
<para>
If the driver queues DMA requests and completes them
asynchronously, this flag should be set. Deprecated.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>DRIVER_FB_DMA</term>
<listitem>
<para>
Driver supports DMA to/from the framebuffer. Deprecated.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>DRIVER_MODESET</term>
<listitem>
<para>
Driver supports mode setting interfaces.
</para>
</listitem>
</varlistentry>
</variablelist>
<para>
In this specific case, the driver requires AGP and supports
IRQs. DMA, as we'll see, is handled by device specific ioctls
in this case. It also supports the kernel mode setting APIs, though
unlike in the actual i915 driver source, this example unconditionally
exports KMS capability.
</para>
</sect1>
<!-- Internals: driver load -->
<sect1>
<title>Driver load</title>
<para>
In the previous section, we saw what a typical drm_driver
structure might look like. One of the more important fields in
the structure is the hook for the load function.
</para>
<programlisting>
static struct drm_driver driver = {
...
.load = i915_driver_load,
...
};
</programlisting>
<para>
The load function has many responsibilities: allocating a driver
private structure, specifying supported performance counters,
configuring the device (e.g. mapping registers &amp; command
buffers), initializing the memory manager, and setting up the
initial output configuration.
</para>
<para>
Note that the tasks performed at driver load time must not
conflict with DRM client requirements. For instance, if user
level mode setting drivers are in use, it would be problematic
to perform output discovery &amp; configuration at load time.
Likewise, if pre-memory management aware user level drivers are
in use, memory management and command buffer setup may need to
be omitted. These requirements are driver specific, and care
needs to be taken to keep both old and new applications and
libraries working. The i915 driver supports the "modeset"
module parameter to control whether advanced features are
enabled at load time or in legacy fashion. If compatibility is
a concern (e.g. with drivers converted over to the new interfaces
from the old ones), care must be taken to prevent incompatible
device initialization and control with the currently active
userspace drivers.
</para>
<sect2>
<title>Driver private &amp; performance counters</title>
<para>
The driver private hangs off the main drm_device structure and
can be used for tracking various device specific bits of
information, like register offsets, command buffer status,
register state for suspend/resume, etc. At load time, a
driver can simply allocate one and set drm_device.dev_private
appropriately; at unload time the driver can free it and set
drm_device.dev_private to NULL.
</para>
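<para>
A minimal sketch of this pattern (the foo_private structure and
its field are hypothetical, not taken from a real driver):
</para>
<programlisting>
<![CDATA[
struct foo_private {
	drm_local_map_t *mmio_map;	/* register mapping, see below */
};

static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct foo_private *dev_priv;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (!dev_priv)
		return -ENOMEM;
	dev->dev_private = dev_priv;
	return 0;
}

static int foo_driver_unload(struct drm_device *dev)
{
	kfree(dev->dev_private);
	dev->dev_private = NULL;
	return 0;
}
]]>
</programlisting>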
<para>
The DRM supports several counters which can be used for rough
performance characterization. Note that the DRM stat counter
system is not often used by applications, and supporting
additional counters is completely optional.
</para>
<para>
These interfaces are deprecated and should not be used. If performance
monitoring is desired, the developer should investigate and
potentially enhance the kernel perf and tracing infrastructure to export
GPU related performance information to performance monitoring
tools and applications.
</para>
</sect2>
<sect2>
<title>Configuring the device</title>
<para>
Obviously, device configuration will be device specific.
However, there are several common operations: finding a
device's PCI resources, mapping them, and potentially setting
up an IRQ handler.
</para>
<para>
Finding &amp; mapping resources is fairly straightforward. The
DRM wrapper functions drm_get_resource_start() and
drm_get_resource_len() can be used to find BARs on the given
drm_device struct. Once those values have been retrieved, the
driver load function can call drm_addmap() to create a new
mapping for the BAR in question. Note you'll probably want a
drm_local_map_t in your driver private structure to track any
mappings you create.
<!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
<!-- !Finclude/drm/drmP.h drm_local_map_t -->
</para>
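<para>
A sketch of the find and map steps, assuming the device exposes
its registers in BAR 0 and reusing the hypothetical foo_private
structure from the previous section:
</para>
<programlisting>
<![CDATA[
	struct foo_private *dev_priv = dev->dev_private;
	unsigned long base, size;
	int ret;

	/* BAR 0 is assumed to hold the MMIO register block */
	base = drm_get_resource_start(dev, 0);
	size = drm_get_resource_len(dev, 0);

	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
			 _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
	if (ret)
		return ret;
]]>
</programlisting>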
<para>
If compatibility with other operating systems isn't a concern
(DRM drivers can run under various BSD variants and OpenSolaris),
native Linux calls can be used for the above, e.g. pci_resource_*
and iomap*/iounmap. See the Linux device driver book for more
info.
</para>
<para>
Once you have a register map, you can use the DRM_READn() and
DRM_WRITEn() macros to access the registers on your device, or
use driver specific versions to offset into your MMIO space
relative to a driver specific base pointer (see I915_READ for
example).
</para>
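<para>
For example (the register offset is made up for illustration):
</para>
<programlisting>
<![CDATA[
	u32 status;

	/* read-modify-write a hypothetical 32-bit register at 0x2000 */
	status = DRM_READ32(dev_priv->mmio_map, 0x2000);
	DRM_WRITE32(dev_priv->mmio_map, 0x2000, status | 1);
]]>
</programlisting>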
<para>
If your device supports interrupt generation, you may want to
set up an interrupt handler at driver load time as well. This
is done using the drm_irq_install() function. If your device
supports vertical blank interrupts, it should call
drm_vblank_init() to initialize the core vblank handling code before
enabling interrupts on your device. This ensures the vblank related
structures are allocated and allows the core to handle vblank events.
</para>
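<para>
A sketch of that ordering at load time, assuming a device with
two CRTCs:
</para>
<programlisting>
<![CDATA[
	ret = drm_vblank_init(dev, 2);	/* one slot per CRTC (2 assumed) */
	if (ret)
		return ret;

	ret = drm_irq_install(dev);	/* hooks up drm_driver.irq_handler */
	if (ret)
		return ret;
]]>
</programlisting>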
<!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
<para>
Once your interrupt handler is registered (it'll use your
drm_driver.irq_handler as the actual interrupt handling
function), you can safely enable interrupts on your device,
assuming any other state your interrupt handler uses is also
initialized.
</para>
<para>
Another task that may be necessary during configuration is
mapping the video BIOS. On many devices, the VBIOS describes
device configuration, LCD panel timings (if any), and contains
flags indicating device state. Mapping the BIOS can be done
using the pci_map_rom() call, a convenience function that
takes care of mapping the actual ROM, whether it has been
shadowed into memory (typically at address 0xc0000) or exists
on the PCI device in the ROM BAR. Note that once you've
mapped the ROM and extracted any necessary information, be
sure to unmap it; on many devices the ROM address decoder is
shared with other BARs, so leaving it mapped can cause
undesired behavior like hangs or memory corruption.
<!--!Fdrivers/pci/rom.c pci_map_rom-->
</para>
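<para>
A sketch of the map, parse and unmap sequence:
</para>
<programlisting>
<![CDATA[
	void __iomem *bios;
	size_t size;

	bios = pci_map_rom(dev->pdev, &size);
	if (!bios)
		return -ENODEV;

	/* ... extract panel timings, device state flags, etc. ... */

	pci_unmap_rom(dev->pdev, bios);	/* unmap promptly, see above */
]]>
</programlisting>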
</sect2>
<sect2>
<title>Memory manager initialization</title>
<para>
In order to allocate command buffers, cursor memory, scanout
buffers, etc., as well as support the latest features provided
by packages like Mesa and the X.Org X server, your driver
should support a memory manager.
</para>
<para>
If your driver supports memory management (it should!), you'll
need to set that up at load time as well. How you initialize
it depends on which memory manager you're using, TTM or GEM.
</para>
<sect3>
<title>TTM initialization</title>
<para>
TTM (for Translation Table Manager) manages video memory and
aperture space for graphics devices. TTM supports both UMA devices
and devices with dedicated video RAM (VRAM), i.e. most discrete
graphics devices. If your device has dedicated RAM, supporting
TTM is desirable. TTM also integrates tightly with your
driver specific buffer execution function. See the radeon
driver for examples.
</para>
<para>
The core TTM structure is the ttm_bo_driver struct. It contains
several fields with function pointers for initializing the TTM,
allocating and freeing memory, waiting for command completion
and fence synchronization, and memory migration. See the
radeon_ttm.c file for an example of usage.
</para>
<para>
The ttm_global_reference structure is made up of several fields:
</para>
<programlisting>
struct ttm_global_reference {
enum ttm_global_types global_type;
size_t size;
void *object;
int (*init) (struct ttm_global_reference *);
void (*release) (struct ttm_global_reference *);
};
</programlisting>
<para>
There should be one global reference structure for your memory
manager as a whole, and there will be others for each object
created by the memory manager at runtime. Your global TTM should
have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
object should be sizeof(struct ttm_mem_global), and the init and
release hooks should point at your driver specific init and
release routines, which will probably eventually call
ttm_mem_global_init and ttm_mem_global_release respectively.
</para>
<para>
Once your global TTM accounting structure is set up and initialized
(done by calling ttm_global_item_ref on the global object you
just created), you'll need to create a buffer object TTM to
provide a pool for buffer object allocation by clients and the
kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
and its size should be sizeof(struct ttm_bo_global). Again,
driver specific init and release functions can be provided,
likely eventually calling ttm_bo_global_init and
ttm_bo_global_release, respectively. Also like the previous
object, ttm_global_item_ref is used to create an initial reference
count for the TTM, which will call your initialization function.
</para>
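<para>
A condensed sketch of that sequence, closely following the radeon
driver; the dev_priv fields and the foo_ttm_mem_global_init/release
wrappers (which would call ttm_mem_global_init and
ttm_mem_global_release) are hypothetical:
</para>
<programlisting>
<![CDATA[
	struct ttm_global_reference *global_ref;
	int ret;

	global_ref = &dev_priv->mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &foo_ttm_mem_global_init;
	global_ref->release = &foo_ttm_mem_global_release;
	ret = ttm_global_item_ref(global_ref);
	if (ret)
		return ret;

	dev_priv->bo_global_ref.mem_glob = dev_priv->mem_global_ref.object;
	global_ref = &dev_priv->bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	ret = ttm_global_item_ref(global_ref);
]]>
</programlisting>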
</sect3>
<sect3>
<title>GEM initialization</title>
<para>
GEM is an alternative to TTM, designed specifically for UMA
devices. It has simpler initialization and execution requirements
than TTM, but has no VRAM management capability. Core GEM
initialization consists of a basic drm_mm_init call to create
a GTT DRM MM object, which provides an address space pool for
object allocation. In a KMS configuration, the driver will
need to allocate and initialize a command ring buffer following
basic GEM initialization. Most UMA devices have a so-called
"stolen" memory region, which provides space for the initial
framebuffer and large, contiguous memory regions required by the
device. This space is not typically managed by GEM, and must
be initialized separately into its own DRM MM object.
</para>
<para>
Initialization will be driver specific, and will depend on
the architecture of the device. In the case of Intel
integrated graphics chips like 965GM, GEM initialization can
be done by calling the internal GEM init function,
i915_gem_do_init(). Since the 965GM is a UMA device
(i.e. it doesn't have dedicated VRAM), GEM will manage
making regular RAM available for GPU operations. Memory set
aside by the BIOS (called "stolen" memory by the i915
driver) will be managed by the DRM memrange allocator; the
rest of the aperture will be managed by GEM.
<programlisting>
/* Basic memrange allocator for stolen space (aka vram) */
drm_memrange_init(&amp;dev_priv->vram, 0, prealloc_size);
/* Let GEM Manage from end of prealloc space to end of aperture */
i915_gem_do_init(dev, prealloc_size, agp_size);
</programlisting>
<!--!Edrivers/char/drm/drm_memrange.c-->
</para>
<para>
Once the memory manager has been set up, we can allocate the
command buffer. In the i915 case, this is also done with a
GEM function, i915_gem_init_ringbuffer().
</para>
</sect3>
</sect2>
<sect2>
<title>Output configuration</title>
<para>
The final initialization task is output configuration. This involves
finding and initializing the CRTCs, encoders and connectors
for your device, creating an initial configuration and
registering a framebuffer console driver.
</para>
<sect3>
<title>Output discovery and initialization</title>
<para>
Several core functions exist to create CRTCs, encoders and
connectors, namely drm_crtc_init(), drm_connector_init() and
drm_encoder_init(), along with several "helper" functions to
perform common tasks.
</para>
<para>
Connectors should be registered with sysfs once they've been
detected and initialized, using the
drm_sysfs_connector_add() function. Likewise, when they're
removed from the system, they should be destroyed with
drm_sysfs_connector_remove().
</para>
<programlisting>
<![CDATA[
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct intel_output *intel_output;
intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output)
return;
connector = &intel_output->base;
drm_connector_init(dev, &intel_output->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
drm_mode_connector_attach_encoder(&intel_output->base,
&intel_output->enc);
/* Set up the DDC bus. */
intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
if (!intel_output->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
return;
}
intel_output->type = INTEL_OUTPUT_ANALOG;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
}
]]>
</programlisting>
<para>
In the example above (again, taken from the i915 driver), a
CRT connector and encoder combination is created. A device
specific i2c bus is also created, for fetching EDID data and
performing monitor detection. Once the process is complete,
the new connector is registered with sysfs, to make its
properties available to applications.
</para>
<sect4>
<title>Helper functions and core functions</title>
<para>
Since many PC-class graphics devices have similar display output
designs, the DRM provides a set of helper functions to make
output management easier. The core helper routines handle
encoder re-routing and disabling of unused functions following
mode set. Using the helpers is optional, but recommended for
devices with PC-style architectures (i.e. a set of display planes
for feeding pixels to encoders which are in turn routed to
connectors). Devices with more complex requirements needing
finer grained management can opt to use the core callbacks
directly.
</para>
<para>
[Insert typical diagram here.] [Insert OMAP style config here.]
</para>
</sect4>
<para>
For each encoder, CRTC and connector, several functions must
be provided, depending on the object type. Encoder objects
should provide a DPMS (basically on/off) function, mode fixup
(for converting requested modes into native hardware timings),
and prepare, set and commit functions for use by the core DRM
helper functions. Connector helpers need to provide mode fetch and
validity functions as well as an encoder matching function for
returning an ideal encoder for a given connector. The core
connector functions include a DPMS callback, (deprecated)
save/restore routines, detection, mode probing, property handling,
and cleanup functions.
</para>
<!--!Edrivers/char/drm/drm_crtc.h-->
<!--!Edrivers/char/drm/drm_crtc.c-->
<!--!Edrivers/char/drm/drm_crtc_helper.c-->
</sect3>
</sect2>
</sect1>
<!-- Internals: vblank handling -->
<sect1>
<title>VBlank event handling</title>
<para>
The DRM core exposes two vertical blank related ioctls:
DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL.
<!--!Edrivers/char/drm/drm_irq.c-->
</para>
<para>
DRM_IOCTL_WAIT_VBLANK takes a union drm_wait_vblank
as its argument, and is used to block or request a signal when a
specified vblank event occurs.
</para>
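<para>
A userspace sketch of waiting for the next vblank (fd is an open
DRM device file descriptor; error handling omitted):
</para>
<programlisting>
<![CDATA[
	union drm_wait_vblank vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 1;	/* one vblank from now */
	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
	/* on return, vbl.reply.sequence holds the vblank count */
]]>
</programlisting>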
<para>
DRM_IOCTL_MODESET_CTL should be called by application level
drivers before and after mode setting, since on many devices the
vertical blank counter will be reset at that time. Internally,
the DRM snapshots the last vblank count when the ioctl is called
with the _DRM_PRE_MODESET command so that the counter won't go
backwards (which is dealt with when _DRM_POST_MODESET is used).
</para>
<para>
To support the functions above, the DRM core provides several
helper functions for tracking vertical blank counters, and
requires drivers to provide several callbacks:
get_vblank_counter(), enable_vblank() and disable_vblank(). The
core uses get_vblank_counter() to keep the counter accurate
across interrupt disable periods. It should return the current
vertical blank event count, which is often tracked in a device
register. The enable and disable vblank callbacks should enable
and disable vertical blank interrupts, respectively. In the
absence of DRM clients waiting on vblank events, the core DRM
code will use the disable_vblank() function to disable
interrupts, which saves power. They'll be re-enabled when
a client calls the vblank wait ioctl above.
</para>
<para>
Devices that don't provide a count register can simply use an
internal atomic counter incremented on every vertical blank
interrupt, and can make their enable and disable vblank
functions into no-ops.
</para>
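<para>
A sketch of that fallback scheme (the foo_* names are
hypothetical):
</para>
<programlisting>
<![CDATA[
static atomic_t foo_vblank_count = ATOMIC_INIT(0);

u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return atomic_read(&foo_vblank_count);
}

int foo_enable_vblank(struct drm_device *dev, int crtc)
{
	return 0;	/* vblank interrupts stay enabled on this device */
}

void foo_disable_vblank(struct drm_device *dev, int crtc)
{
}

irqreturn_t foo_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = arg;

	atomic_inc(&foo_vblank_count);
	drm_handle_vblank(dev, 0);	/* this device has one CRTC */
	return IRQ_HANDLED;
}
]]>
</programlisting>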
</sect1>
<sect1>
<title>Memory management</title>
<para>
The memory manager lies at the heart of many DRM operations, and
is also required to support advanced client features like OpenGL
pbuffers. The DRM currently contains two memory managers, TTM
and GEM.
</para>
<sect2>
<title>The Translation Table Manager (TTM)</title>
<para>
TTM was developed by Tungsten Graphics, primarily by Thomas
Hellström, and is intended to be a flexible, high performance
graphics memory manager.
</para>
<para>
Drivers wishing to support TTM must fill out a ttm_bo_driver
structure.
</para>
<para>
TTM design background and information belongs here.
</para>
</sect2>
<sect2>
<title>The Graphics Execution Manager (GEM)</title>
<para>
GEM is an Intel project, authored by Eric Anholt and Keith
Packard. It provides simpler interfaces than TTM, and is well
suited for UMA devices.
</para>
<para>
GEM-enabled drivers must provide gem_init_object() and
gem_free_object() callbacks to support the core memory
allocation routines. They should also provide several driver
specific ioctls to support command execution, pinning, buffer
read &amp; write, mapping, and domain ownership transfers.
</para>
<para>
On a fundamental level, GEM involves several operations: memory
allocation and freeing, command execution, and aperture management
at command execution time. Buffer object allocation is relatively
straightforward and largely provided by Linux's shmem layer, which
provides memory to back each object. When mapped into the GTT
or used in a command buffer, the backing pages for an object are
flushed to memory and marked write combined so as to be coherent
with the GPU. Likewise, when the GPU finishes rendering to an object,
if the CPU accesses it, it must be made coherent with the CPU's view
of memory, usually involving GPU cache flushing of various kinds.
This core CPU&lt;-&gt;GPU coherency management is provided by the GEM
set domain function, which evaluates an object's current domain and
performs any necessary flushing or synchronization to put the object
into the desired coherency domain (note that the object may be busy,
i.e. an active render target; in that case the set domain function
will block the client and wait for rendering to complete before
performing any necessary flushing operations).
</para>
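<para>
As a concrete example, the i915 driver exposes this through a
set domain ioctl; a userspace sketch of pulling an object into
the CPU domain before reading it back (error handling omitted):
</para>
<programlisting>
<![CDATA[
	struct drm_i915_gem_set_domain sd;

	sd.handle = bo_handle;		/* a previously created object */
	sd.read_domains = I915_GEM_DOMAIN_CPU;
	sd.write_domain = I915_GEM_DOMAIN_CPU;
	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
]]>
</programlisting>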
<para>
Perhaps the most important GEM function is providing a command
execution interface to clients. Client programs construct command
buffers containing references to previously allocated memory objects
and submit them to GEM. At that point, GEM will take care to bind
all the objects into the GTT, execute the buffer, and provide
necessary synchronization between clients accessing the same buffers.
This often involves evicting some objects from the GTT and re-binding
others (a fairly expensive operation), and providing relocation
support which hides fixed GTT offsets from clients. Clients must
take care not to submit command buffers that reference more objects
than can fit in the GTT, or GEM will reject them and no rendering
will occur. Similarly, if several objects in the buffer require
fence registers to be allocated for correct rendering (e.g. 2D blits
on pre-965 chips), care must be taken not to require more fence
registers than are available to the client. Such resource management
should be abstracted from the client in libdrm.
</para>
</sect2>
</sect1>
<!-- Output management -->
<sect1>
<title>Output management</title>
<para>
At the core of the DRM output management code is a set of
structures representing CRTCs, encoders and connectors.
</para>
<para>
A CRTC is an abstraction representing a part of the chip that
contains a pointer to a scanout buffer. Therefore, the number
of CRTCs available determines how many independent scanout
buffers can be active at any given time. The CRTC structure
contains several fields to support this: a pointer to some video
memory, a display mode, and an (x, y) offset into the video
memory to support panning or configurations where one piece of
video memory spans multiple CRTCs.
</para>
<para>
An encoder takes pixel data from a CRTC and converts it to a
format suitable for any attached connectors. On some devices,
it may be possible to have a CRTC send data to more than one
encoder. In that case, both encoders would receive data from
the same scanout buffer, resulting in a "cloned" display
configuration across the connectors attached to each encoder.
</para>
<para>
A connector is the final destination for pixel data on a device,
and usually connects directly to an external display device like
a monitor or laptop panel. A connector can only be attached to
one encoder at a time. The connector is also the structure
where information about the attached display is kept, so it
contains fields for display data, EDID data, DPMS &amp;
connection status, and information about modes supported on the
attached displays.
</para>
<!--!Edrivers/char/drm/drm_crtc.c-->
</sect1>
<sect1>
<title>Framebuffer management</title>
<para>
In order to set a mode on a given CRTC, encoder and connector
configuration, clients need to provide a framebuffer object which
will provide a source of pixels for the CRTC to deliver to the encoder(s)
and ultimately the connector(s) in the configuration. A framebuffer
is fundamentally a driver specific memory object, made into an opaque
handle by the DRM addfb function. Once an fb has been created this
way, it can be passed to the KMS mode setting routines for use in
a configuration.
</para>
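<para>
A userspace sketch of wrapping a driver specific buffer object
into a framebuffer via the addfb ioctl (the sizes and bo_handle
are made up; error handling omitted):
</para>
<programlisting>
<![CDATA[
	struct drm_mode_fb_cmd fb;

	memset(&fb, 0, sizeof(fb));
	fb.width = 1280;
	fb.height = 800;
	fb.pitch = 1280 * 4;	/* bytes per scanline at 32 bpp */
	fb.bpp = 32;
	fb.depth = 24;
	fb.handle = bo_handle;	/* driver specific memory object */
	ioctl(fd, DRM_IOCTL_MODE_ADDFB, &fb);
	/* fb.fb_id now identifies this fb to the KMS ioctls */
]]>
</programlisting>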
</sect1>
<sect1>
<title>Command submission &amp; fencing</title>
<para>
This should cover a few device specific command submission
implementations.
</para>
</sect1>
<sect1>
<title>Suspend/resume</title>
<para>
The DRM core provides some suspend/resume code, but drivers
wanting full suspend/resume support should provide save() and
restore() functions. These will be called at suspend,
hibernate, or resume time, and should perform any state save or
restore required by your device across suspend or hibernate
states.
</para>
</sect1>
<sect1>
<title>DMA services</title>
<para>
This should cover how DMA mapping etc. is supported by the core.
These functions are deprecated and should not be used.
</para>
</sect1>
</chapter>
<!-- External interfaces -->
<chapter id="drmExternals">
<title>Userland interfaces</title>
<para>
The DRM core exports several interfaces to applications,
generally intended to be used through corresponding libdrm
wrapper functions. In addition, drivers export device specific
interfaces for use by userspace drivers &amp; device aware
applications through ioctls and sysfs files.
</para>
<para>
External interfaces include: memory mapping, context management,
DMA operations, AGP management, vblank control, fence
management, memory management, and output management.
</para>
<para>
Cover generic ioctls and sysfs layout here. Only need high
level info, since man pages will cover the rest.
</para>
</chapter>
<!-- API reference -->
<appendix id="drmDriverApi">
<title>DRM Driver API</title>
<para>
Include auto-generated API reference here (need to reference it
from paragraphs above too).
</para>
</appendix>
</book>

arch/x86/include/asm/cacheflush.h

@@ -145,9 +145,11 @@ int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);
int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
/*

arch/x86/mm/pageattr.c

@@ -997,7 +997,8 @@ out_err:
}
EXPORT_SYMBOL(set_memory_uc);
int set_memory_array_uc(unsigned long *addr, int addrinarray)
int _set_memory_array(unsigned long *addr, int addrinarray,
unsigned long new_type)
{
int i, j;
int ret;
@@ -1007,13 +1008,19 @@ int set_memory_array_uc(unsigned long *addr, int addrinarray)
*/
for (i = 0; i < addrinarray; i++) {
ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
_PAGE_CACHE_UC_MINUS, NULL);
new_type, NULL);
if (ret)
goto out_free;
}
ret = change_page_attr_set(addr, addrinarray,
__pgprot(_PAGE_CACHE_UC_MINUS), 1);
if (!ret && new_type == _PAGE_CACHE_WC)
ret = change_page_attr_set_clr(addr, addrinarray,
__pgprot(_PAGE_CACHE_WC),
__pgprot(_PAGE_CACHE_MASK),
0, CPA_ARRAY, NULL);
if (ret)
goto out_free;
@@ -1025,8 +1032,19 @@ out_free:
return ret;
}
int set_memory_array_uc(unsigned long *addr, int addrinarray)
{
return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS);
}
EXPORT_SYMBOL(set_memory_array_uc);
int set_memory_array_wc(unsigned long *addr, int addrinarray)
{
return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC);
}
EXPORT_SYMBOL(set_memory_array_wc);
int _set_memory_wc(unsigned long addr, int numpages)
{
int ret;
@@ -1153,26 +1171,34 @@ int set_pages_uc(struct page *page, int numpages)
}
EXPORT_SYMBOL(set_pages_uc);
int set_pages_array_uc(struct page **pages, int addrinarray)
static int _set_pages_array(struct page **pages, int addrinarray,
unsigned long new_type)
{
unsigned long start;
unsigned long end;
int i;
int free_idx;
int ret;
for (i = 0; i < addrinarray; i++) {
if (PageHighMem(pages[i]))
continue;
start = page_to_pfn(pages[i]) << PAGE_SHIFT;
end = start + PAGE_SIZE;
if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
if (reserve_memtype(start, end, new_type, NULL))
goto err_out;
}
if (cpa_set_pages_array(pages, addrinarray,
__pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
return 0; /* Success */
}
ret = cpa_set_pages_array(pages, addrinarray,
__pgprot(_PAGE_CACHE_UC_MINUS));
if (!ret && new_type == _PAGE_CACHE_WC)
ret = change_page_attr_set_clr(NULL, addrinarray,
__pgprot(_PAGE_CACHE_WC),
__pgprot(_PAGE_CACHE_MASK),
0, CPA_PAGES_ARRAY, pages);
if (ret)
goto err_out;
return 0; /* Success */
err_out:
free_idx = i;
for (i = 0; i < free_idx; i++) {
@@ -1184,8 +1210,19 @@ err_out:
}
return -EINVAL;
}
int set_pages_array_uc(struct page **pages, int addrinarray)
{
return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS);
}
EXPORT_SYMBOL(set_pages_array_uc);
int set_pages_array_wc(struct page **pages, int addrinarray)
{
return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC);
}
EXPORT_SYMBOL(set_pages_array_wc);
int set_pages_wb(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);

drivers/char/agp/agp.h

@@ -178,86 +178,6 @@ struct agp_bridge_data {
#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
/* Intel registers */
#define INTEL_APSIZE 0xb4
#define INTEL_ATTBASE 0xb8
#define INTEL_AGPCTRL 0xb0
#define INTEL_NBXCFG 0x50
#define INTEL_ERRSTS 0x91
/* Intel i830 registers */
#define I830_GMCH_CTRL 0x52
#define I830_GMCH_ENABLED 0x4
#define I830_GMCH_MEM_MASK 0x1
#define I830_GMCH_MEM_64M 0x1
#define I830_GMCH_MEM_128M 0
#define I830_GMCH_GMS_MASK 0x70
#define I830_GMCH_GMS_DISABLED 0x00
#define I830_GMCH_GMS_LOCAL 0x10
#define I830_GMCH_GMS_STOLEN_512 0x20
#define I830_GMCH_GMS_STOLEN_1024 0x30
#define I830_GMCH_GMS_STOLEN_8192 0x40
#define I830_RDRAM_CHANNEL_TYPE 0x03010
#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
/* This one is for I830MP w. an external graphic card */
#define INTEL_I830_ERRSTS 0x92
/* Intel 855GM/852GM registers */
#define I855_GMCH_GMS_MASK 0xF0
#define I855_GMCH_GMS_STOLEN_0M 0x0
#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
#define I85X_CAPID 0x44
#define I85X_VARIANT_MASK 0x7
#define I85X_VARIANT_SHIFT 5
#define I855_GME 0x0
#define I855_GM 0x4
#define I852_GME 0x2
#define I852_GM 0x5
/* Intel i845 registers */
#define INTEL_I845_AGPM 0x51
#define INTEL_I845_ERRSTS 0xc8
/* Intel i860 registers */
#define INTEL_I860_MCHCFG 0x50
#define INTEL_I860_ERRSTS 0xc8
/* Intel i810 registers */
#define I810_GMADDR 0x10
#define I810_MMADDR 0x14
#define I810_PTE_BASE 0x10000
#define I810_PTE_MAIN_UNCACHED 0x00000000
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
#define I810_GFX_MEM_WIN_32M 0x00010000
#define I810_GMS 0x000000c0
#define I810_GMS_DISABLE 0x00000000
#define I810_PGETBL_CTL 0x2020
#define I810_PGETBL_ENABLED 0x00000001
#define I965_PGETBL_SIZE_MASK 0x0000000e
#define I965_PGETBL_SIZE_512KB (0 << 1)
#define I965_PGETBL_SIZE_256KB (1 << 1)
#define I965_PGETBL_SIZE_128KB (2 << 1)
#define I965_PGETBL_SIZE_1MB (3 << 1)
#define I965_PGETBL_SIZE_2MB (4 << 1)
#define I965_PGETBL_SIZE_1_5MB (5 << 1)
#define G33_PGETBL_SIZE_MASK (3 << 8)
#define G33_PGETBL_SIZE_1M (1 << 8)
#define G33_PGETBL_SIZE_2M (2 << 8)
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
#define I810_DRAM_ROW_0_SDRAM 0x00000001
struct agp_device_ids {
unsigned short device_id; /* first, to make table easier to read */
enum chipset_type chipset;

drivers/char/agp/ali-agp.c

@@ -204,6 +204,7 @@ static const struct agp_bridge_driver ali_generic_bridge = {
.aperture_sizes = ali_generic_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = ali_configure,
.fetch_size = ali_fetch_size,
.cleanup = ali_cleanup,

drivers/char/agp/amd-k7-agp.c

@@ -142,6 +142,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct amd_page_map page_dir;
unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
u32 temp;
@@ -178,6 +179,13 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
for (i = 0; i < value->num_entries; i++) {
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
return 0;
}
@@ -375,6 +383,7 @@ static const struct agp_bridge_driver amd_irongate_driver = {
.aperture_sizes = amd_irongate_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = amd_irongate_configure,
.fetch_size = amd_irongate_fetch_size,
.cleanup = amd_irongate_cleanup,

drivers/char/agp/amd64-agp.c

@@ -210,6 +210,7 @@ static const struct agp_bridge_driver amd_8151_driver = {
.aperture_sizes = amd_8151_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = amd_8151_configure,
.fetch_size = amd64_fetch_size,
.cleanup = amd64_cleanup,
@@ -499,6 +500,10 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
u8 cap_ptr;
int err;
/* The Highlander principle */
if (agp_bridges_found)
return -ENODEV;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
@@ -562,6 +567,8 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
amd64_aperture_sizes[bridge->aperture_size_idx].size);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
agp_bridges_found--;
}
#ifdef CONFIG_PM
@@ -709,6 +716,11 @@ static struct pci_device_id agp_amd64_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
static DEFINE_PCI_DEVICE_TABLE(agp_amd64_pci_promisc_table) = {
{ PCI_DEVICE_CLASS(0, 0) },
{ }
};
static struct pci_driver agp_amd64_pci_driver = {
.name = "agpgart-amd64",
.id_table = agp_amd64_pci_table,
@@ -734,7 +746,6 @@ int __init agp_amd64_init(void)
return err;
if (agp_bridges_found == 0) {
struct pci_dev *dev;
if (!agp_try_unsupported && !agp_try_unsupported_boot) {
printk(KERN_INFO PFX "No supported AGP bridge found.\n");
#ifdef MODULE
@@ -750,17 +761,10 @@ int __init agp_amd64_init(void)
return -ENODEV;
/* Look for any AGP bridge */
dev = NULL;
err = -ENODEV;
for_each_pci_dev(dev) {
if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
continue;
/* Only one bridge supported right now */
if (agp_amd64_probe(dev, NULL) == 0) {
err = 0;
break;
}
}
agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
err = driver_attach(&agp_amd64_pci_driver.driver);
if (err == 0 && agp_bridges_found == 0)
err = -ENODEV;
}
return err;
}

drivers/char/agp/ati-agp.c

@@ -341,6 +341,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct ati_page_map page_dir;
unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
u32 temp;
@@ -395,6 +396,12 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
for (i = 0; i < value->num_entries; i++) {
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
}
return 0;
}
@@ -415,6 +422,7 @@ static const struct agp_bridge_driver ati_generic_bridge = {
.aperture_sizes = ati_generic_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = ati_configure,
.fetch_size = ati_fetch_size,
.cleanup = ati_cleanup,

drivers/char/agp/efficeon-agp.c

@@ -28,6 +28,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h>
#include "agp.h"
#include "intel-agp.h"
/*
* The real differences to the generic AGP code is

drivers/char/agp/intel-agp.c: file diff suppressed because it is too large.

drivers/char/agp/intel-agp.h (new file)

@@ -0,0 +1,239 @@
/*
* Common Intel AGPGART and GTT definitions.
*/
/* Intel registers */
#define INTEL_APSIZE 0xb4
#define INTEL_ATTBASE 0xb8
#define INTEL_AGPCTRL 0xb0
#define INTEL_NBXCFG 0x50
#define INTEL_ERRSTS 0x91
/* Intel i830 registers */
#define I830_GMCH_CTRL 0x52
#define I830_GMCH_ENABLED 0x4
#define I830_GMCH_MEM_MASK 0x1
#define I830_GMCH_MEM_64M 0x1
#define I830_GMCH_MEM_128M 0
#define I830_GMCH_GMS_MASK 0x70
#define I830_GMCH_GMS_DISABLED 0x00
#define I830_GMCH_GMS_LOCAL 0x10
#define I830_GMCH_GMS_STOLEN_512 0x20
#define I830_GMCH_GMS_STOLEN_1024 0x30
#define I830_GMCH_GMS_STOLEN_8192 0x40
#define I830_RDRAM_CHANNEL_TYPE 0x03010
#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
/* This one is for I830MP w. an external graphic card */
#define INTEL_I830_ERRSTS 0x92
/* Intel 855GM/852GM registers */
#define I855_GMCH_GMS_MASK 0xF0
#define I855_GMCH_GMS_STOLEN_0M 0x0
#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
#define I85X_CAPID 0x44
#define I85X_VARIANT_MASK 0x7
#define I85X_VARIANT_SHIFT 5
#define I855_GME 0x0
#define I855_GM 0x4
#define I852_GME 0x2
#define I852_GM 0x5
/* Intel i845 registers */
#define INTEL_I845_AGPM 0x51
#define INTEL_I845_ERRSTS 0xc8
/* Intel i860 registers */
#define INTEL_I860_MCHCFG 0x50
#define INTEL_I860_ERRSTS 0xc8
/* Intel i810 registers */
#define I810_GMADDR 0x10
#define I810_MMADDR 0x14
#define I810_PTE_BASE 0x10000
#define I810_PTE_MAIN_UNCACHED 0x00000000
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
#define I810_GFX_MEM_WIN_32M 0x00010000
#define I810_GMS 0x000000c0
#define I810_GMS_DISABLE 0x00000000
#define I810_PGETBL_CTL 0x2020
#define I810_PGETBL_ENABLED 0x00000001
#define I965_PGETBL_SIZE_MASK 0x0000000e
#define I965_PGETBL_SIZE_512KB (0 << 1)
#define I965_PGETBL_SIZE_256KB (1 << 1)
#define I965_PGETBL_SIZE_128KB (2 << 1)
#define I965_PGETBL_SIZE_1MB (3 << 1)
#define I965_PGETBL_SIZE_2MB (4 << 1)
#define I965_PGETBL_SIZE_1_5MB (5 << 1)
#define G33_PGETBL_SIZE_MASK (3 << 8)
#define G33_PGETBL_SIZE_1M (1 << 8)
#define G33_PGETBL_SIZE_2M (2 << 8)
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
#define I810_DRAM_ROW_0_SDRAM 0x00000001
/* Intel 815 register */
#define INTEL_815_APCONT 0x51
#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
/* Intel i820 registers */
#define INTEL_I820_RDCR 0x51
#define INTEL_I820_ERRSTS 0xc8
/* Intel i840 registers */
#define INTEL_I840_MCHCFG 0x50
#define INTEL_I840_ERRSTS 0xc8
/* Intel i850 registers */
#define INTEL_I850_MCHCFG 0x50
#define INTEL_I850_ERRSTS 0xc8
/* intel 915G registers */
#define I915_GMADDR 0x18
#define I915_MMADDR 0x10
#define I915_PTEADDR 0x1C
#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
#define I915_IFPADDR 0x60
/* Intel 965G registers */
#define I965_MSAC 0x62
#define I965_IFPADDR 0x70
/* Intel 7505 registers */
#define INTEL_I7505_APSIZE 0x74
#define INTEL_I7505_NCAPID 0x60
#define INTEL_I7505_NISTAT 0x6c
#define INTEL_I7505_ATTBASE 0x78
#define INTEL_I7505_ERRSTS 0x42
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
#define SNB_GMCH_CTRL 0x50
#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
#define SNB_GTT_SIZE_0M (0 << 8)
#define SNB_GTT_SIZE_1M (1 << 8)
#define SNB_GTT_SIZE_2M (2 << 8)
#define SNB_GTT_SIZE_MASK (3 << 8)
/* pci devices ids */
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
IS_SNB)

drivers/char/agp/intel-gtt.c (new file, 1516 lines): file diff suppressed because it is too large.

drivers/char/agp/nvidia-agp.c

@@ -310,6 +310,7 @@ static const struct agp_bridge_driver nvidia_driver = {
.aperture_sizes = nvidia_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 5,
.needs_scratch_page = true,
.configure = nvidia_configure,
.fetch_size = nvidia_fetch_size,
.cleanup = nvidia_cleanup,

drivers/char/agp/sis-agp.c

@@ -125,6 +125,7 @@ static struct agp_bridge_driver sis_driver = {
.aperture_sizes = sis_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = sis_configure,
.fetch_size = sis_fetch_size,
.cleanup = sis_cleanup,
@@ -415,14 +416,6 @@ static struct pci_device_id agp_sis_pci_table[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_760,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};

drivers/char/agp/uninorth-agp.c

@@ -28,6 +28,7 @@
*/
static int uninorth_rev;
static int is_u3;
static u32 scratch_value;
#define DEFAULT_APERTURE_SIZE 256
#define DEFAULT_APERTURE_STRING "256"
@@ -172,7 +173,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
for (i = 0; i < mem->page_count; ++i) {
if (gp[i]) {
if (gp[i] != scratch_value) {
dev_info(&agp_bridge->dev->dev,
"uninorth_insert_memory: entry 0x%x occupied (%x)\n",
i, gp[i]);
@@ -214,8 +215,9 @@ int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
return 0;
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
for (i = 0; i < mem->page_count; ++i)
gp[i] = 0;
for (i = 0; i < mem->page_count; ++i) {
gp[i] = scratch_value;
}
mb();
uninorth_tlbflush(mem);
@@ -421,8 +423,13 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
bridge->gatt_bus_addr = virt_to_phys(table);
if (is_u3)
scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL;
else
scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) |
0x1UL);
for (i = 0; i < num_entries; i++)
bridge->gatt_table[i] = 0;
bridge->gatt_table[i] = scratch_value;
return 0;
@@ -519,6 +526,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = true,
.needs_scratch_page = true,
};
const struct agp_bridge_driver u3_agp_driver = {

drivers/char/agp/via-agp.c

@@ -175,6 +175,7 @@ static const struct agp_bridge_driver via_agp3_driver = {
.aperture_sizes = agp3_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 10,
.needs_scratch_page = true,
.configure = via_configure_agp3,
.fetch_size = via_fetch_size_agp3,
.cleanup = via_cleanup_agp3,
@@ -201,6 +202,7 @@ static const struct agp_bridge_driver via_driver = {
.aperture_sizes = via_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 9,
.needs_scratch_page = true,
.configure = via_configure,
.fetch_size = via_fetch_size,
.cleanup = via_cleanup,

drivers/gpu/drm/Kconfig

@@ -9,6 +9,7 @@ menuconfig DRM
depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
select I2C
select I2C_ALGOBIT
select SLOW_WORK
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -59,6 +60,7 @@ config DRM_RADEON
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
select POWER_SUPPLY
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to

drivers/gpu/drm/drm_auth.c

@@ -79,10 +79,9 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
memset(entry, 0, sizeof(*entry));
entry->priv = priv;
entry->hash_item.key = (unsigned long)magic;
mutex_lock(&dev->struct_mutex);

drivers/gpu/drm/drm_crtc.c

@@ -34,6 +34,7 @@
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_edid.h"
struct drm_prop_enum_list {
int type;
@@ -494,7 +495,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
kfree(connector->fb_helper_private);
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
@@ -858,7 +858,6 @@ void drm_mode_config_init(struct drm_device *dev)
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
@@ -2350,7 +2349,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
int ret = 0;
int ret = 0, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -2362,7 +2361,9 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
return ret;
}
connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 128, edid);
size = EDID_LENGTH * (1 + edid->extensions);
connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
size, edid);
ret = drm_connector_property_set_value(connector,
dev->mode_config.edid_property,

drivers/gpu/drm/drm_crtc_helper.c

@@ -55,7 +55,7 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
}
/**
* drm_helper_probe_connector_modes - get complete set of display modes
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @dev: DRM device
* @maxX: max width for modes
* @maxY: max height for modes
@@ -154,21 +154,6 @@ prune:
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
uint32_t maxY)
{
struct drm_connector *connector;
int count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
count += drm_helper_probe_single_connector_modes(connector,
maxX, maxY);
}
return count;
}
EXPORT_SYMBOL(drm_helper_probe_connector_modes);
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
* @encoder: encoder to check
@ -263,302 +248,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *connector, int width, int height)
{
struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->modes, head) {
if (drm_mode_width(mode) > width ||
drm_mode_height(mode) > height)
continue;
if (mode->type & DRM_MODE_TYPE_PREFERRED)
return mode;
}
return NULL;
}
static bool drm_has_cmdline_mode(struct drm_connector *connector)
{
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
if (!fb_help_conn)
return false;
cmdline_mode = &fb_help_conn->cmdline_mode;
return cmdline_mode->specified;
}
static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
{
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode = NULL;
if (!fb_help_conn)
return mode;
cmdline_mode = &fb_help_conn->cmdline_mode;
if (cmdline_mode->specified == false)
return mode;
/* attempt to find a matching mode in the list of modes
* we have gotten so far, if not add a CVT mode that conforms
*/
if (cmdline_mode->rb || cmdline_mode->margins)
goto create_mode;
list_for_each_entry(mode, &connector->modes, head) {
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
mode->vdisplay != cmdline_mode->yres)
continue;
if (cmdline_mode->refresh_specified) {
if (mode->vrefresh != cmdline_mode->refresh)
continue;
}
if (cmdline_mode->interlace) {
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
}
return mode;
}
create_mode:
mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
cmdline_mode->yres,
cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
cmdline_mode->rb, cmdline_mode->interlace,
cmdline_mode->margins);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
list_add(&mode->head, &connector->modes);
return mode;
}
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
if (strict) {
enable = connector->status == connector_status_connected;
} else {
enable = connector->status != connector_status_disconnected;
}
return enable;
}
static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
{
bool any_enabled = false;
struct drm_connector *connector;
int i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
enabled[i] ? "yes" : "no");
any_enabled |= enabled[i];
i++;
}
if (any_enabled)
return;
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
enabled[i] = drm_connector_enabled(connector, false);
i++;
}
}
static bool drm_target_preferred(struct drm_device *dev,
struct drm_display_mode **modes,
bool *enabled, int width, int height)
{
struct drm_connector *connector;
int i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (enabled[i] == false) {
i++;
continue;
}
DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
connector->base.id);
/* got for command line mode first */
modes[i] = drm_pick_cmdline_mode(connector, width, height);
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
connector->base.id);
modes[i] = drm_has_preferred_mode(connector, width, height);
}
/* No preferred modes, pick one off the list */
if (!modes[i] && !list_empty(&connector->modes)) {
list_for_each_entry(modes[i], &connector->modes, head)
break;
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
i++;
}
return true;
}
static int drm_pick_crtcs(struct drm_device *dev,
struct drm_crtc **best_crtcs,
struct drm_display_mode **modes,
int n, int width, int height)
{
int c, o;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_crtc *best_crtc;
int my_score, best_score, score;
struct drm_crtc **crtcs, *crtc;
if (n == dev->mode_config.num_connector)
return 0;
c = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (c == n)
break;
c++;
}
best_crtcs[n] = NULL;
best_crtc = NULL;
best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height);
if (modes[n] == NULL)
return best_score;
crtcs = kmalloc(dev->mode_config.num_connector *
sizeof(struct drm_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
my_score = 1;
if (connector->status == connector_status_connected)
my_score++;
if (drm_has_cmdline_mode(connector))
my_score++;
if (drm_has_preferred_mode(connector, width, height))
my_score++;
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if (!encoder)
goto out;
connector->encoder = encoder;
/* select a crtc for this connector and then attempt to configure
remaining connectors */
c = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if ((encoder->possible_crtcs & (1 << c)) == 0) {
c++;
continue;
}
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
break;
if (o < n) {
/* ignore cloning for now */
c++;
continue;
}
crtcs[n] = crtc;
memcpy(crtcs, best_crtcs, n * sizeof(struct drm_crtc *));
score = my_score + drm_pick_crtcs(dev, crtcs, modes, n + 1,
width, height);
if (score > best_score) {
best_crtc = crtc;
best_score = score;
memcpy(best_crtcs, crtcs,
dev->mode_config.num_connector *
sizeof(struct drm_crtc *));
}
c++;
}
out:
kfree(crtcs);
return best_score;
}
static void drm_setup_crtcs(struct drm_device *dev)
{
struct drm_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_encoder *encoder;
struct drm_connector *connector;
bool *enabled;
int width, height;
int i, ret;
DRM_DEBUG_KMS("\n");
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
/* clean out all the encoder/crtc combos */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder->crtc = NULL;
}
crtcs = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_display_mode *), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
drm_enable_connectors(dev, enabled);
ret = drm_target_preferred(dev, modes, enabled, width, height);
if (!ret)
DRM_ERROR("Unable to find initial modes\n");
DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);
drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_display_mode *mode = modes[i];
struct drm_crtc *crtc = crtcs[i];
if (connector->encoder == NULL) {
i++;
continue;
}
if (mode && crtc) {
DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
mode->name, crtc->base.id);
crtc->desired_mode = mode;
connector->encoder->crtc = crtc;
} else {
connector->encoder->crtc = NULL;
connector->encoder = NULL;
}
i++;
}
kfree(crtcs);
kfree(modes);
kfree(enabled);
}
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
@ -936,10 +625,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
ret = -EINVAL;
goto fail;
}
/* TODO are these needed? */
set->crtc->desired_x = set->x;
set->crtc->desired_y = set->y;
set->crtc->desired_mode = set->mode;
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
@ -984,63 +669,6 @@ fail:
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
bool drm_helper_plugged_event(struct drm_device *dev)
{
DRM_DEBUG_KMS("\n");
drm_helper_probe_connector_modes(dev, dev->mode_config.max_width,
dev->mode_config.max_height);
drm_setup_crtcs(dev);
/* alert the driver fb layer */
dev->mode_config.funcs->fb_changed(dev);
/* FIXME: send hotplug event */
return true;
}
/**
* drm_initial_config - setup a sane initial connector configuration
* @dev: DRM device
*
* LOCKING:
* Called at init time, must take mode config lock.
*
* Scan the CRTCs and connectors and try to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
bool drm_helper_initial_config(struct drm_device *dev)
{
int count = 0;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
drm_fb_helper_parse_command_line(dev);
count = drm_helper_probe_connector_modes(dev,
dev->mode_config.max_width,
dev->mode_config.max_height);
/*
* we shouldn't end up with no modes here.
*/
if (count == 0)
printk(KERN_INFO "No connectors reported connected with modes\n");
drm_setup_crtcs(dev);
/* alert the driver fb layer */
dev->mode_config.funcs->fb_changed(dev);
return 0;
}
EXPORT_SYMBOL(drm_helper_initial_config);
static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
int dpms = DRM_MODE_DPMS_OFF;
@ -1123,27 +751,6 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
/**
* drm_hotplug_stage_two
* @dev DRM device
* @connector hotpluged connector
*
* LOCKING.
* Caller must hold mode config lock, function might grab struct lock.
*
* Stage two of a hotplug.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_helper_hotplug_stage_two(struct drm_device *dev)
{
drm_helper_plugged_event(dev);
return 0;
}
EXPORT_SYMBOL(drm_helper_hotplug_stage_two);
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd)
{
@ -1200,3 +807,98 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
return 0;
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
static struct slow_work_ops output_poll_ops;
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct slow_work *work)
{
struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
struct drm_connector *connector;
enum drm_connector_status old_status, status;
bool repoll = false, changed = false;
int ret;
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
/* if this is HPD or polled don't check it -
TV out for instance */
if (!connector->polled)
continue;
else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
repoll = true;
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
!(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
status = connector->funcs->detect(connector);
if (old_status != status)
changed = true;
}
mutex_unlock(&dev->mode_config.mutex);
if (changed) {
/* send a uevent + call fbdev */
drm_sysfs_hotplug_event(dev);
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
}
if (repoll) {
ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
if (ret)
DRM_ERROR("delayed enqueue failed %d\n", ret);
}
}
void drm_kms_helper_poll_init(struct drm_device *dev)
{
struct drm_connector *connector;
bool poll = false;
int ret;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled)
poll = true;
}
slow_work_register_user(THIS_MODULE);
delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
&output_poll_ops);
if (poll) {
ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
if (ret)
DRM_ERROR("delayed enqueue failed %d\n", ret);
}
}
EXPORT_SYMBOL(drm_kms_helper_poll_init);
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
slow_work_unregister_user(THIS_MODULE);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
void drm_helper_hpd_irq_event(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
/* schedule a slow work asap */
delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
static struct slow_work_ops output_poll_ops = {
.execute = output_poll_execute,
};
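
Taken together, these new helpers give KMS drivers periodic connector detection on top of SLOW_WORK (hence the Kconfig select near the top of this merge). A hedged sketch of how a driver opts in - the flag choices here are hypothetical, the helper calls are the ones added above:

    /* at connector init: no working hotplug line, so poll both directions */
    connector->polled = DRM_CONNECTOR_POLL_CONNECT |
                        DRM_CONNECTOR_POLL_DISCONNECT;

    /* at driver load, once connectors exist; re-detects every 10s (10*HZ) */
    drm_kms_helper_poll_init(dev);

    /* from a hotplug interrupt: cancel the pending poll, detect right away */
    drm_helper_hpd_irq_event(dev);

    /* at teardown */
    drm_kms_helper_poll_fini(dev);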

View File

@ -47,12 +47,10 @@ int drm_dma_setup(struct drm_device *dev)
{
int i;
dev->dma = kmalloc(sizeof(*dev->dma), GFP_KERNEL);
dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
if (!dev->dma)
return -ENOMEM;
memset(dev->dma, 0, sizeof(*dev->dma));
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));

File diff suppressed because it is too large


File diff suppressed because it is too large

View File

@ -243,11 +243,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->filp = filp;
priv->uid = current_euid();

View File

@ -123,6 +123,31 @@ drm_gem_destroy(struct drm_device *dev)
dev->mm_private = NULL;
}
/**
* Initialize an already allocate GEM object of the specified size with
* shmfs backing store.
*/
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
obj->dev = dev;
obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(obj->filp))
return -ENOMEM;
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
atomic_inc(&dev->object_count);
atomic_add(obj->size, &dev->object_memory);
return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
@ -131,28 +156,22 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
goto free;
obj->dev = dev;
obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(obj->filp))
if (drm_gem_object_init(dev, obj, size) != 0)
goto free;
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
goto fput;
}
atomic_inc(&dev->object_count);
atomic_add(obj->size, &dev->object_memory);
return obj;
fput:
/* Object_init mangles the global counters - readjust them. */
atomic_dec(&dev->object_count);
atomic_sub(obj->size, &dev->object_memory);
fput(obj->filp);
free:
kfree(obj);
@ -403,15 +422,15 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
idr_destroy(&file_private->object_idr);
}
static void
drm_gem_object_free_common(struct drm_gem_object *obj)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
fput(obj->filp);
atomic_dec(&dev->object_count);
atomic_sub(obj->size, &dev->object_memory);
kfree(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
/**
* Called after the last reference to the object has been lost.
@ -429,8 +448,6 @@ drm_gem_object_free(struct kref *kref)
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
@ -453,8 +470,6 @@ drm_gem_object_free_unlocked(struct kref *kref)
dev->driver->gem_free_object(obj);
mutex_unlock(&dev->struct_mutex);
}
drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free_unlocked);
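
drm_gem_object_init() and drm_gem_object_release() split allocation from initialization so that drivers can embed the GEM object in their own structure instead of chaining through driver_private; the i915 hunks later in this merge do exactly that. A minimal sketch for a hypothetical driver (the names are illustrative):

    struct my_bo {
            struct drm_gem_object base;    /* embedded, not pointed to */
            u32 my_flags;
    };

    static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
    {
            struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

            if (!bo)
                    return NULL;
            /* size must be page-aligned; drm_gem_object_init() BUG()s if not */
            if (drm_gem_object_init(dev, &bo->base, size) != 0) {
                    kfree(bo);
                    return NULL;
            }
            return bo;
    }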

View File

@ -258,8 +258,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
if (interlaced)
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
/* Fill the mode line name */
drm_mode_set_name(drm_mode);
if (reduced)
@ -268,43 +270,35 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
else
drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NHSYNC);
if (interlaced)
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
return drm_mode;
return drm_mode;
}
EXPORT_SYMBOL(drm_cvt_mode);
/**
* drm_gtf_mode - create the modeline based on GTF algorithm
* drm_gtf_mode_complex - create the modeline based on full GTF algorithm
*
* @dev :drm device
* @hdisplay :hdisplay size
* @vdisplay :vdisplay size
* @vrefresh :vrefresh rate.
* @interlaced :whether the interlace is supported
* @margins :whether the margin is supported
* @margins :desired margin size
* @GTF_[MCKJ] :extended GTF formula parameters
*
* LOCKING.
* none.
*
* return the modeline based on GTF algorithm
* return the modeline based on full GTF algorithm.
*
* This function is to create the modeline based on the GTF algorithm.
* Generalized Timing Formula is derived from:
* GTF Spreadsheet by Andy Morrish (1/5/97)
* available at http://www.vesa.org
*
* And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
* What I have done is to translate it by using integer calculation.
* I also refer to the function of fb_get_mode in the file of
* drivers/video/fbmon.c
* GTF feature blocks specify C and J in multiples of 0.5, so we pass them
* in here multiplied by two. For a C of 40, pass in 80.
*/
struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
int vdisplay, int vrefresh,
bool interlaced, int margins)
{
/* 1) top/bottom margin size (% of height) - default: 1.8, */
struct drm_display_mode *
drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
int vrefresh, bool interlaced, int margins,
int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
{ /* 1) top/bottom margin size (% of height) - default: 1.8, */
#define GTF_MARGIN_PERCENTAGE 18
/* 2) character cell horizontal granularity (pixels) - default 8 */
#define GTF_CELL_GRAN 8
@ -316,17 +310,9 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
#define H_SYNC_PERCENT 8
/* min time of vsync + back porch (microsec) */
#define MIN_VSYNC_PLUS_BP 550
/* blanking formula gradient */
#define GTF_M 600
/* blanking formula offset */
#define GTF_C 40
/* blanking formula scaling factor */
#define GTF_K 128
/* blanking formula scaling factor */
#define GTF_J 20
/* C' and M' are part of the Blanking Duty Cycle computation */
#define GTF_C_PRIME (((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
#define GTF_M_PRIME (GTF_K * GTF_M / 256)
#define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
#define GTF_M_PRIME (GTF_K * GTF_M / 256)
struct drm_display_mode *drm_mode;
unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
int top_margin, bottom_margin;
@ -460,17 +446,61 @@ struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
drm_mode->clock = pixel_freq;
drm_mode_set_name(drm_mode);
drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
if (interlaced) {
drm_mode->vtotal *= 2;
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
drm_mode_set_name(drm_mode);
if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
else
drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
return drm_mode;
}
EXPORT_SYMBOL(drm_gtf_mode_complex);
/**
* drm_gtf_mode - create the modeline based on GTF algorithm
*
* @dev :drm device
* @hdisplay :hdisplay size
* @vdisplay :vdisplay size
* @vrefresh :vrefresh rate.
* @interlaced :whether the interlace is supported
* @margins :whether the margin is supported
*
* LOCKING.
* none.
*
* return the modeline based on GTF algorithm
*
* This function is to create the modeline based on the GTF algorithm.
* Generalized Timing Formula is derived from:
* GTF Spreadsheet by Andy Morrish (1/5/97)
* available at http://www.vesa.org
*
* And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c.
* What I have done is to translate it by using integer calculation.
* I also refer to the function of fb_get_mode in the file of
* drivers/video/fbmon.c
*
* Standard GTF parameters:
* M = 600
* C = 40
* K = 128
* J = 20
*/
struct drm_display_mode *
drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
bool lace, int margins)
{
return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
margins, 600, 40 * 2, 128, 20 * 2);
}
EXPORT_SYMBOL(drm_gtf_mode);
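
For orientation, the blanking duty-cycle arithmetic behind the GTF_C_PRIME and GTF_M_PRIME macros above, in the spec's own notation (standard VESA GTF definitions assumed):

    /* C' = ((C - J) * K / 256) + J        M' = K * M / 256
     *
     * With the defaults M=600, C=40, K=128, J=20:
     *   C' = ((40 - 20) * 128 / 256) + 20 = 30
     *   M' = 128 * 600 / 256 = 300
     *
     * GTF_2C and GTF_2J carry C and J doubled, since feature blocks specify
     * them in steps of 0.5 - hence the trailing "/ 2" in the new GTF_C_PRIME
     * macro and the "40 * 2, 128, 20 * 2" in the drm_gtf_mode() wrapper. */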
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
@ -482,8 +512,11 @@ EXPORT_SYMBOL(drm_gtf_mode);
*/
void drm_mode_set_name(struct drm_display_mode *mode)
{
snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
mode->vdisplay);
bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
mode->hdisplay, mode->vdisplay,
interlaced ? "i" : "");
}
EXPORT_SYMBOL(drm_mode_set_name);
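
A quick example of the effect, assuming a 1080i mode:

    /* mode->flags includes DRM_MODE_FLAG_INTERLACE */
    drm_mode_set_name(mode);
    /* mode->name is now "1920x1080i"; previously it read "1920x1080",
     * indistinguishable from the progressive mode of the same size */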

View File

@ -334,7 +334,7 @@ static struct device_attribute connector_attrs_opt1[] = {
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
.size = 128,
.size = 0,
.read = edid_show,
};

View File

@ -33,3 +33,5 @@ i915-$(CONFIG_ACPI) += i915_opregion.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)

View File

@ -69,16 +69,6 @@ struct intel_dvo_dev_ops {
*/
void (*dpms)(struct intel_dvo_device *dvo, int mode);
/*
* Saves the output's state for restoration on VT switch.
*/
void (*save)(struct intel_dvo_device *dvo);
/*
* Restore's the output's state at VT switch.
*/
void (*restore)(struct intel_dvo_device *dvo);
/*
* Callback for testing a video mode for a given output.
*

View File

@ -159,16 +159,7 @@
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
uint8_t save_hapi;
uint8_t save_vali;
uint8_t save_valo;
uint8_t save_ailo;
uint8_t save_lvds_pll_vco;
uint8_t save_feedback_div;
uint8_t save_lvds_control_2;
uint8_t save_outputs_enable;
uint8_t save_lvds_power_down;
uint8_t save_power_management;
uint8_t dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@ -401,39 +392,6 @@ do { \
DUMP(CH7017_LVDS_POWER_DOWN);
}
static void ch7017_save(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
ch7017_read(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, &priv->save_hapi);
ch7017_read(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, &priv->save_valo);
ch7017_read(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, &priv->save_ailo);
ch7017_read(dvo, CH7017_LVDS_PLL_VCO_CONTROL, &priv->save_lvds_pll_vco);
ch7017_read(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, &priv->save_feedback_div);
ch7017_read(dvo, CH7017_LVDS_CONTROL_2, &priv->save_lvds_control_2);
ch7017_read(dvo, CH7017_OUTPUTS_ENABLE, &priv->save_outputs_enable);
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &priv->save_lvds_power_down);
ch7017_read(dvo, CH7017_POWER_MANAGEMENT, &priv->save_power_management);
}
static void ch7017_restore(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
/* Power down before changing mode */
ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, priv->save_hapi);
ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, priv->save_valo);
ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, priv->save_ailo);
ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, priv->save_lvds_pll_vco);
ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, priv->save_feedback_div);
ch7017_write(dvo, CH7017_LVDS_CONTROL_2, priv->save_lvds_control_2);
ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, priv->save_outputs_enable);
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, priv->save_lvds_power_down);
ch7017_write(dvo, CH7017_POWER_MANAGEMENT, priv->save_power_management);
}
static void ch7017_destroy(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
@ -451,7 +409,5 @@ struct intel_dvo_dev_ops ch7017_ops = {
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
.dump_regs = ch7017_dump_regs,
.save = ch7017_save,
.restore = ch7017_restore,
.destroy = ch7017_destroy,
};

View File

@ -92,21 +92,10 @@ static struct ch7xxx_id_struct {
{ CH7301_VID, "CH7301" },
};
struct ch7xxx_reg_state {
uint8_t regs[CH7xxx_NUM_REGS];
};
struct ch7xxx_priv {
bool quiet;
struct ch7xxx_reg_state save_reg;
struct ch7xxx_reg_state mode_reg;
uint8_t save_TCTL, save_TPCP, save_TPD, save_TPVT;
uint8_t save_TLPF, save_TCT, save_PM, save_IDF;
};
static void ch7xxx_save(struct intel_dvo_device *dvo);
static char *ch7xxx_get_id(uint8_t vid)
{
int i;
@ -312,42 +301,17 @@ static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
uint8_t val;
if ((i % 8) == 0 )
DRM_LOG_KMS("\n %02X: ", i);
DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
ch7xxx_readb(dvo, i, &val);
DRM_LOG_KMS("%02X ", val);
}
}
static void ch7xxx_save(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
ch7xxx_readb(dvo, CH7xxx_TCTL, &ch7xxx->save_TCTL);
ch7xxx_readb(dvo, CH7xxx_TPCP, &ch7xxx->save_TPCP);
ch7xxx_readb(dvo, CH7xxx_TPD, &ch7xxx->save_TPD);
ch7xxx_readb(dvo, CH7xxx_TPVT, &ch7xxx->save_TPVT);
ch7xxx_readb(dvo, CH7xxx_TLPF, &ch7xxx->save_TLPF);
ch7xxx_readb(dvo, CH7xxx_PM, &ch7xxx->save_PM);
ch7xxx_readb(dvo, CH7xxx_IDF, &ch7xxx->save_IDF);
}
static void ch7xxx_restore(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
ch7xxx_writeb(dvo, CH7xxx_TCTL, ch7xxx->save_TCTL);
ch7xxx_writeb(dvo, CH7xxx_TPCP, ch7xxx->save_TPCP);
ch7xxx_writeb(dvo, CH7xxx_TPD, ch7xxx->save_TPD);
ch7xxx_writeb(dvo, CH7xxx_TPVT, ch7xxx->save_TPVT);
ch7xxx_writeb(dvo, CH7xxx_TLPF, ch7xxx->save_TLPF);
ch7xxx_writeb(dvo, CH7xxx_IDF, ch7xxx->save_IDF);
ch7xxx_writeb(dvo, CH7xxx_PM, ch7xxx->save_PM);
}
static void ch7xxx_destroy(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
@ -365,7 +329,5 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
.dump_regs = ch7xxx_dump_regs,
.save = ch7xxx_save,
.restore = ch7xxx_restore,
.destroy = ch7xxx_destroy,
};

View File

@ -153,9 +153,6 @@ struct ivch_priv {
bool quiet;
uint16_t width, height;
uint16_t save_VR01;
uint16_t save_VR40;
};
@ -405,22 +402,6 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("VR8F: 0x%04x\n", val);
}
static void ivch_save(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
ivch_read(dvo, VR01, &priv->save_VR01);
ivch_read(dvo, VR40, &priv->save_VR40);
}
static void ivch_restore(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
ivch_write(dvo, VR01, priv->save_VR01);
ivch_write(dvo, VR40, priv->save_VR40);
}
static void ivch_destroy(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
@ -434,8 +415,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
struct intel_dvo_dev_ops ivch_ops= {
.init = ivch_init,
.dpms = ivch_dpms,
.save = ivch_save,
.restore = ivch_restore,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,

View File

@ -58,17 +58,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define SIL164_REGC 0x0c
struct sil164_save_rec {
uint8_t reg8;
uint8_t reg9;
uint8_t regc;
};
struct sil164_priv {
//I2CDevRec d;
bool quiet;
struct sil164_save_rec save_regs;
struct sil164_save_rec mode_regs;
};
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
@ -252,34 +244,6 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
}
static void sil164_save(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil= dvo->dev_priv;
if (!sil164_readb(dvo, SIL164_REG8, &sil->save_regs.reg8))
return;
if (!sil164_readb(dvo, SIL164_REG9, &sil->save_regs.reg9))
return;
if (!sil164_readb(dvo, SIL164_REGC, &sil->save_regs.regc))
return;
return;
}
static void sil164_restore(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil = dvo->dev_priv;
/* Restore it powered down initially */
sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8 & ~0x1);
sil164_writeb(dvo, SIL164_REG9, sil->save_regs.reg9);
sil164_writeb(dvo, SIL164_REGC, sil->save_regs.regc);
sil164_writeb(dvo, SIL164_REG8, sil->save_regs.reg8);
}
static void sil164_destroy(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil = dvo->dev_priv;
@ -297,7 +261,5 @@ struct intel_dvo_dev_ops sil164_ops = {
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
.dump_regs = sil164_dump_regs,
.save = sil164_save,
.restore = sil164_restore,
.destroy = sil164_destroy,
};

View File

@ -86,16 +86,8 @@
#define TFP410_V_RES_LO 0x3C
#define TFP410_V_RES_HI 0x3D
struct tfp410_save_rec {
uint8_t ctl1;
uint8_t ctl2;
};
struct tfp410_priv {
bool quiet;
struct tfp410_save_rec saved_reg;
struct tfp410_save_rec mode_reg;
};
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
@ -293,28 +285,6 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
static void tfp410_save(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
if (!tfp410_readb(dvo, TFP410_CTL_1, &tfp->saved_reg.ctl1))
return;
if (!tfp410_readb(dvo, TFP410_CTL_2, &tfp->saved_reg.ctl2))
return;
}
static void tfp410_restore(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
/* Restore it powered down initially */
tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1 & ~0x1);
tfp410_writeb(dvo, TFP410_CTL_2, tfp->saved_reg.ctl2);
tfp410_writeb(dvo, TFP410_CTL_1, tfp->saved_reg.ctl1);
}
static void tfp410_destroy(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
@ -332,7 +302,5 @@ struct intel_dvo_dev_ops tfp410_ops = {
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
.dump_regs = tfp410_dump_regs,
.save = tfp410_save,
.restore = tfp410_restore,
.destroy = tfp410_destroy,
};

View File

@ -96,19 +96,18 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
spin_lock(lock);
list_for_each_entry(obj_priv, head, list)
{
struct drm_gem_object *obj = obj_priv->obj;
seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
obj,
&obj_priv->base,
get_pin_flag(obj_priv),
obj->size,
obj->read_domains, obj->write_domain,
obj_priv->base.size,
obj_priv->base.read_domains,
obj_priv->base.write_domain,
obj_priv->last_rendering_seqno,
obj_priv->dirty ? " dirty" : "",
obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->name)
seq_printf(m, " (name: %d)", obj->name);
if (obj_priv->base.name)
seq_printf(m, " (name: %d)", obj_priv->base.name);
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
if (obj_priv->gtt_space != NULL)
@ -289,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
spin_lock(&dev_priv->mm.active_list_lock);
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
obj = obj_priv->obj;
obj = &obj_priv->base;
if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
ret = i915_gem_object_get_pages(obj, 0);
if (ret) {
@ -567,23 +566,14 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_crtc *crtc;
drm_i915_private_t *dev_priv = dev->dev_private;
bool fbc_enabled = false;
if (!dev_priv->display.fbc_enabled) {
if (!I915_HAS_FBC(dev)) {
seq_printf(m, "FBC unsupported on this chipset\n");
return 0;
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->enabled)
continue;
if (dev_priv->display.fbc_enabled(crtc))
fbc_enabled = true;
}
if (fbc_enabled) {
if (intel_fbc_enabled(dev)) {
seq_printf(m, "FBC enabled\n");
} else {
seq_printf(m, "FBC disabled: ");

View File

@ -1357,13 +1357,12 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->cfb_size = size;
intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
if (IS_GM45(dev)) {
g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
dev_priv->compressed_llb = compressed_llb;
@ -1504,8 +1503,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
drm_helper_initial_config(dev);
intel_fbdev_init(dev);
drm_kms_helper_poll_init(dev);
return 0;
destroy_ringbuffer:
@ -1591,7 +1590,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
*/
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv;
resource_size_t base, size;
int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
@ -1723,6 +1722,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Start out suspended */
dev_priv->mm.suspended = 1;
intel_detect_pch(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev, prealloc_start,
prealloc_size, agp_size);
@ -1769,6 +1770,8 @@ int i915_driver_unload(struct drm_device *dev)
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_modeset_cleanup(dev);
/*
* free the memory space allocated for the child device
* config parsed from VBT
@ -1792,8 +1795,6 @@ int i915_driver_unload(struct drm_device *dev)
intel_opregion_free(dev, 0);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_modeset_cleanup(dev);
i915_gem_free_all_phys_object(dev);
mutex_lock(&dev->struct_mutex);

View File

@ -188,6 +188,35 @@ const static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
void intel_detect_pch (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pch;
/*
* The reason to probe ISA bridge instead of Dev31:Fun0 is to
* make graphics device passthrough work easy for VMM, that only
* need to expose ISA bridge to let driver know the real hardware
* underneath. This is a requirement from virtualization team.
*/
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (pch) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
int id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
}
}
pci_dev_put(pch);
}
}
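
A worked example of the probe's masking - the bridge device ID 0x1c46 is an assumption for illustration, not taken from this diff:

    id = 0x1c46 & INTEL_PCH_DEVICE_ID_MASK;   /* 0xff00 mask -> 0x1c00 */
    if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE)   /* 0x1c00: matches */
            dev_priv->pch_type = PCH_CPT;     /* CougarPoint detected */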
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

View File

@ -128,6 +128,7 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct drm_gem_object *obj;
struct list_head lru_list;
};
struct sdvo_device_mapping {
@ -135,6 +136,7 @@ struct sdvo_device_mapping {
u8 slave_addr;
u8 dvo_wiring;
u8 initialized;
u8 ddc_pin;
};
struct drm_i915_error_state {
@ -175,7 +177,7 @@ struct drm_i915_error_state {
struct drm_i915_display_funcs {
void (*dpms)(struct drm_crtc *crtc, int mode);
bool (*fbc_enabled)(struct drm_crtc *crtc);
bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
@ -222,6 +224,13 @@ enum no_fbc_reason {
FBC_NOT_TILED, /* buffer not tiled */
};
enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
};
struct intel_fbdev;
typedef struct drm_i915_private {
struct drm_device *dev;
@ -335,6 +344,9 @@ typedef struct drm_i915_private {
/* Display functions */
struct drm_i915_display_funcs display;
/* PCH chipset type */
enum intel_pch pch_type;
/* Register state */
bool modeset_on_lid;
u8 saveLBB;
@ -637,11 +649,14 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
struct drm_gem_object *obj;
struct drm_gem_object base;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
@ -651,9 +666,6 @@ struct drm_i915_gem_object {
/** This object's place on GPU write list */
struct list_head gpu_write_list;
/** This object's place on the fenced object LRU */
struct list_head fence_list;
/**
* This is set if the object is on the active or flushing lists
* (has pending rendering), and is not set if it's on inactive (ready
@ -740,7 +752,7 @@ struct drm_i915_gem_object {
atomic_t pending_flip;
};
#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
/**
* Request queue structure.
@ -902,6 +914,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
@ -998,6 +1012,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void i8xx_disable_fbc(struct drm_device *dev);
extern void g4x_disable_fbc(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
/**
* Lock test for when it's just for synchronization of ring access.
@ -1130,7 +1150,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
!IS_GEN6(dev))
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
@ -1144,6 +1165,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
IS_GEN6(dev))
#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif
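
The recurring obj_priv->obj to &obj_priv->base edits throughout the i915 hunks in this merge all follow from the header change above: struct drm_gem_object is now embedded in struct drm_i915_gem_object rather than pointed to, and to_intel_bo() recovers the wrapper with container_of(). Both directions of the conversion, as a sketch:

    struct drm_i915_gem_object *priv = to_intel_bo(obj);  /* outer from inner */
    struct drm_gem_object *gobj = &priv->base;            /* inner from outer */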

View File

@ -124,7 +124,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size = roundup(args->size, PAGE_SIZE);
/* Allocate the new object */
obj = drm_gem_object_alloc(dev, args->size);
obj = i915_gem_alloc_object(dev, args->size);
if (obj == NULL)
return -ENOMEM;
@ -1051,7 +1051,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* about to occur.
*/
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
list_move_tail(&obj_priv->fence_list,
struct drm_i915_fence_reg *reg =
&dev_priv->fence_regs[obj_priv->fence_reg];
list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
}
@ -1566,7 +1568,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
list_for_each_entry_safe(obj_priv, next,
&dev_priv->mm.gpu_write_list,
gpu_write_list) {
struct drm_gem_object *obj = obj_priv->obj;
struct drm_gem_object *obj = &obj_priv->base;
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
@ -1577,9 +1579,12 @@ i915_gem_process_flushing_list(struct drm_device *dev,
i915_gem_object_move_to_active(obj, seqno);
/* update the fence lru list */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
list_move_tail(&obj_priv->fence_list,
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_fence_reg *reg =
&dev_priv->fence_regs[obj_priv->fence_reg];
list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
}
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
@ -1745,7 +1750,7 @@ i915_gem_retire_request(struct drm_device *dev,
obj_priv = list_first_entry(&dev_priv->mm.active_list,
struct drm_i915_gem_object,
list);
obj = obj_priv->obj;
obj = &obj_priv->base;
/* If the seqno being retired doesn't match the oldest in the
* list, then the oldest in the list must still be newer than
@ -2119,7 +2124,7 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
/* Try to find the smallest clean object */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
struct drm_gem_object *obj = obj_priv->obj;
struct drm_gem_object *obj = &obj_priv->base;
if (obj->size >= min_size) {
if ((!obj_priv->dirty ||
i915_gem_object_is_purgeable(obj_priv)) &&
@ -2253,7 +2258,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
/* Find an object that we can immediately reuse */
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
obj = obj_priv->obj;
obj = &obj_priv->base;
if (obj->size >= min_size)
break;
@ -2485,9 +2490,10 @@ static int i915_find_fence_reg(struct drm_device *dev)
/* None available, try to steal one or wait for a user to finish */
i = I915_FENCE_REG_NONE;
list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
fence_list) {
obj = obj_priv->obj;
list_for_each_entry(reg, &dev_priv->mm.fence_list,
lru_list) {
obj = reg->obj;
obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count)
continue;
@ -2536,7 +2542,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
/* Just update our place in the LRU if our fence is getting used. */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
reg = &dev_priv->fence_regs[obj_priv->fence_reg];
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
return 0;
}
@ -2566,7 +2573,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
obj_priv->fence_reg = ret;
reg = &dev_priv->fence_regs[obj_priv->fence_reg];
list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
reg->obj = obj;
@ -2598,6 +2605,8 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_i915_fence_reg *reg =
&dev_priv->fence_regs[obj_priv->fence_reg];
if (IS_GEN6(dev)) {
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@ -2616,9 +2625,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
I915_WRITE(fence_reg, 0);
}
dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
reg->obj = NULL;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
list_del_init(&obj_priv->fence_list);
list_del_init(&reg->lru_list);
}
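
The LRU edits scattered through i915_find_fence_reg(), i915_gem_object_get_fence_reg() and this function all stem from one structural move: the list node now lives in the fence register rather than in the object, so mm.fence_list links registers and the owning buffer is reached through reg->obj. Schematically:

    struct drm_i915_fence_reg {
            struct drm_gem_object *obj;
            struct list_head lru_list;   /* was obj_priv->fence_list */
    };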
/**
@ -4471,34 +4480,38 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return 0;
}
struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
struct drm_i915_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
return NULL;
if (drm_gem_object_init(dev, &obj->base, size) != 0) {
kfree(obj);
return NULL;
}
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->agp_type = AGP_USER_MEMORY;
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->list);
INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED;
trace_i915_gem_object_create(&obj->base);
return &obj->base;
}
int i915_gem_init_object(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv;
obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
if (obj_priv == NULL)
return -ENOMEM;
/*
* We've just allocated pages from the kernel,
* so they've just been written by the CPU with
* zeros. They'll need to be clflushed before we
* use them with the GPU.
*/
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj_priv->agp_type = AGP_USER_MEMORY;
obj->driver_private = obj_priv;
obj_priv->obj = obj;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
INIT_LIST_HEAD(&obj_priv->gpu_write_list);
INIT_LIST_HEAD(&obj_priv->fence_list);
obj_priv->madv = I915_MADV_WILLNEED;
trace_i915_gem_object_create(obj);
BUG();
return 0;
}
@ -4521,9 +4534,11 @@ void i915_gem_free_object(struct drm_gem_object *obj)
if (obj_priv->mmap_offset)
i915_gem_free_mmap_offset(obj);
drm_gem_object_release(obj);
kfree(obj_priv->page_cpu_valid);
kfree(obj_priv->bit_17);
kfree(obj->driver_private);
kfree(obj_priv);
}
/** Unbinds all inactive objects. */
@ -4536,9 +4551,9 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
struct drm_gem_object *obj;
int ret;
obj = list_first_entry(&dev_priv->mm.inactive_list,
struct drm_i915_gem_object,
list)->obj;
obj = &list_first_entry(&dev_priv->mm.inactive_list,
struct drm_i915_gem_object,
list)->base;
ret = i915_gem_object_unbind(obj);
if (ret != 0) {
@ -4608,7 +4623,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
struct drm_i915_gem_object *obj_priv;
int ret;
obj = drm_gem_object_alloc(dev, 4096);
obj = i915_gem_alloc_object(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
@ -4653,7 +4668,7 @@ i915_gem_init_hws(struct drm_device *dev)
if (!I915_NEED_GFX_HWS(dev))
return 0;
obj = drm_gem_object_alloc(dev, 4096);
obj = i915_gem_alloc_object(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
ret = -ENOMEM;
@ -4764,7 +4779,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
if (ret != 0)
return ret;
obj = drm_gem_object_alloc(dev, 128 * 1024);
obj = i915_gem_alloc_object(dev, 128 * 1024);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
i915_gem_cleanup_hws(dev);
@ -4957,6 +4972,8 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
@ -5184,6 +5201,20 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
mutex_unlock(&dev->struct_mutex);
}
static int
i915_gpu_is_active(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list);
spin_unlock(&dev_priv->mm.active_list_lock);
return !lists_empty;
}
static int
i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
@ -5213,6 +5244,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
spin_lock(&shrink_list_lock);
rescan:
/* first scan for clean buffers */
list_for_each_entry_safe(dev_priv, next_dev,
&shrink_list, mm.shrink_list) {
@ -5229,7 +5261,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
&dev_priv->mm.inactive_list,
list) {
if (i915_gem_object_is_purgeable(obj_priv)) {
i915_gem_object_unbind(obj_priv->obj);
i915_gem_object_unbind(&obj_priv->base);
if (--nr_to_scan <= 0)
break;
}
@ -5258,7 +5290,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
&dev_priv->mm.inactive_list,
list) {
if (nr_to_scan > 0) {
i915_gem_object_unbind(obj_priv->obj);
i915_gem_object_unbind(&obj_priv->base);
nr_to_scan--;
} else
cnt++;
@ -5270,6 +5302,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
would_deadlock = 0;
}
if (nr_to_scan) {
int active = 0;
/*
* We are desperate for pages, so as a last resort, wait
* for the GPU to finish and discard whatever we can.
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
struct drm_device *dev = dev_priv->dev;
if (!mutex_trylock(&dev->struct_mutex))
continue;
spin_unlock(&shrink_list_lock);
if (i915_gpu_is_active(dev)) {
i915_gpu_idle(dev);
active++;
}
spin_lock(&shrink_list_lock);
mutex_unlock(&dev->struct_mutex);
}
if (active)
goto rescan;
}
spin_unlock(&shrink_list_lock);
if (would_deadlock)
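
In summary, the shrinker now escalates in three passes; a sketch of the order as merged (paraphrasing the code above, not new logic):

    /* i915_gem_shrink() escalation:
     *  1. unbind inactive objects already marked purgeable (cheapest)
     *  2. unbind remaining inactive objects until nr_to_scan is met
     *  3. last resort: idle any active GPU (i915_gpu_idle()) and rescan,
     *     trading latency for fewer OOM-killer events under GPU load */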

View File

@ -39,7 +39,7 @@ i915_verify_inactive(struct drm_device *dev, char *file, int line)
struct drm_i915_gem_object *obj_priv;
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
obj = obj_priv->obj;
obj = &obj_priv->base;
if (obj_priv->pin_count || obj_priv->active ||
(obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
I915_GEM_DOMAIN_GTT)))

View File

@ -283,6 +283,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL;
}
if (obj_priv->pin_count) {
drm_gem_object_unreference_unlocked(obj);
return -EBUSY;
}
if (args->tiling_mode == I915_TILING_NONE) {
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
args->stride = 0;

View File

@ -169,9 +169,13 @@ void intel_enable_asle (struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
else
else {
i915_enable_pipestat(dev_priv, 1,
I915_LEGACY_BLC_EVENT_ENABLE);
if (IS_I965G(dev))
i915_enable_pipestat(dev_priv, 0,
I915_LEGACY_BLC_EVENT_ENABLE);
}
}
/**
@ -256,18 +260,18 @@ static void i915_hotplug_work_func(struct work_struct *work)
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
struct drm_encoder *encoder;
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
if (mode_config->num_encoder) {
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->hot_plug)
(*intel_encoder->hot_plug) (intel_encoder);
}
}
/* Just fire off a uevent and let userspace tell us what to do */
drm_sysfs_hotplug_event(dev);
drm_helper_hpd_irq_event(dev);
}
static void i915_handle_rps_change(struct drm_device *dev)
@ -612,7 +616,7 @@ static void i915_capture_error_state(struct drm_device *dev)
batchbuffer[1] = NULL;
count = 0;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
struct drm_gem_object *obj = obj_priv->obj;
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL &&
bbaddr >= obj_priv->gtt_offset &&
@ -648,7 +652,7 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo) {
int i = 0;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
struct drm_gem_object *obj = obj_priv->obj;
struct drm_gem_object *obj = &obj_priv->base;
error->active_bo[i].size = obj->size;
error->active_bo[i].name = obj->name;
@ -950,7 +954,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip(dev, 1);
}
if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
(pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
opregion_asle_intr(dev);

View File

@ -1764,6 +1764,14 @@
#define DP_LINK_TRAIN_MASK (3 << 28)
#define DP_LINK_TRAIN_SHIFT 28
/* CPT Link training mode */
#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
#define DP_LINK_TRAIN_PAT_2_CPT (1 << 8)
#define DP_LINK_TRAIN_PAT_IDLE_CPT (2 << 8)
#define DP_LINK_TRAIN_OFF_CPT (3 << 8)
#define DP_LINK_TRAIN_MASK_CPT (7 << 8)
#define DP_LINK_TRAIN_SHIFT_CPT 8
/* Signal voltages. These are mostly controlled by the other end */
#define DP_VOLTAGE_0_4 (0 << 25)
#define DP_VOLTAGE_0_6 (1 << 25)
@ -1924,7 +1932,10 @@
/* Display & cursor control */
/* dithering flag on Ironlake */
#define PIPE_ENABLE_DITHER (1 << 4)
#define PIPE_ENABLE_DITHER (1 << 4)
#define PIPE_DITHER_TYPE_MASK (3 << 2)
#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
#define PIPEACONF 0x70008
@ -1988,15 +1999,24 @@
#define DSPFW1 0x70034
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
#define DSPFW_CURSORB_MASK (0x3f<<16)
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW_PLANEB_MASK (0x7f<<8)
#define DSPFW_PLANEA_MASK (0x7f)
#define DSPFW2 0x70038
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_PLANEC_MASK (0x7f)
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
#define DSPFW_HPLL_CURSOR_SHIFT 16
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@ -2023,6 +2043,43 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK 0x45100
#define WM0_PIPE_PLANE_MASK (0x7f<<16)
#define WM0_PIPE_PLANE_SHIFT 16
#define WM0_PIPE_SPRITE_MASK (0x3f<<8)
#define WM0_PIPE_SPRITE_SHIFT 8
#define WM0_PIPE_CURSOR_MASK (0x1f)
#define WM0_PIPEB_ILK 0x45104
#define WM1_LP_ILK 0x45108
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
#define WM1_LP_LATENCY_MASK (0x7f<<24)
#define WM1_LP_SR_MASK (0x1ff<<8)
#define WM1_LP_SR_SHIFT 8
#define WM1_LP_CURSOR_MASK (0x3f)
/* Memory latency timer register */
#define MLTR_ILK 0x11222
/* the unit of memory self-refresh latency time is 0.5us */
#define ILK_SRLT_MASK 0x3f
/* define the fifo size on Ironlake */
#define ILK_DISPLAY_FIFO 128
#define ILK_DISPLAY_MAXWM 64
#define ILK_DISPLAY_DFTWM 8
#define ILK_DISPLAY_SR_FIFO 512
#define ILK_DISPLAY_MAX_SRWM 0x1ff
#define ILK_DISPLAY_DFT_SRWM 0x3f
#define ILK_CURSOR_SR_FIFO 64
#define ILK_CURSOR_MAX_SRWM 0x3f
#define ILK_CURSOR_DFT_SRWM 8
#define ILK_FIFO_LINE_SIZE 64
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@ -2304,8 +2361,15 @@
#define GTIIR 0x44018
#define GTIER 0x4401c
#define ILK_DISPLAY_CHICKEN2 0x42004
#define ILK_DPARB_GATE (1<<22)
#define ILK_VSDPFD_FULL (1<<21)
#define ILK_DSPCLK_GATE 0x42020
#define ILK_DPARB_CLK_GATE (1<<5)
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
/* PCH */
@ -2316,6 +2380,11 @@
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
#define SDE_HOTPLUG_MASK (0xf << 8)
/* CPT */
#define SDE_CRT_HOTPLUG_CPT (1 << 19)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@ -2407,6 +2476,17 @@
#define PCH_SSC4_PARMS 0xc6210
#define PCH_SSC4_AUX_PARMS 0xc6214
#define PCH_DPLL_SEL 0xc7000
#define TRANSA_DPLL_ENABLE (1<<3)
#define TRANSA_DPLLB_SEL (1<<0)
#define TRANSA_DPLLA_SEL 0
#define TRANSB_DPLL_ENABLE (1<<7)
#define TRANSB_DPLLB_SEL (1<<4)
#define TRANSB_DPLLA_SEL (0)
#define TRANSC_DPLL_ENABLE (1<<11)
#define TRANSC_DPLLB_SEL (1<<8)
#define TRANSC_DPLLA_SEL (0)
/* transcoder */
#define TRANS_HTOTAL_A 0xe0000
@ -2493,6 +2573,19 @@
#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
/* ILK always use 400mV 0dB for voltage swing and pre-emphasis level.
SNB has different settings. */
/* SNB A-stepping */
#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
/* SNB B-stepping */
#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22)
#define FDI_DP_PORT_WIDTH_X1 (0<<19)
#define FDI_DP_PORT_WIDTH_X2 (1<<19)
#define FDI_DP_PORT_WIDTH_X3 (2<<19)
@ -2525,6 +2618,13 @@
#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
#define FDI_SEL_RAWCLK (0<<4)
#define FDI_SEL_PCDCLK (1<<4)
/* CPT */
#define FDI_AUTO_TRAINING (1<<10)
#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8)
#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8)
#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
#define FDI_RXA_MISC 0xf0010
#define FDI_RXB_MISC 0xf1010
@ -2596,6 +2696,9 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
/* PCH SDVOB multiplex with HDMIB */
#define PCH_SDVOB HDMIB
#define HDMIC 0xe1150
#define HDMID 0xe1160
@ -2653,4 +2756,42 @@
#define PCH_DPD_AUX_CH_DATA4 0xe4320
#define PCH_DPD_AUX_CH_DATA5 0xe4324
/* CPT */
#define PORT_TRANS_A_SEL_CPT 0
#define PORT_TRANS_B_SEL_CPT (1<<29)
#define PORT_TRANS_C_SEL_CPT (2<<29)
#define PORT_TRANS_SEL_MASK (3<<29)
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
#define TRANS_DP_PORT_SEL_D (2<<29)
#define TRANS_DP_PORT_SEL_MASK (3<<29)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_ENH_FRAMING (1<<18)
#define TRANS_DP_8BPC (0<<9)
#define TRANS_DP_10BPC (1<<9)
#define TRANS_DP_6BPC (2<<9)
#define TRANS_DP_12BPC (3<<9)
#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
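A sketch of programming the new CPT transcoder DP control register from these fields (assumed usage; the real consumer presumably lives in the suppressed intel_display.c diff further down):

	u32 val = TRANS_DP_OUTPUT_ENABLE | TRANS_DP_ENH_FRAMING | TRANS_DP_8BPC;
	val |= TRANS_DP_PORT_SEL_B;		/* port chosen per connector */
	val |= TRANS_DP_HSYNC_ACTIVE_HIGH;	/* per adjusted mode flags */
	I915_WRITE(TRANS_DP_CTL_A, val);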
/* SNB eDP training params */
/* SNB A-stepping */
#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22)
#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22)
#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22)
/* SNB B-stepping */
#define EDP_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22)
#define EDP_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22)
#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
#endif /* _I915_REG_H_ */

View File

@ -600,14 +600,16 @@ void i915_save_display(struct drm_device *dev)
}
/* FIXME: save TV & SDVO state */
/* FBC state */
if (IS_GM45(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
} else {
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
/* Only save FBC state on platforms that support FBC */
if (I915_HAS_FBC(dev)) {
if (IS_GM45(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
} else {
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
}
/* VGA state */
@ -702,18 +704,19 @@ void i915_restore_display(struct drm_device *dev)
}
/* FIXME: restore TV & SDVO state */
/* FBC info */
if (IS_GM45(dev)) {
g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else {
i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
/* Only restore FBC state on platforms that support FBC */
if (I915_HAS_FBC(dev)) {
if (IS_GM45(dev)) {
g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else {
i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
}
}
/* VGA state */
if (IS_IRONLAKE(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);

View File

@ -115,7 +115,7 @@ TRACE_EVENT(i915_gem_object_get_fence,
__entry->obj, __entry->fence, __entry->tiling_mode)
);
TRACE_EVENT(i915_gem_object_unbind,
DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_gem_object *obj),
@ -132,21 +132,18 @@ TRACE_EVENT(i915_gem_object_unbind,
TP_printk("obj=%p", __entry->obj)
);
TRACE_EVENT(i915_gem_object_destroy,
DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
TP_PROTO(struct drm_gem_object *obj),
TP_ARGS(obj),
TP_ARGS(obj)
);
TP_STRUCT__entry(
__field(struct drm_gem_object *, obj)
),
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
TP_fast_assign(
__entry->obj = obj;
),
TP_PROTO(struct drm_gem_object *obj),
TP_printk("obj=%p", __entry->obj)
TP_ARGS(obj)
);
/* batch tracing */
@ -197,8 +194,7 @@ TRACE_EVENT(i915_gem_request_flush,
__entry->flush_domains, __entry->invalidate_domains)
);
TRACE_EVENT(i915_gem_request_complete,
DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_device *dev, u32 seqno),
@ -217,64 +213,35 @@ TRACE_EVENT(i915_gem_request_complete,
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
TRACE_EVENT(i915_gem_request_retire,
DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->seqno = seqno;
),
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
TP_ARGS(dev, seqno)
);
TRACE_EVENT(i915_gem_request_wait_begin,
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->seqno = seqno;
),
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
TP_ARGS(dev, seqno)
);
TRACE_EVENT(i915_gem_request_wait_end,
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->seqno = seqno;
),
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
TP_ARGS(dev, seqno)
);
TRACE_EVENT(i915_ring_wait_begin,
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
TP_PROTO(struct drm_device *dev, u32 seqno),
TP_ARGS(dev, seqno)
);
DECLARE_EVENT_CLASS(i915_ring,
TP_PROTO(struct drm_device *dev),
@ -291,26 +258,23 @@ TRACE_EVENT(i915_ring_wait_begin,
TP_printk("dev=%u", __entry->dev)
);
TRACE_EVENT(i915_ring_wait_end,
DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
TP_ARGS(dev)
);
TP_STRUCT__entry(
__field(u32, dev)
),
DEFINE_EVENT(i915_ring, i915_ring_wait_end,
TP_fast_assign(
__entry->dev = dev->primary->index;
),
TP_PROTO(struct drm_device *dev),
TP_printk("dev=%u", __entry->dev)
TP_ARGS(dev)
);
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>
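The refactor above is the standard DECLARE_EVENT_CLASS/DEFINE_EVENT deduplication: one class carries the shared TP_STRUCT__entry/TP_fast_assign/TP_printk blocks, and each named event is stamped out from it. A minimal sketch of the pattern on a hypothetical event class (not from the patch):

DECLARE_EVENT_CLASS(hypothetical_seqno_class,
	TP_PROTO(struct drm_device *dev, u32 seqno),
	TP_ARGS(dev, seqno),
	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, seqno)
	),
	TP_fast_assign(
		__entry->dev = dev->primary->index;
		__entry->seqno = seqno;
	),
	TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);

/* Each DEFINE_EVENT reuses the class body, so the per-event boilerplate
 * shrinks to just the prototype and arguments. */
DEFINE_EVENT(hypothetical_seqno_class, hypothetical_event_begin,
	TP_PROTO(struct drm_device *dev, u32 seqno),
	TP_ARGS(dev, seqno)
);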

View File

@ -366,6 +366,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->dvo_port = p_child->dvo_port;
p_mapping->slave_addr = p_child->slave_addr;
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->initialized = 1;
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "

View File

@ -136,11 +136,17 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
if (intel_crtc->pipe == 0) {
adpa |= ADPA_PIPE_A_SELECT;
if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_A_SEL_CPT;
else
adpa |= ADPA_PIPE_A_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
adpa |= ADPA_PIPE_B_SELECT;
if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_B_SEL_CPT;
else
adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, 0);
}
@ -152,15 +158,21 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
u32 adpa, temp;
bool ret;
adpa = I915_READ(PCH_ADPA);
temp = adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
/* disable HPD first */
I915_WRITE(PCH_ADPA, adpa);
(void)I915_READ(PCH_ADPA);
if (HAS_PCH_CPT(dev)) {
/* Disable DAC before force detect */
I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
(void)I915_READ(PCH_ADPA);
} else {
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
/* disable HPD first */
I915_WRITE(PCH_ADPA, adpa);
(void)I915_READ(PCH_ADPA);
}
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@ -176,6 +188,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
;
if (HAS_PCH_CPT(dev)) {
I915_WRITE(PCH_ADPA, temp);
(void)I915_READ(PCH_ADPA);
}
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA);
adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
@ -245,9 +262,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return false;
}
static bool intel_crt_detect_ddc(struct drm_connector *connector)
static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* CRT should always be at 0, but check anyway */
if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
@ -387,8 +404,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
@ -400,18 +417,19 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
return connector_status_disconnected;
}
if (intel_crt_detect_ddc(connector))
if (intel_crt_detect_ddc(encoder))
return connector_status_connected;
/* for pre-945g platforms use load detect */
if (encoder->crtc && encoder->crtc->enabled) {
status = intel_crt_load_detect(encoder->crtc, intel_encoder);
} else {
crtc = intel_get_load_detect_pipe(intel_encoder,
crtc = intel_get_load_detect_pipe(intel_encoder, connector,
NULL, &dpms_mode);
if (crtc) {
status = intel_crt_load_detect(crtc, intel_encoder);
intel_release_load_detect_pipe(intel_encoder, dpms_mode);
intel_release_load_detect_pipe(intel_encoder,
connector, dpms_mode);
} else
status = connector_status_unknown;
}
@ -421,9 +439,6 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
static void intel_crt_destroy(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@ -432,29 +447,27 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
int ret;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct i2c_adapter *ddcbus;
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct i2c_adapter *ddc_bus;
struct drm_device *dev = connector->dev;
ret = intel_ddc_get_modes(intel_encoder);
ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret || !IS_G4X(dev))
goto end;
ddcbus = intel_encoder->ddc_bus;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
intel_encoder->ddc_bus =
intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
if (!intel_encoder->ddc_bus) {
intel_encoder->ddc_bus = ddcbus;
if (!ddc_bus) {
dev_printk(KERN_ERR, &connector->dev->pdev->dev,
"DDC bus registration failed for CRTDDC_D.\n");
goto end;
}
/* Try to get modes by GPIOD port */
ret = intel_ddc_get_modes(intel_encoder);
intel_i2c_destroy(ddcbus);
ret = intel_ddc_get_modes(connector, ddc_bus);
intel_i2c_destroy(ddc_bus);
end:
return ret;
@ -491,12 +504,16 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
.best_encoder = intel_best_encoder,
.best_encoder = intel_attached_encoder,
};
static void intel_crt_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@ -507,6 +524,7 @@ void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 i2c_reg;
@ -514,14 +532,20 @@ void intel_crt_init(struct drm_device *dev)
if (!intel_encoder)
return;
connector = &intel_encoder->base;
drm_connector_init(dev, &intel_encoder->base,
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_encoder);
return;
}
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
drm_mode_connector_attach_encoder(&intel_encoder->base,
drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
/* Set up the DDC bus. */
@ -553,5 +577,10 @@ void intel_crt_init(struct drm_device *dev)
drm_sysfs_connector_add(connector);
if (I915_HAS_HOTPLUG(dev))
connector->polled = DRM_CONNECTOR_POLL_HPD;
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}

File diff suppressed because it is too large

View File

@ -48,8 +48,6 @@ struct intel_dp_priv {
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
uint32_t save_DP;
uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int dpms_mode;
uint8_t link_bw;
@ -141,7 +139,8 @@ static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
int max_lanes = intel_dp_max_lane_count(intel_encoder);
@ -215,7 +214,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t output_reg = dp_priv->output_reg;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@ -224,19 +223,27 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
uint32_t ctl;
uint32_t status;
uint32_t aux_clock_divider;
int try;
int try, precharge;
/* The clock divider is based on the hrawclk,
* and the AUX channel would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
if (IS_eDP(intel_encoder))
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
else if (HAS_PCH_SPLIT(dev))
if (IS_eDP(intel_encoder)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
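	/*
	 * Worked numbers (annotation, not from the patch): the divider field
	 * holds input clock / 2MHz, so a 450MHz eDP input gives 225, the
	 * 400MHz SNB eDP input gives 200, and the fixed 125MHz PCH rawclk
	 * gives 62 (125/2, truncated).
	 */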
if (IS_GEN6(dev))
precharge = 3;
else
precharge = 5;
/* Must try at least 3 times according to DP spec */
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
@ -249,7 +256,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
ctl = (DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
@ -465,7 +472,8 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
intel_dp_i2c_init(struct intel_encoder *intel_encoder,
struct intel_connector *intel_connector, const char *name)
{
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
@ -480,7 +488,7 @@ intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
dp_priv->adapter.algo_data = &dp_priv->algo;
dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
return i2c_dp_aux_add_bus(&dp_priv->adapter);
}
@ -555,7 +563,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
@ -564,13 +572,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
/*
* Find the lane count in the intel_encoder private
*/
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
struct intel_encoder *intel_encoder;
struct intel_dp_priv *dp_priv;
if (!connector->encoder || connector->encoder->crtc != crtc)
if (!encoder || encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
dp_priv = intel_encoder->dev_priv;
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = dp_priv->lane_count;
break;
@ -626,16 +637,24 @@ static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_crtc *crtc = intel_encoder->enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
dp_priv->DP = (DP_LINK_TRAIN_OFF |
DP_VOLTAGE_0_4 |
DP_PRE_EMPHASIS_0 |
DP_SYNC_VS_HIGH |
DP_SYNC_HS_HIGH);
dp_priv->DP = (DP_VOLTAGE_0_4 |
DP_PRE_EMPHASIS_0);
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
dp_priv->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dp_priv->DP |= DP_SYNC_VS_HIGH;
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
else
dp_priv->DP |= DP_LINK_TRAIN_OFF;
switch (dp_priv->lane_count) {
case 1:
@ -664,7 +683,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
dp_priv->DP |= DP_ENHANCED_FRAMING;
}
if (intel_crtc->pipe == 1)
/* CPT DP's pipe select is decided in TRANS_DP_CTL */
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
dp_priv->DP |= DP_PIPEB_SELECT;
if (IS_eDP(intel_encoder)) {
@ -704,7 +724,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(dp_priv->output_reg);
@ -749,20 +769,6 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
return link_status[r - DP_LANE0_1_STATUS];
}
static void
intel_dp_save(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
dp_priv->save_DP = I915_READ(dp_priv->output_reg);
intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
dp_priv->save_link_configuration,
sizeof (dp_priv->save_link_configuration));
}
static uint8_t
intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@ -892,6 +898,25 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count)
return signal_levels;
}
/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
}
}
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@ -948,7 +973,7 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
uint8_t train_set[4],
bool first)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
@ -974,7 +999,7 @@ static void
intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
@ -985,23 +1010,38 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
bool channel_eq = false;
bool first = true;
int tries;
u32 reg;
/* Write the link configuration data */
intel_dp_aux_native_write(intel_encoder, 0x100,
intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
link_configuration, DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
DP &= ~DP_LINK_TRAIN_MASK;
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
memset(train_set, 0, 4);
voltage = 0xff;
tries = 0;
clock_recovery = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
uint32_t signal_levels;
if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
@ -1041,11 +1081,23 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
channel_eq = false;
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
uint32_t signal_levels;
if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
@ -1068,7 +1120,12 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
++tries;
}
I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
I915_WRITE(dp_priv->output_reg, reg);
POSTING_READ(dp_priv->output_reg);
intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
@ -1077,7 +1134,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
static void
intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
@ -1090,9 +1147,15 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
udelay(100);
}
DP &= ~DP_LINK_TRAIN_MASK;
I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
POSTING_READ(dp_priv->output_reg);
if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
POSTING_READ(dp_priv->output_reg);
} else {
DP &= ~DP_LINK_TRAIN_MASK;
I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
POSTING_READ(dp_priv->output_reg);
}
udelay(17000);
@ -1102,18 +1165,6 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
POSTING_READ(dp_priv->output_reg);
}
static void
intel_dp_restore(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
if (dp_priv->save_DP & DP_PORT_EN)
intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
else
intel_dp_link_down(intel_encoder, dp_priv->save_DP);
}
/*
* According to DP spec
* 5.1.2:
@ -1144,7 +1195,8 @@ intel_dp_check_link_status(struct intel_encoder *intel_encoder)
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
enum drm_connector_status status;
@ -1168,8 +1220,9 @@ ironlake_dp_detect(struct drm_connector *connector)
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_device *dev = intel_encoder->base.dev;
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
@ -1180,16 +1233,6 @@ intel_dp_detect(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
temp = I915_READ(PORT_HOTPLUG_EN);
I915_WRITE(PORT_HOTPLUG_EN,
temp |
DPB_HOTPLUG_INT_EN |
DPC_HOTPLUG_INT_EN |
DPD_HOTPLUG_INT_EN);
POSTING_READ(PORT_HOTPLUG_EN);
switch (dp_priv->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
@ -1222,15 +1265,16 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_device *dev = intel_encoder->base.dev;
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
ret = intel_ddc_get_modes(intel_encoder);
ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret)
return ret;
@ -1249,13 +1293,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static void
intel_dp_destroy (struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
if (intel_encoder->i2c_bus)
intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(intel_encoder);
kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@ -1268,8 +1308,6 @@ static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.save = intel_dp_save,
.restore = intel_dp_restore,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_dp_destroy,
@ -1278,12 +1316,17 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
.best_encoder = intel_best_encoder,
.best_encoder = intel_attached_encoder,
};
static void intel_dp_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->i2c_bus)
intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@ -1299,12 +1342,35 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
intel_dp_check_link_status(intel_encoder);
}
/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel (struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
if (!encoder || encoder->crtc != crtc)
continue;
intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
return dp_priv->output_reg;
}
}
return -1;
}
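A hedged sketch of the expected caller (the PCH_DP_B/C/D register offsets are assumed from elsewhere in i915_reg.h; the real consumer is presumably in the suppressed intel_display.c diff):

	u32 port_sel;
	switch (intel_trans_dp_port_sel(crtc)) {
	case PCH_DP_B:
		port_sel = TRANS_DP_PORT_SEL_B;
		break;
	case PCH_DP_C:
		port_sel = TRANS_DP_PORT_SEL_C;
		break;
	case PCH_DP_D:
		port_sel = TRANS_DP_PORT_SEL_D;
		break;
	default:	/* -1: no DP encoder on this crtc */
		port_sel = TRANS_DP_PORT_SEL_B;
		break;
	}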
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_dp_priv *dp_priv;
const char *name = NULL;
@ -1313,13 +1379,21 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (!intel_encoder)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_encoder);
return;
}
dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
connector = &intel_encoder->base;
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD;
if (output_reg == DP_A)
intel_encoder->type = INTEL_OUTPUT_EDP;
else
@ -1349,7 +1423,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
drm_mode_connector_attach_encoder(&intel_encoder->base,
drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
drm_sysfs_connector_add(connector);
@ -1378,7 +1452,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
intel_dp_i2c_init(intel_encoder, name);
intel_dp_i2c_init(intel_encoder, intel_connector, name);
intel_encoder->ddc_bus = &dp_priv->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;

View File

@ -96,8 +96,6 @@ struct intel_framebuffer {
struct intel_encoder {
struct drm_connector base;
struct drm_encoder enc;
int type;
struct i2c_adapter *i2c_bus;
@ -110,6 +108,11 @@ struct intel_encoder {
int clone_mask;
};
struct intel_connector {
struct drm_connector base;
void *dev_priv;
};
struct intel_crtc;
struct intel_overlay {
struct drm_device *dev;
@ -149,17 +152,18 @@ struct intel_crtc {
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
int fdi_lanes;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
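Splitting intel_connector out of intel_encoder means a connector no longer *is* its encoder. The lookup pattern this conversion introduces, repeated throughout the diff, as a sketch:

/* Sketch only: hop from a connector to its intel_encoder via the
 * attached drm_encoder, replacing the old to_intel_encoder(connector). */
static inline struct intel_encoder *
hypothetical_connector_to_intel_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder = intel_attached_encoder(connector);

	return enc_to_intel_encoder(encoder);
}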
struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct i2c_adapter *adapter);
int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
void intel_i2c_reset_gmbus(struct drm_device *dev);
@ -183,7 +187,7 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
@ -192,17 +196,16 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
extern void intel_wait_for_vblank(struct drm_device *dev);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct drm_display_mode *mode,
int *dpms_mode);
extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
int dpms_mode);
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
extern int intelfb_probe(struct drm_device *dev);
extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
@ -212,10 +215,12 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern int intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_framebuffer **fb,
struct drm_gem_object *obj);
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
@ -229,4 +234,6 @@ extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void intel_fb_output_poll_changed(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */

View File

@ -96,39 +96,11 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
}
}
static void intel_dvo_save(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* Each output should probably just save the registers it touches,
* but for now, use more overkill.
*/
dev_priv->saveDVOA = I915_READ(DVOA);
dev_priv->saveDVOB = I915_READ(DVOB);
dev_priv->saveDVOC = I915_READ(DVOC);
dvo->dev_ops->save(dvo);
}
static void intel_dvo_restore(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
dvo->dev_ops->restore(dvo);
I915_WRITE(DVOA, dev_priv->saveDVOA);
I915_WRITE(DVOB, dev_priv->saveDVOB);
I915_WRITE(DVOC, dev_priv->saveDVOC);
}
static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@ -241,7 +213,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
*/
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
return dvo->dev_ops->detect(dvo);
@ -249,7 +222,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
static int intel_dvo_get_modes(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* We should probably have an i2c driver get_modes function for those
@ -257,7 +231,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
intel_ddc_get_modes(intel_encoder);
intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (!list_empty(&connector->probed_modes))
return 1;
@ -275,39 +249,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
static void intel_dvo_destroy (struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (dvo) {
if (dvo->dev_ops->destroy)
dvo->dev_ops->destroy(dvo);
if (dvo->panel_fixed_mode)
kfree(dvo->panel_fixed_mode);
/* no need, in i830_dvoices[] now */
//kfree(dvo);
}
if (intel_encoder->i2c_bus)
intel_i2c_destroy(intel_encoder->i2c_bus);
if (intel_encoder->ddc_bus)
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(intel_encoder);
kfree(connector);
}
#ifdef RANDR_GET_CRTC_INTERFACE
static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
return intel_pipe_to_crtc(pScrn, pipe);
}
#endif
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.dpms = intel_dvo_dpms,
.mode_fixup = intel_dvo_mode_fixup,
@ -318,8 +264,6 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.save = intel_dvo_save,
.restore = intel_dvo_restore,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
@ -328,12 +272,26 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
.best_encoder = intel_best_encoder,
.best_encoder = intel_attached_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (dvo) {
if (dvo->dev_ops->destroy)
dvo->dev_ops->destroy(dvo);
if (dvo->panel_fixed_mode)
kfree(dvo->panel_fixed_mode);
}
if (intel_encoder->i2c_bus)
intel_i2c_destroy(intel_encoder->i2c_bus);
if (intel_encoder->ddc_bus)
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
@ -352,7 +310,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dvo_device *dvo = intel_encoder->dev_priv;
uint32_t dvo_reg = dvo->dvo_reg;
uint32_t dvo_val = I915_READ(dvo_reg);
@ -384,6 +343,7 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
void intel_dvo_init(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
@ -393,6 +353,12 @@ void intel_dvo_init(struct drm_device *dev)
if (!intel_encoder)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_encoder);
return;
}
/* Set up the DDC bus */
intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
if (!intel_encoder->ddc_bus)
@ -400,7 +366,7 @@ void intel_dvo_init(struct drm_device *dev)
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_encoder->base;
struct drm_connector *connector = &intel_connector->base;
int gpio;
dvo = &intel_dvo_devices[i];
@ -471,7 +437,7 @@ void intel_dvo_init(struct drm_device *dev)
drm_encoder_helper_add(&intel_encoder->enc,
&intel_dvo_helper_funcs);
drm_mode_connector_attach_encoder(&intel_encoder->base,
drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
@ -496,4 +462,5 @@ void intel_dvo_init(struct drm_device *dev)
intel_i2c_destroy(i2cbus);
free_intel:
kfree(intel_encoder);
kfree(intel_connector);
}

View File

@ -44,9 +44,10 @@
#include "i915_drm.h"
#include "i915_drv.h"
struct intelfb_par {
struct intel_fbdev {
struct drm_fb_helper helper;
struct intel_framebuffer *intel_fb;
struct intel_framebuffer ifb;
struct list_head fbdev_list;
struct drm_display_mode *our_mode;
};
@ -54,7 +55,6 @@ static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@ -63,62 +63,12 @@ static struct fb_ops intelfb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
};
/**
* Currently it is assumed that the old framebuffer is reused.
*
* LOCKING
* caller should hold the mode config lock.
*
*/
int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = ifbdev->helper.dev;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_display_mode *mode = crtc->desired_mode;
fb = crtc->fb;
if (!fb)
return 1;
info = fb->fbdev;
if (!info)
return 1;
if (!mode)
return 1;
info->var.xres = mode->hdisplay;
info->var.right_margin = mode->hsync_start - mode->hdisplay;
info->var.hsync_len = mode->hsync_end - mode->hsync_start;
info->var.left_margin = mode->htotal - mode->hsync_end;
info->var.yres = mode->vdisplay;
info->var.lower_margin = mode->vsync_start - mode->vdisplay;
info->var.vsync_len = mode->vsync_end - mode->vsync_start;
info->var.upper_margin = mode->vtotal - mode->vsync_end;
info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
/* avoid overflow */
info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
return 0;
}
EXPORT_SYMBOL(intelfb_resize);
static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
uint32_t fb_height, uint32_t surface_width,
uint32_t surface_height,
uint32_t surface_depth, uint32_t surface_bpp,
struct drm_framebuffer **fb_p)
{
struct fb_info *info;
struct intelfb_par *par;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *fbo = NULL;
struct drm_i915_gem_object *obj_priv;
@ -126,19 +76,19 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
/* we don't do packed 24bpp */
if (surface_bpp == 24)
surface_bpp = 32;
if (sizes->surface_bpp == 24)
sizes->surface_bpp = 32;
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.bpp = surface_bpp;
mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
mode_cmd.depth = surface_depth;
mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
fbo = drm_gem_object_alloc(dev, size);
fbo = i915_gem_alloc_object(dev, size);
if (!fbo) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
@ -157,45 +107,37 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
/* Flush everything out, we'll be doing GTT only from now on */
i915_gem_object_set_to_gtt_domain(fbo, 1);
ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
if (ret) {
DRM_ERROR("failed to allocate fb.\n");
goto out_unpin;
}
list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
intel_fb = to_intel_framebuffer(fb);
*fb_p = fb;
info = framebuffer_alloc(sizeof(struct intelfb_par), device);
info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
goto out_unpin;
}
par = info->par;
info->par = ifbdev;
par->helper.funcs = &intel_fb_helper_funcs;
par->helper.dev = dev;
ret = drm_fb_helper_init_crtc_count(&par->helper, 2,
INTELFB_CONN_LIMIT);
if (ret)
goto out_unref;
intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
fb = &ifbdev->ifb.base;
ifbdev->helper.fb = fb;
ifbdev->helper.fbdev = info;
strcpy(info->fix.id, "inteldrmfb");
info->flags = FBINFO_DEFAULT;
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
info->aperture_base = dev->mode_config.fb_base;
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
if (IS_I9XX(dev))
info->aperture_size = pci_resource_len(dev->pdev, 2);
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
else
info->aperture_size = pci_resource_len(dev->pdev, 0);
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
info->fix.smem_len = size;
@ -208,12 +150,18 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
ret = -ENOSPC;
goto out_unpin;
}
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
ret = -ENOMEM;
goto out_unpin;
}
info->screen_size = size;
// memset(info->screen_base, 0, size);
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
@ -225,14 +173,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
fb->fbdev = info;
par->intel_fb = intel_fb;
/* To allow resizing without swapping buffers */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
intel_fb->base.width, intel_fb->base.height,
obj_priv->gtt_offset, fbo);
fb->width, fb->height,
obj_priv->gtt_offset, fbo);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
@ -247,35 +191,86 @@ out:
return ret;
}
int intelfb_probe(struct drm_device *dev)
static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
int new_fb = 0;
int ret;
DRM_DEBUG_KMS("\n");
ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
return ret;
if (!helper->fb) {
ret = intelfb_create(ifbdev, sizes);
if (ret)
return ret;
new_fb = 1;
}
return new_fb;
}
EXPORT_SYMBOL(intelfb_probe);
int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
.fb_probe = intel_fb_find_or_create_single,
};
int intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
struct fb_info *info;
struct intel_framebuffer *ifb = &ifbdev->ifb;
if (!fb)
return -EINVAL;
info = fb->fbdev;
if (info) {
struct intelfb_par *par = info->par;
if (ifbdev->helper.fbdev) {
info = ifbdev->helper.fbdev;
unregister_framebuffer(info);
iounmap(info->screen_base);
if (info->par)
drm_fb_helper_free(&par->helper);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
drm_fb_helper_fini(&ifbdev->helper);
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj)
drm_gem_object_unreference_unlocked(ifb->obj);
return 0;
}
EXPORT_SYMBOL(intelfb_remove);
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
drm_i915_private_t *dev_priv = dev->dev_private;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
if (!ifbdev)
return -ENOMEM;
dev_priv->fbdev = ifbdev;
ifbdev->helper.funcs = &intel_fb_helper_funcs;
drm_fb_helper_init(dev, &ifbdev->helper, 2,
INTELFB_CONN_LIMIT);
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
drm_fb_helper_initial_config(&ifbdev->helper, 32);
return 0;
}
void intel_fbdev_fini(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (!dev_priv->fbdev)
return;
intel_fbdev_destroy(dev, dev_priv->fbdev);
kfree(dev_priv->fbdev);
dev_priv->fbdev = NULL;
}
MODULE_LICENSE("GPL and additional rights");
void intel_fb_output_poll_changed(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
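intel_fb_output_poll_changed is presumably wired in as the driver's output_poll_changed hook, so the new connector-polling core can repaint the fbdev on hotplug. A sketch of that wiring (the fb_create hook name is an assumption, not taken from this diff):

static const struct drm_mode_config_funcs hypothetical_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,	/* assumed existing hook */
	.output_poll_changed = intel_fb_output_poll_changed,
};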

View File

@ -39,7 +39,6 @@
struct intel_hdmi_priv {
u32 sdvox_reg;
u32 save_SDVOX;
bool has_hdmi_sink;
};
@ -63,8 +62,12 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (hdmi_priv->has_hdmi_sink)
sdvox |= SDVO_AUDIO_ENABLE;
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
if (intel_crtc->pipe == 1) {
if (HAS_PCH_CPT(dev))
sdvox |= PORT_TRANS_B_SEL_CPT;
else
sdvox |= SDVO_PIPE_B_SELECT;
}
I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
POSTING_READ(hdmi_priv->sdvox_reg);
@ -106,27 +109,6 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
}
}
static void intel_hdmi_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
}
static void intel_hdmi_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
POSTING_READ(hdmi_priv->sdvox_reg);
}
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@ -151,13 +133,14 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
hdmi_priv->has_hdmi_sink = false;
edid = drm_get_edid(&intel_encoder->base,
edid = drm_get_edid(connector,
intel_encoder->ddc_bus);
if (edid) {
@ -165,7 +148,7 @@ intel_hdmi_detect(struct drm_connector *connector)
status = connector_status_connected;
hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
intel_encoder->base.display_info.raw_edid = NULL;
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@ -174,24 +157,21 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
return intel_ddc_get_modes(intel_encoder);
return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
if (intel_encoder->i2c_bus)
intel_i2c_destroy(intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(intel_encoder);
kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@ -204,8 +184,6 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.save = intel_hdmi_save,
.restore = intel_hdmi_restore,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_hdmi_destroy,
@ -214,12 +192,17 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
.best_encoder = intel_best_encoder,
.best_encoder = intel_attached_encoder,
};
static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->i2c_bus)
intel_i2c_destroy(intel_encoder->i2c_bus);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@ -231,21 +214,30 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_hdmi_priv *hdmi_priv;
intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_encoder)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_encoder);
return;
}
hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
connector = &intel_encoder->base;
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
intel_encoder->type = INTEL_OUTPUT_HDMI;
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
@ -285,7 +277,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
drm_mode_connector_attach_encoder(&intel_encoder->base,
drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_encoder->enc);
drm_sysfs_connector_add(connector);
@ -303,6 +295,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
kfree(intel_encoder);
kfree(intel_connector);
return;
}


@ -139,75 +139,6 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
/* XXX: We never power down the LVDS pairs. */
}
static void intel_lvds_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
if (HAS_PCH_SPLIT(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
pp_div_reg = PCH_PP_DIVISOR;
pwm_ctl_reg = BLC_PWM_CPU_CTL;
} else {
pp_on_reg = PP_ON_DELAYS;
pp_off_reg = PP_OFF_DELAYS;
pp_ctl_reg = PP_CONTROL;
pp_div_reg = PP_DIVISOR;
pwm_ctl_reg = BLC_PWM_CTL;
}
dev_priv->savePP_ON = I915_READ(pp_on_reg);
dev_priv->savePP_OFF = I915_READ(pp_off_reg);
dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
BACKLIGHT_DUTY_CYCLE_MASK);
/*
* If the light is off at server startup, just make it full brightness
*/
if (dev_priv->backlight_duty_cycle == 0)
dev_priv->backlight_duty_cycle =
intel_lvds_get_max_backlight(dev);
}
static void intel_lvds_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
if (HAS_PCH_SPLIT(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
pp_div_reg = PCH_PP_DIVISOR;
pwm_ctl_reg = BLC_PWM_CPU_CTL;
} else {
pp_on_reg = PP_ON_DELAYS;
pp_off_reg = PP_OFF_DELAYS;
pp_ctl_reg = PP_CONTROL;
pp_div_reg = PP_DIVISOR;
pwm_ctl_reg = BLC_PWM_CTL;
}
I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
intel_lvds_set_power(dev, true);
else
intel_lvds_set_power(dev, false);
}
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@ -635,12 +566,13 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
if (dev_priv->lvds_edid_good) {
ret = intel_ddc_get_modes(intel_encoder);
ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret)
return ret;
@ -717,11 +649,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
static void intel_lvds_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
if (intel_encoder->ddc_bus)
intel_i2c_destroy(intel_encoder->ddc_bus);
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
@ -734,13 +663,14 @@ static int intel_lvds_set_property(struct drm_connector *connector,
uint64_t value)
{
struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder =
to_intel_encoder(connector);
if (property == dev->mode_config.scaling_mode_property &&
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
struct drm_encoder *encoder = connector->encoder;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
@ -774,13 +704,11 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
.best_encoder = intel_best_encoder,
.best_encoder = intel_attached_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.save = intel_lvds_save,
.restore = intel_lvds_restore,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
@ -790,7 +718,12 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
if (intel_encoder->ddc_bus)
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
@ -979,6 +912,7 @@ void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
@ -1012,19 +946,27 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
connector = &intel_encoder->base;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_encoder);
return;
}
connector = &intel_connector->base;
encoder = &intel_encoder->enc;
drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
intel_encoder->type = INTEL_OUTPUT_LVDS;
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
if (IS_I965G(dev))
intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@ -1039,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
* the initial panel fitting mode will be FULL_SCREEN.
*/
drm_connector_attach_property(&intel_encoder->base,
drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@ -1067,7 +1009,7 @@ void intel_lvds_init(struct drm_device *dev)
*/
dev_priv->lvds_edid_good = true;
if (!intel_ddc_get_modes(intel_encoder))
if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
dev_priv->lvds_edid_good = false;
list_for_each_entry(scan, &connector->probed_modes, head) {
@ -1151,4 +1093,5 @@ failed:
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
kfree(intel_connector);
}


@ -54,9 +54,9 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
}
};
intel_i2c_quirk_set(intel_encoder->base.dev, true);
intel_i2c_quirk_set(intel_encoder->enc.dev, true);
ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
intel_i2c_quirk_set(intel_encoder->base.dev, false);
intel_i2c_quirk_set(intel_encoder->enc.dev, false);
if (ret == 2)
return true;
@ -66,22 +66,23 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
/**
* intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
* @adapter: i2c adapter
*
* Fetch the EDID information from @connector using the DDC bus.
*/
int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
int intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
int ret = 0;
intel_i2c_quirk_set(intel_encoder->base.dev, true);
edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
intel_i2c_quirk_set(intel_encoder->base.dev, false);
intel_i2c_quirk_set(connector->dev, true);
edid = drm_get_edid(connector, adapter);
intel_i2c_quirk_set(connector->dev, false);
if (edid) {
drm_mode_connector_update_edid_property(&intel_encoder->base,
edid);
ret = drm_add_edid_modes(&intel_encoder->base, edid);
intel_encoder->base.display_info.raw_edid = NULL;
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
connector->display_info.raw_edid = NULL;
kfree(edid);
}
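With the reworked signature, callers pass the connector and an explicit i2c adapter instead of an intel_encoder. A minimal sketch of a caller (the function name is illustrative; the lookup helpers are the same ones the HDMI and LVDS paths above use):

static int
example_get_modes(struct drm_connector *connector)
{
	struct drm_encoder *encoder = intel_attached_encoder(connector);
	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

	/* fetch EDID over the encoder's DDC bus and register its modes */
	return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}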


@ -373,7 +373,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
obj = overlay->vid_bo->obj;
obj = &overlay->vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
@ -411,7 +411,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
switch (overlay->hw_wedged) {
case RELEASE_OLD_VID:
obj = overlay->old_vid_bo->obj;
obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@ -467,7 +467,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (ret != 0)
return ret;
obj = overlay->old_vid_bo->obj;
obj = &overlay->old_vid_bo->base;
i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj);
overlay->old_vid_bo = NULL;
@ -1341,7 +1341,7 @@ void intel_setup_overlay(struct drm_device *dev)
return;
overlay->dev = dev;
reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
overlay->reg_bo = to_intel_bo(reg_bo);

File diff suppressed because it is too large


@ -916,143 +916,6 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
}
}
static void
intel_tv_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
int i;
tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
tv_priv->save_TV_H_CTL_2 = I915_READ(TV_H_CTL_2);
tv_priv->save_TV_H_CTL_3 = I915_READ(TV_H_CTL_3);
tv_priv->save_TV_V_CTL_1 = I915_READ(TV_V_CTL_1);
tv_priv->save_TV_V_CTL_2 = I915_READ(TV_V_CTL_2);
tv_priv->save_TV_V_CTL_3 = I915_READ(TV_V_CTL_3);
tv_priv->save_TV_V_CTL_4 = I915_READ(TV_V_CTL_4);
tv_priv->save_TV_V_CTL_5 = I915_READ(TV_V_CTL_5);
tv_priv->save_TV_V_CTL_6 = I915_READ(TV_V_CTL_6);
tv_priv->save_TV_V_CTL_7 = I915_READ(TV_V_CTL_7);
tv_priv->save_TV_SC_CTL_1 = I915_READ(TV_SC_CTL_1);
tv_priv->save_TV_SC_CTL_2 = I915_READ(TV_SC_CTL_2);
tv_priv->save_TV_SC_CTL_3 = I915_READ(TV_SC_CTL_3);
tv_priv->save_TV_CSC_Y = I915_READ(TV_CSC_Y);
tv_priv->save_TV_CSC_Y2 = I915_READ(TV_CSC_Y2);
tv_priv->save_TV_CSC_U = I915_READ(TV_CSC_U);
tv_priv->save_TV_CSC_U2 = I915_READ(TV_CSC_U2);
tv_priv->save_TV_CSC_V = I915_READ(TV_CSC_V);
tv_priv->save_TV_CSC_V2 = I915_READ(TV_CSC_V2);
tv_priv->save_TV_CLR_KNOBS = I915_READ(TV_CLR_KNOBS);
tv_priv->save_TV_CLR_LEVEL = I915_READ(TV_CLR_LEVEL);
tv_priv->save_TV_WIN_POS = I915_READ(TV_WIN_POS);
tv_priv->save_TV_WIN_SIZE = I915_READ(TV_WIN_SIZE);
tv_priv->save_TV_FILTER_CTL_1 = I915_READ(TV_FILTER_CTL_1);
tv_priv->save_TV_FILTER_CTL_2 = I915_READ(TV_FILTER_CTL_2);
tv_priv->save_TV_FILTER_CTL_3 = I915_READ(TV_FILTER_CTL_3);
for (i = 0; i < 60; i++)
tv_priv->save_TV_H_LUMA[i] = I915_READ(TV_H_LUMA_0 + (i <<2));
for (i = 0; i < 60; i++)
tv_priv->save_TV_H_CHROMA[i] = I915_READ(TV_H_CHROMA_0 + (i <<2));
for (i = 0; i < 43; i++)
tv_priv->save_TV_V_LUMA[i] = I915_READ(TV_V_LUMA_0 + (i <<2));
for (i = 0; i < 43; i++)
tv_priv->save_TV_V_CHROMA[i] = I915_READ(TV_V_CHROMA_0 + (i <<2));
tv_priv->save_TV_DAC = I915_READ(TV_DAC);
tv_priv->save_TV_CTL = I915_READ(TV_CTL);
}
static void
intel_tv_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
struct drm_crtc *crtc = connector->encoder->crtc;
struct intel_crtc *intel_crtc;
int i;
/* FIXME: No CRTC? */
if (!crtc)
return;
intel_crtc = to_intel_crtc(crtc);
I915_WRITE(TV_H_CTL_1, tv_priv->save_TV_H_CTL_1);
I915_WRITE(TV_H_CTL_2, tv_priv->save_TV_H_CTL_2);
I915_WRITE(TV_H_CTL_3, tv_priv->save_TV_H_CTL_3);
I915_WRITE(TV_V_CTL_1, tv_priv->save_TV_V_CTL_1);
I915_WRITE(TV_V_CTL_2, tv_priv->save_TV_V_CTL_2);
I915_WRITE(TV_V_CTL_3, tv_priv->save_TV_V_CTL_3);
I915_WRITE(TV_V_CTL_4, tv_priv->save_TV_V_CTL_4);
I915_WRITE(TV_V_CTL_5, tv_priv->save_TV_V_CTL_5);
I915_WRITE(TV_V_CTL_6, tv_priv->save_TV_V_CTL_6);
I915_WRITE(TV_V_CTL_7, tv_priv->save_TV_V_CTL_7);
I915_WRITE(TV_SC_CTL_1, tv_priv->save_TV_SC_CTL_1);
I915_WRITE(TV_SC_CTL_2, tv_priv->save_TV_SC_CTL_2);
I915_WRITE(TV_SC_CTL_3, tv_priv->save_TV_SC_CTL_3);
I915_WRITE(TV_CSC_Y, tv_priv->save_TV_CSC_Y);
I915_WRITE(TV_CSC_Y2, tv_priv->save_TV_CSC_Y2);
I915_WRITE(TV_CSC_U, tv_priv->save_TV_CSC_U);
I915_WRITE(TV_CSC_U2, tv_priv->save_TV_CSC_U2);
I915_WRITE(TV_CSC_V, tv_priv->save_TV_CSC_V);
I915_WRITE(TV_CSC_V2, tv_priv->save_TV_CSC_V2);
I915_WRITE(TV_CLR_KNOBS, tv_priv->save_TV_CLR_KNOBS);
I915_WRITE(TV_CLR_LEVEL, tv_priv->save_TV_CLR_LEVEL);
{
int pipeconf_reg = (intel_crtc->pipe == 0) ?
PIPEACONF : PIPEBCONF;
int dspcntr_reg = (intel_crtc->plane == 0) ?
DSPACNTR : DSPBCNTR;
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
int dspbase_reg = (intel_crtc->plane == 0) ?
DSPAADDR : DSPBADDR;
/* Pipe must be off here */
I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
if (!IS_I9XX(dev)) {
/* Wait for vblank for the disable to take effect */
intel_wait_for_vblank(dev);
}
I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
/* Wait for vblank for the disable to take effect. */
intel_wait_for_vblank(dev);
/* Filter ctl must be set before TV_WIN_SIZE */
I915_WRITE(TV_FILTER_CTL_1, tv_priv->save_TV_FILTER_CTL_1);
I915_WRITE(TV_FILTER_CTL_2, tv_priv->save_TV_FILTER_CTL_2);
I915_WRITE(TV_FILTER_CTL_3, tv_priv->save_TV_FILTER_CTL_3);
I915_WRITE(TV_WIN_POS, tv_priv->save_TV_WIN_POS);
I915_WRITE(TV_WIN_SIZE, tv_priv->save_TV_WIN_SIZE);
I915_WRITE(pipeconf_reg, pipeconf);
I915_WRITE(dspcntr_reg, dspcntr);
/* Flush the plane changes */
I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
}
for (i = 0; i < 60; i++)
I915_WRITE(TV_H_LUMA_0 + (i <<2), tv_priv->save_TV_H_LUMA[i]);
for (i = 0; i < 60; i++)
I915_WRITE(TV_H_CHROMA_0 + (i <<2), tv_priv->save_TV_H_CHROMA[i]);
for (i = 0; i < 43; i++)
I915_WRITE(TV_V_LUMA_0 + (i <<2), tv_priv->save_TV_V_LUMA[i]);
for (i = 0; i < 43; i++)
I915_WRITE(TV_V_CHROMA_0 + (i <<2), tv_priv->save_TV_V_CHROMA[i]);
I915_WRITE(TV_DAC, tv_priv->save_TV_DAC);
I915_WRITE(TV_CTL, tv_priv->save_TV_CTL);
}
static const struct tv_mode *
intel_tv_mode_lookup (char *tv_format)
{
@ -1078,7 +941,8 @@ intel_tv_mode_find (struct intel_encoder *intel_encoder)
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
/* Ensure TV refresh is close to desired refresh */
@ -1441,7 +1305,8 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
*/
static void intel_tv_find_better_format(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int i;
@ -1475,9 +1340,9 @@ intel_tv_detect(struct drm_connector *connector)
{
struct drm_crtc *crtc;
struct drm_display_mode mode;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
struct drm_encoder *encoder = &intel_encoder->enc;
int dpms_mode;
int type = tv_priv->type;
@ -1487,10 +1352,12 @@ intel_tv_detect(struct drm_connector *connector)
if (encoder->crtc && encoder->crtc->enabled) {
type = intel_tv_detect_type(encoder->crtc, intel_encoder);
} else {
crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
crtc = intel_get_load_detect_pipe(intel_encoder, connector,
&mode, &dpms_mode);
if (crtc) {
type = intel_tv_detect_type(crtc, intel_encoder);
intel_release_load_detect_pipe(intel_encoder, dpms_mode);
intel_release_load_detect_pipe(intel_encoder, connector,
dpms_mode);
} else
type = -1;
}
@ -1525,7 +1392,8 @@ static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@ -1550,7 +1418,8 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int j, count = 0;
u64 tmp;
@ -1604,11 +1473,9 @@ intel_tv_get_modes(struct drm_connector *connector)
static void
intel_tv_destroy (struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(intel_encoder);
kfree(connector);
}
@ -1617,9 +1484,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
uint64_t val)
{
struct drm_device *dev = connector->dev;
struct intel_encoder *intel_encoder = to_intel_encoder(connector);
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@ -1676,8 +1543,6 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.save = intel_tv_save,
.restore = intel_tv_restore,
.detect = intel_tv_detect,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
@ -1687,12 +1552,15 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
.best_encoder = intel_best_encoder,
.best_encoder = intel_attached_encoder,
};
static void intel_tv_enc_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@ -1741,6 +1609,7 @@ intel_tv_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
@ -1786,7 +1655,13 @@ intel_tv_init(struct drm_device *dev)
return;
}
connector = &intel_encoder->base;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_encoder);
return;
}
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
@ -1794,7 +1669,7 @@ intel_tv_init(struct drm_device *dev)
drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
DRM_MODE_ENCODER_TVDAC);
drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);


@ -22,7 +22,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv50_cursor.o nv50_display.o nv50_fbcon.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
nv17_gpio.o nv50_gpio.o
nv17_gpio.o nv50_gpio.o \
nv50_calc.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o


@ -26,6 +26,7 @@
#define NV_DEBUG_NOTRACE
#include "nouveau_drv.h"
#include "nouveau_hw.h"
#include "nouveau_encoder.h"
/* these defines are made up */
#define NV_CIO_CRE_44_HEADA 0x0
@ -256,6 +257,11 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
struct init_tbl_entry {
char *name;
uint8_t id;
/* Return:
* > 0: success, length of opcode
* 0: success, but abort further parsing of table (INIT_DONE etc)
* < 0: failure, table parsing will be aborted
*/
int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
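A minimal sketch of a handler obeying this contract (the opcode and handler are invented for illustration; ROM16() and the structures are those used throughout this file):

static int
init_example(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
	/* hypothetical opcode: 1 opcode byte + 2 bytes of payload */
	uint16_t payload = ROM16(bios->data[offset + 1]);

	if (!iexec->execute)
		return 3;	/* skipped, but still report our length */

	if (payload == 0xffff)
		return -EINVAL;	/* aborts table parsing with an error */

	/* ... act on the payload here ... */
	return 3;	/* success: advance past our 3 bytes */
}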
@ -709,6 +715,83 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
return dcb_entry;
}
static int
read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
{
uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
int recordoffset = 0, rdofs = 1, wrofs = 0;
uint8_t port_type = 0;
if (!i2ctable)
return -EINVAL;
if (dcb_version >= 0x30) {
if (i2ctable[0] != dcb_version) /* necessary? */
NV_WARN(dev,
"DCB I2C table version mismatch (%02X vs %02X)\n",
i2ctable[0], dcb_version);
dcb_i2c_ver = i2ctable[0];
headerlen = i2ctable[1];
if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
i2c_entries = i2ctable[2];
else
NV_WARN(dev,
"DCB I2C table has more entries than indexable "
"(%d entries, max %d)\n", i2ctable[2],
DCB_MAX_NUM_I2C_ENTRIES);
entry_len = i2ctable[3];
/* [4] is i2c_default_indices, read in parse_dcb_table() */
}
/*
* It's your own fault if you call this function on a DCB 1.1 BIOS --
* the test below is for DCB 1.2
*/
if (dcb_version < 0x14) {
recordoffset = 2;
rdofs = 0;
wrofs = 1;
}
if (index == 0xf)
return 0;
if (index >= i2c_entries) {
NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
index, i2ctable[2]);
return -ENOENT;
}
if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
NV_ERROR(dev, "DCB I2C entry invalid\n");
return -EINVAL;
}
if (dcb_i2c_ver >= 0x30) {
port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
/*
* Fixup for chips using same address offset for read and
* write.
*/
if (port_type == 4) /* seen on C51 */
rdofs = wrofs = 1;
if (port_type >= 5) /* G80+ */
rdofs = wrofs = 0;
}
if (dcb_i2c_ver >= 0x40) {
if (port_type != 5 && port_type != 6)
NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
}
i2c->port_type = port_type;
i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
return 0;
}
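/*
 * Worked example of the offset arithmetic above (values illustrative):
 * for a DCB 3.0 table with headerlen = 5 and entry_len = 4, entry 2
 * starts at i2ctable[5 + 4 * 2] = i2ctable[13]; its port type byte is
 * i2ctable[16] (offset 3 within the entry), and with rdofs = 1 and
 * wrofs = 0 the read and write addresses come from i2ctable[14] and
 * i2ctable[13] respectively.
 */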
static struct nouveau_i2c_chan *
init_i2c_device_find(struct drm_device *dev, int i2c_index)
{
@ -727,6 +810,20 @@ init_i2c_device_find(struct drm_device *dev, int i2c_index)
}
if (i2c_index == 0x80) /* g80+ */
i2c_index = dcb->i2c_default_indices & 0xf;
else
if (i2c_index == 0x81)
i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
if (i2c_index > DCB_MAX_NUM_I2C_ENTRIES) {
NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
return NULL;
}
/* Make sure i2c table entry has been parsed, it may not
* have been if this is a bus not referenced by a DCB encoder
*/
read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
i2c_index, &dcb->i2c[i2c_index]);
return nouveau_i2c_find(dev, i2c_index);
}
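As a worked example (the value is made up): with dcb->i2c_default_indices = 0x31, identifier 0x80 resolves to bus 1 from the low nibble and the newly handled 0x81 to bus 3 from the high nibble, after which the entry for that bus is parsed on demand before the channel lookup.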
@ -818,7 +915,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
return 0;
return -EINVAL;
}
configval = ROM32(bios->data[offset + 11 + config * 4]);
@ -920,7 +1017,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
return 0;
return -EINVAL;
}
freq = ROM16(bios->data[offset + 12 + config * 2]);
@ -1066,6 +1163,126 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
return 2;
}
static int
init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
* INIT_DP_CONDITION opcode: 0x3A ('')
*
* offset (8 bit): opcode
* offset + 1 (8 bit): "sub" opcode
* offset + 2 (8 bit): unknown
*
*/
struct bit_displayport_encoder_table *dpe = NULL;
struct dcb_entry *dcb = bios->display.output;
struct drm_device *dev = bios->dev;
uint8_t cond = bios->data[offset + 1];
int dummy;
BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
if (!iexec->execute)
return 3;
dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
if (!dpe) {
NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
return -EINVAL;
}
switch (cond) {
case 0:
{
struct dcb_connector_table_entry *ent =
&bios->dcb.connector.entry[dcb->connector];
if (ent->type != DCB_CONNECTOR_eDP)
iexec->execute = false;
}
break;
case 1:
case 2:
if (!(dpe->unknown & cond))
iexec->execute = false;
break;
case 5:
{
struct nouveau_i2c_chan *auxch;
int ret;
auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
if (!auxch)
return -ENODEV;
ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
if (ret)
return ret;
if (cond & 1)
iexec->execute = false;
}
break;
default:
NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond);
break;
}
if (iexec->execute)
BIOSLOG(bios, "0x%04X: continuing to execute\n", offset);
else
BIOSLOG(bios, "0x%04X: skipping following commands\n", offset);
return 3;
}
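/*
 * In short: sub-opcode 0 continues only if the active output's
 * connector is eDP, 1 and 2 test the matching flag bit in the BIT
 * displayport encoder table, and 5 reads one byte over the DP AUX
 * channel (address 0xd) and skips the remaining commands when bit 0
 * is set; unknown sub-opcodes merely warn and carry on.
 */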
static int
init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
* INIT_3B opcode: 0x3B ('')
*
* offset (8 bit): opcode
* offset + 1 (8 bit): crtc index
*
*/
uint8_t or = ffs(bios->display.output->or) - 1;
uint8_t index = bios->data[offset + 1];
uint8_t data;
if (!iexec->execute)
return 2;
data = bios_idxprt_rd(bios, 0x3d4, index);
bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or));
return 2;
}
static int
init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
* INIT_3C opcode: 0x3C ('')
*
* offset (8 bit): opcode
* offset + 1 (8 bit): crtc index
*
*/
uint8_t or = ffs(bios->display.output->or) - 1;
uint8_t index = bios->data[offset + 1];
uint8_t data;
if (!iexec->execute)
return 2;
data = bios_idxprt_rd(bios, 0x3d4, index);
bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or));
return 2;
}
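/*
 * INIT_3B clears, and INIT_3C sets, the bit belonging to the active
 * output resource in an indexed CRTC register. A sketch of a shared
 * helper (the name is invented; the accessors are the ones used above):
 */
static void
init_or_bit(struct nvbios *bios, uint16_t offset, bool set)
{
	uint8_t or = ffs(bios->display.output->or) - 1;
	uint8_t index = bios->data[offset + 1];
	uint8_t data = bios_idxprt_rd(bios, 0x3d4, index);

	bios_idxprt_wr(bios, 0x3d4, index,
		       set ? (data | (1 << or)) : (data & ~(1 << or)));
}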
static int
init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
@ -1170,7 +1387,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
return 0;
return -EINVAL;
}
freq = ROM32(bios->data[offset + 11 + config * 4]);
@ -1231,12 +1448,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2];
uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
int len = 4 + count * 3;
struct nouveau_i2c_chan *chan;
struct i2c_msg msg;
int i;
int len = 4 + count * 3;
int ret, i;
if (!iexec->execute)
return len;
@ -1247,35 +1463,34 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
return 0;
return -ENODEV;
for (i = 0; i < count; i++) {
uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
uint8_t reg = bios->data[offset + 4 + i * 3];
uint8_t mask = bios->data[offset + 5 + i * 3];
uint8_t data = bios->data[offset + 6 + i * 3];
uint8_t value;
union i2c_smbus_data val;
msg.addr = i2c_address;
msg.flags = I2C_M_RD;
msg.len = 1;
msg.buf = &value;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
return 0;
ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
I2C_SMBUS_READ, reg,
I2C_SMBUS_BYTE_DATA, &val);
if (ret < 0)
return ret;
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
"Mask: 0x%02X, Data: 0x%02X\n",
offset, i2c_reg, value, mask, data);
offset, reg, val.byte, mask, data);
value = (value & mask) | data;
if (!bios->execute)
continue;
if (bios->execute) {
msg.addr = i2c_address;
msg.flags = 0;
msg.len = 1;
msg.buf = &value;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
return 0;
}
val.byte &= mask;
val.byte |= data;
ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
I2C_SMBUS_WRITE, reg,
I2C_SMBUS_BYTE_DATA, &val);
if (ret < 0)
return ret;
}
return len;
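Two changes are folded into this handler: the VBIOS stores i2c addresses in 8-bit form with the R/W bit in bit 0, so the new '>> 1' yields the 7-bit address the kernel i2c core expects, and the open-coded i2c_transfer() read-modify-write becomes SMBus byte-data transactions. A self-contained sketch of that pattern using the stock i2c_smbus_xfer() API (the helper name is invented):

#include <linux/i2c.h>

static int
rmw_smbus_byte(struct i2c_adapter *adap, u8 addr, u8 reg, u8 mask, u8 data)
{
	union i2c_smbus_data val;
	int ret;

	ret = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, reg,
			     I2C_SMBUS_BYTE_DATA, &val);
	if (ret < 0)
		return ret;

	val.byte = (val.byte & mask) | data;	/* same masking as above */

	return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, reg,
			      I2C_SMBUS_BYTE_DATA, &val);
}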
@ -1301,12 +1516,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2];
uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
int len = 4 + count * 2;
struct nouveau_i2c_chan *chan;
struct i2c_msg msg;
int i;
int len = 4 + count * 2;
int ret, i;
if (!iexec->execute)
return len;
@ -1317,23 +1531,25 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
return 0;
return -ENODEV;
for (i = 0; i < count; i++) {
uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
uint8_t data = bios->data[offset + 5 + i * 2];
uint8_t reg = bios->data[offset + 4 + i * 2];
union i2c_smbus_data val;
val.byte = bios->data[offset + 5 + i * 2];
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
offset, i2c_reg, data);
offset, reg, val.byte);
if (bios->execute) {
msg.addr = i2c_address;
msg.flags = 0;
msg.len = 1;
msg.buf = &data;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
return 0;
}
if (!bios->execute)
continue;
ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
I2C_SMBUS_WRITE, reg,
I2C_SMBUS_BYTE_DATA, &val);
if (ret < 0)
return ret;
}
return len;
@ -1357,7 +1573,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2];
uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
int len = 4 + count;
struct nouveau_i2c_chan *chan;
@ -1374,7 +1590,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
chan = init_i2c_device_find(bios->dev, i2c_index);
if (!chan)
return 0;
return -ENODEV;
for (i = 0; i < count; i++) {
data[i] = bios->data[offset + 4 + i];
@ -1388,7 +1604,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.len = count;
msg.buf = data;
if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
return 0;
return -EIO;
}
return len;
@ -1427,7 +1643,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
return 0;
return -EINVAL;
bios_wr32(bios, reg,
tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@ -1471,7 +1687,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
reg = get_tmds_index_reg(bios->dev, mlv);
if (!reg)
return 0;
return -EINVAL;
for (i = 0; i < count; i++) {
uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@ -1946,7 +2162,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
uint32_t reg, data;
if (bios->major_version > 2)
return 0;
return -ENODEV;
bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@ -2001,7 +2217,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
int clock;
if (bios->major_version > 2)
return 0;
return -ENODEV;
clock = ROM16(bios->data[meminitoffs + 4]) * 10;
setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@ -2034,7 +2250,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
if (bios->major_version > 2)
return 0;
return -ENODEV;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
@ -2656,7 +2872,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Zero block length - has the M table "
"been parsed?\n", offset);
return 0;
return -EINVAL;
}
strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
@ -2840,14 +3056,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_AUXCH: no active output\n");
return 0;
return -EINVAL;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
return 0;
return -ENODEV;
}
if (!iexec->execute)
@ -2860,7 +3076,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
return 0;
return ret;
}
data &= bios->data[offset + 0];
@ -2869,7 +3085,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
return 0;
return ret;
}
}
@ -2899,14 +3115,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
return 0;
return -EINVAL;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
return 0;
return -ENODEV;
}
if (!iexec->execute)
@ -2917,7 +3133,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
if (ret) {
NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
return 0;
return ret;
}
}
@ -2934,6 +3150,9 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_COPY" , 0x37, init_copy },
{ "INIT_NOT" , 0x38, init_not },
{ "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
{ "INIT_DP_CONDITION" , 0x3A, init_dp_condition },
{ "INIT_OP_3B" , 0x3B, init_op_3b },
{ "INIT_OP_3C" , 0x3C, init_op_3c },
{ "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
{ "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
{ "INIT_PLL2" , 0x4B, init_pll2 },
@ -3001,7 +3220,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
* is changed back to EXECUTE.
*/
int count = 0, i, res;
int count = 0, i, ret;
uint8_t id;
/*
@ -3016,26 +3235,33 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
;
if (itbl_entry[i].name) {
BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
offset, itbl_entry[i].id, itbl_entry[i].name);
/* execute eventual command handler */
res = (*itbl_entry[i].handler)(bios, offset, iexec);
if (!res)
break;
/*
* Add the offset of the current command including all data
* of that command. The offset will then be pointing on the
* next op code.
*/
offset += res;
} else {
if (!itbl_entry[i].name) {
NV_ERROR(bios->dev,
"0x%04X: Init table command not found: "
"0x%02X\n", offset, id);
return -ENOENT;
}
BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset,
itbl_entry[i].id, itbl_entry[i].name);
/* execute eventual command handler */
ret = (*itbl_entry[i].handler)(bios, offset, iexec);
if (ret < 0) {
NV_ERROR(bios->dev, "0x%04X: Failed parsing init "
"table opcode: %s %d\n", offset,
itbl_entry[i].name, ret);
}
if (ret <= 0)
break;
/*
* Add the offset of the current command including all data
* of that command. The offset will then be pointing on the
* next op code.
*/
offset += ret;
}
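/*
 * Example of the advance step: INIT_ZM_I2C_BYTE above returns
 * len = 4 + count * 2, so an entry with count = 3 resumes parsing
 * 10 bytes later; a return of 0 (INIT_DONE and friends) still ends
 * the walk quietly, and a negative errno now also logs which opcode
 * failed.
 */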
if (offset >= bios->length)
@ -4285,31 +4511,32 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
break;
}
#if 0 /* for easy debugging */
ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
ErrorF("pll.refclk: %d\n", pll_lim->refclk);
#endif
NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
if (pll_lim->vco2.maxfreq) {
NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
}
if (!pll_lim->max_p) {
NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p);
NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias);
} else {
NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p);
NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p);
}
NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk);
return 0;
}
@ -4953,79 +5180,6 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
return 0;
}
static int
read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
{
uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
int recordoffset = 0, rdofs = 1, wrofs = 0;
uint8_t port_type = 0;
if (!i2ctable)
return -EINVAL;
if (dcb_version >= 0x30) {
if (i2ctable[0] != dcb_version) /* necessary? */
NV_WARN(dev,
"DCB I2C table version mismatch (%02X vs %02X)\n",
i2ctable[0], dcb_version);
dcb_i2c_ver = i2ctable[0];
headerlen = i2ctable[1];
if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
i2c_entries = i2ctable[2];
else
NV_WARN(dev,
"DCB I2C table has more entries than indexable "
"(%d entries, max %d)\n", i2ctable[2],
DCB_MAX_NUM_I2C_ENTRIES);
entry_len = i2ctable[3];
/* [4] is i2c_default_indices, read in parse_dcb_table() */
}
/*
* It's your own fault if you call this function on a DCB 1.1 BIOS --
* the test below is for DCB 1.2
*/
if (dcb_version < 0x14) {
recordoffset = 2;
rdofs = 0;
wrofs = 1;
}
if (index == 0xf)
return 0;
if (index >= i2c_entries) {
NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
index, i2ctable[2]);
return -ENOENT;
}
if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
NV_ERROR(dev, "DCB I2C entry invalid\n");
return -EINVAL;
}
if (dcb_i2c_ver >= 0x30) {
port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
/*
* Fixup for chips using same address offset for read and
* write.
*/
if (port_type == 4) /* seen on C51 */
rdofs = wrofs = 1;
if (port_type >= 5) /* G80+ */
rdofs = wrofs = 0;
}
if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
i2c->port_type = port_type;
i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
return 0;
}
static struct dcb_gpio_entry *
new_gpio_entry(struct nvbios *bios)
{


@ -35,6 +35,7 @@
#define DCB_LOC_ON_CHIP 0
struct dcb_i2c_entry {
uint32_t entry;
uint8_t port_type;
uint8_t read, write;
struct nouveau_i2c_chan *chan;


@ -160,11 +160,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ttm_bo_type_device, &nvbo->placement, align, 0,
false, NULL, size, nouveau_bo_del_ttm);
nvbo->channel = NULL;
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
nvbo->channel = NULL;
spin_lock(&dev_priv->ttm.bo_list_lock);
list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
nouveau_bo_placement_set(nvbo, memtype, 0);
ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
man->io_addr = NULL;
man->io_offset = drm_get_resource_start(dev, 1);
man->io_size = drm_get_resource_len(dev, 1);
if (man->io_size > dev_priv->vram_size)
man->io_size = dev_priv->vram_size;
man->gpu_offset = dev_priv->vm_vram_base;
break;
case TTM_PL_TT:
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
break;
@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
dev_priv->gart_info.type);
return -EINVAL;
}
man->io_offset = dev_priv->gart_info.aper_base;
man->io_size = dev_priv->gart_info.aper_size;
man->io_addr = NULL;
man->gpu_offset = dev_priv->vm_gart_base;
break;
default:
@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_bo *nvbo, bool evict, bool no_wait,
struct nouveau_bo *nvbo, bool evict,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct nouveau_fence *fence = NULL;
@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
evict, no_wait, new_mem);
evict, no_wait_reserve, no_wait_gpu, new_mem);
if (nvbo->channel && nvbo->channel != chan)
ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
int no_wait, struct ttm_mem_reg *new_mem)
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
dst_offset += (PAGE_SIZE * line_count);
}
return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait, struct ttm_mem_reg *new_mem)
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (tmp_mem.mm_node) {
spin_lock(&bo->bdev->glob->lru_lock);
@ -618,7 +608,8 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait, struct ttm_mem_reg *new_mem)
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (ret)
goto out;
@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait, struct ttm_mem_reg *new_mem)
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Software copy if the card isn't up and running yet. */
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
!dev_priv->channel) {
ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else
ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (!ret)
goto out;
/* Fallback to software copy. */
ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (ret)
@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return 0;
}
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
mem->bus.addr = NULL;
mem->bus.offset = 0;
mem->bus.size = mem->num_pages << PAGE_SHIFT;
mem->bus.base = 0;
mem->bus.is_iomem = false;
if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
/* System memory */
return 0;
case TTM_PL_TT:
#if __OS_HAS_AGP
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
mem->bus.base = dev_priv->gart_info.aper_base;
mem->bus.is_iomem = true;
}
#endif
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
mem->bus.base = drm_get_resource_start(dev, 1);
mem->bus.is_iomem = true;
break;
default:
return -EINVAL;
}
return 0;
}
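/*
 * The reserve hook only records addressing metadata; consumers combine
 * base and offset to reach the buffer. A hypothetical illustration:
 */
static u64
example_bus_addr(struct ttm_mem_reg *mem)
{
	if (!mem->bus.is_iomem)
		return 0;	/* plain system RAM, nothing behind a BAR */
	return (u64)mem->bus.base + mem->bus.offset;
}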
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
return 0;
}
struct ttm_bo_driver nouveau_bo_driver = {
.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
.invalidate_caches = nouveau_bo_invalidate_caches,
@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
.sync_obj_flush = nouveau_fence_flush,
.sync_obj_unref = nouveau_fence_unref,
.sync_obj_ref = nouveau_fence_ref,
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
};


@ -843,6 +843,7 @@ nouveau_connector_create(struct drm_device *dev,
switch (dcb->type) {
case DCB_CONNECTOR_VGA:
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
@ -854,6 +855,17 @@ nouveau_connector_create(struct drm_device *dev,
case DCB_CONNECTOR_TV_3:
nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
break;
case DCB_CONNECTOR_DP:
case DCB_CONNECTOR_eDP:
case DCB_CONNECTOR_HDMI_0:
case DCB_CONNECTOR_HDMI_1:
case DCB_CONNECTOR_DVI_I:
case DCB_CONNECTOR_DVI_D:
if (dev_priv->card_type >= NV_50)
connector->polled = DRM_CONNECTOR_POLL_HPD;
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/* fall-through */
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;


@ -33,6 +33,8 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include <ttm/ttm_page_alloc.h>
static int
nouveau_debugfs_channel_info(struct seq_file *m, void *data)
{
@ -159,6 +161,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)


@ -34,10 +34,6 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
struct drm_device *dev = drm_fb->dev;
if (drm_fb->fbdev)
nouveau_fbcon_remove(dev, drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(fb->nvbo->gem);
@ -61,27 +57,20 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
.create_handle = nouveau_user_framebuffer_create_handle,
};
struct drm_framebuffer *
nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
struct drm_mode_fb_cmd *mode_cmd)
int
nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
{
struct nouveau_framebuffer *fb;
int ret;
fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
if (!fb)
return NULL;
ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
if (ret) {
kfree(fb);
return NULL;
return ret;
}
drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
fb->nvbo = nvbo;
return &fb->base;
drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
nouveau_fb->nvbo = nvbo;
return 0;
}
static struct drm_framebuffer *
@ -89,24 +78,29 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
int ret;
gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (!gem)
return NULL;
fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
if (!fb) {
nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
if (!nouveau_fb)
return NULL;
ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
if (ret) {
drm_gem_object_unreference(gem);
return NULL;
}
return fb;
return &nouveau_fb->base;
}
const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
.fb_changed = nouveau_fbcon_probe,
.output_poll_changed = nouveau_fbcon_output_poll_changed,
};


@ -153,7 +153,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct drm_crtc *crtc;
uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@ -163,8 +162,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
return 0;
NV_INFO(dev, "Disabling fbcon acceleration...\n");
fbdev_flags = dev_priv->fbdev_info->flags;
dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@ -230,9 +228,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
}
acquire_console_sem();
fb_set_suspend(dev_priv->fbdev_info, 1);
nouveau_fbcon_set_suspend(dev, 1);
release_console_sem();
dev_priv->fbdev_info->flags = fbdev_flags;
nouveau_fbcon_restore_accel(dev);
return 0;
out_abort:
@ -250,14 +248,12 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct drm_crtc *crtc;
uint32_t fbdev_flags;
int ret, i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
fbdev_flags = dev_priv->fbdev_info->flags;
dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "We're back, enabling device...\n");
pci_set_power_state(pdev, PCI_D0);
@ -332,13 +328,14 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
acquire_console_sem();
fb_set_suspend(dev_priv->fbdev_info, 0);
nouveau_fbcon_set_suspend(dev, 0);
release_console_sem();
nouveau_fbcon_zfill(dev);
nouveau_fbcon_zfill_all(dev);
drm_helper_resume_force_mode(dev);
dev_priv->fbdev_info->flags = fbdev_flags;
nouveau_fbcon_restore_accel(dev);
return 0;
}


@ -535,6 +535,7 @@ struct drm_nouveau_private {
struct fb_info *fbdev_info;
int fifo_alloc_count;
struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
struct nouveau_engine engine;
@ -621,6 +622,9 @@ struct drm_nouveau_private {
struct {
struct dentry *channel_root;
} debugfs;
struct nouveau_fbdev *nfbdev;
struct apertures_struct *apertures;
};
static inline struct drm_nouveau_private *
@ -1166,6 +1170,12 @@ int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
/* nv50_calc. */
int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
int *N1, int *M1, int *N2, int *M2, int *P);
int nv50_calc_pll2(struct drm_device *, struct pll_lims *,
int clk, int *N, int *fN, int *M, int *P);
#ifndef ioread32_native
#ifdef __BIG_ENDIAN
#define ioread16_native ioread16be


@ -48,6 +48,8 @@ struct nouveau_encoder {
union {
struct {
int mc_unknown;
uint32_t unk0;
uint32_t unk1;
int dpcd_version;
int link_nr;
int link_bw;


@ -40,8 +40,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
struct drm_framebuffer *
nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
struct drm_mode_fb_cmd *);
int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
#endif /* __NOUVEAU_FB_H__ */


@ -52,8 +52,8 @@
static int
nouveau_fbcon_sync(struct fb_info *info)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
int ret, i;
@ -97,7 +97,6 @@ static struct fb_ops nouveau_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@ -111,7 +110,6 @@ static struct fb_ops nv04_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv04_fbcon_fillrect,
.fb_copyarea = nv04_fbcon_copyarea,
.fb_imageblit = nv04_fbcon_imageblit,
@ -125,7 +123,6 @@ static struct fb_ops nv50_fbcon_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_setcolreg = drm_fb_helper_setcolreg,
.fb_fillrect = nv50_fbcon_fillrect,
.fb_copyarea = nv50_fbcon_copyarea,
.fb_imageblit = nv50_fbcon_imageblit,
@ -155,54 +152,10 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = nv_crtc->lut.b[regno];
}
static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.gamma_set = nouveau_fbcon_gamma_set,
.gamma_get = nouveau_fbcon_gamma_get
};
#if defined(__i386__) || defined(__x86_64__)
static bool
nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
static void
nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
struct pci_dev *pdev = dev->pdev;
int ramin;
if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
return false;
if (screen_info.lfb_base < pci_resource_start(pdev, 1))
goto not_fb;
if (screen_info.lfb_base + screen_info.lfb_size >=
pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
goto not_fb;
return true;
not_fb:
ramin = 2;
if (pci_resource_len(pdev, ramin) == 0) {
ramin = 3;
if (pci_resource_len(pdev, ramin) == 0)
return false;
}
if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
return false;
if (screen_info.lfb_base + screen_info.lfb_size >=
pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
return false;
return true;
}
#endif
void
nouveau_fbcon_zfill(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct fb_info *info = dev_priv->fbdev_info;
struct fb_info *info = nfbdev->helper.fbdev;
struct fb_fillrect rect;
/* Clear the entire fbcon. The drm will program every connector
@ -218,28 +171,27 @@ nouveau_fbcon_zfill(struct drm_device *dev)
}
static int
nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
uint32_t fb_height, uint32_t surface_width,
uint32_t surface_height, uint32_t surface_depth,
uint32_t surface_bpp, struct drm_framebuffer **pfb)
nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct nouveau_fbcon_par *par;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd mode_cmd;
struct device *device = &dev->pdev->dev;
struct pci_dev *pdev = dev->pdev;
struct device *device = &pdev->dev;
int size, ret;
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.bpp = surface_bpp;
mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
mode_cmd.depth = surface_depth;
mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
@ -268,31 +220,28 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
mutex_lock(&dev->struct_mutex);
fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
if (!fb) {
ret = -ENOMEM;
NV_ERROR(dev, "failed to allocate fb.\n");
goto out_unref;
}
list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
nouveau_fb = nouveau_framebuffer(fb);
*pfb = fb;
info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
goto out_unref;
}
par = info->par;
par->helper.funcs = &nouveau_fbcon_helper_funcs;
par->helper.dev = dev;
ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
if (ret)
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
ret = -ENOMEM;
goto out_unref;
dev_priv->fbdev_info = info;
}
info->par = nfbdev;
nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
nouveau_fb = &nfbdev->nouveau_fb;
fb = &nouveau_fb->base;
/* setup helper */
nfbdev->helper.fb = fb;
nfbdev->helper.fbdev = info;
strcpy(info->fix.id, "nouveaufb");
if (nouveau_nofbaccel)
@ -310,31 +259,17 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->screen_size = size;
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
/* FIXME: we really shouldn't expose mmio space at all */
info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
info->fix.mmio_start = pci_resource_start(pdev, 1);
info->fix.mmio_len = pci_resource_len(pdev, 1);
/* Set aperture base/size for vesafb takeover */
#if defined(__i386__) || defined(__x86_64__)
if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
/* Some NVIDIA VBIOS' are stupid and decide to put the
* framebuffer in the middle of the PRAMIN BAR for
* whatever reason. We need to know the exact lfb_base
* to get vesafb kicked off, and the only reliable way
* we have left is to find out lfb_base the same way
* vesafb did.
*/
info->aperture_base = screen_info.lfb_base;
info->aperture_size = screen_info.lfb_size;
if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
info->aperture_size *= 65536;
} else
#endif
{
info->aperture_base = info->fix.mmio_start;
info->aperture_size = info->fix.mmio_len;
info->apertures = dev_priv->apertures;
if (!info->apertures) {
ret = -ENOMEM;
goto out_unref;
}
info->pixmap.size = 64*1024;
@ -343,11 +278,6 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
fb->fbdev = info;
par->nouveau_fb = nouveau_fb;
par->dev = dev;
if (dev_priv->channel && !nouveau_nofbaccel) {
switch (dev_priv->card_type) {
case NV_50:
@ -361,7 +291,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
};
}
nouveau_fbcon_zfill(dev);
nouveau_fbcon_zfill(dev, nfbdev);
/* To allow resizeing without swapping buffers */
NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
@ -379,44 +309,123 @@ out:
return ret;
}
int
nouveau_fbcon_probe(struct drm_device *dev)
static int
nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
NV_DEBUG_KMS(dev, "\n");
struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
int new_fb = 0;
int ret;
return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
if (!helper->fb) {
ret = nouveau_fbcon_create(nfbdev, sizes);
if (ret)
return ret;
new_fb = 1;
}
return new_fb;
}
void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
}
int
nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
struct fb_info *info;
if (!fb)
return -EINVAL;
info = fb->fbdev;
if (info) {
struct nouveau_fbcon_par *par = info->par;
if (nfbdev->helper.fbdev) {
info = nfbdev->helper.fbdev;
unregister_framebuffer(info);
nouveau_bo_unmap(nouveau_fb->nvbo);
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
if (par)
drm_fb_helper_free(&par->helper);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
}
drm_fb_helper_fini(&nfbdev->helper);
drm_framebuffer_cleanup(&nouveau_fb->base);
return 0;
}
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
}
static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.gamma_set = nouveau_fbcon_gamma_set,
.gamma_get = nouveau_fbcon_gamma_get,
.fb_probe = nouveau_fbcon_find_or_create_single,
};
int nouveau_fbcon_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fbdev *nfbdev;
nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
if (!nfbdev)
return -ENOMEM;
nfbdev->dev = dev;
dev_priv->nfbdev = nfbdev;
nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
drm_fb_helper_init(dev, &nfbdev->helper, 2, 4);
drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
drm_fb_helper_initial_config(&nfbdev->helper, 32);
return 0;
}
void nouveau_fbcon_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (!dev_priv->nfbdev)
return;
nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
kfree(dev_priv->nfbdev);
dev_priv->nfbdev = NULL;
}
void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
}
void nouveau_fbcon_restore_accel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
}
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
}
void nouveau_fbcon_zfill_all(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
}


@ -29,16 +29,16 @@
#include "drm_fb_helper.h"
struct nouveau_fbcon_par {
#include "nouveau_fb.h"
struct nouveau_fbdev {
struct drm_fb_helper helper;
struct nouveau_framebuffer nouveau_fb;
struct list_head fbdev_list;
struct drm_device *dev;
struct nouveau_framebuffer *nouveau_fb;
unsigned int saved_flags;
};
int nouveau_fbcon_probe(struct drm_device *dev);
int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void nouveau_fbcon_restore(void);
void nouveau_fbcon_zfill(struct drm_device *dev);
void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@ -50,5 +50,14 @@ void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
int nouveau_fbcon_init(struct drm_device *dev);
void nouveau_fbcon_fini(struct drm_device *dev);
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
void nouveau_fbcon_zfill_all(struct drm_device *dev);
void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
void nouveau_fbcon_restore_accel(struct drm_device *dev);
void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
#endif /* __NV50_FBCON_H__ */


@ -57,6 +57,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
}
ttm_bo_unref(&bo);
drm_gem_object_release(gem);
kfree(gem);
}
int
@ -382,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
nvbo->channel = chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
false, false);
false, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
NV_ERROR(dev, "fail ttm_validate\n");


@ -68,13 +68,12 @@ nouveau_grctx_prog_load(struct drm_device *dev)
return ret;
}
pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!pgraph->ctxprog) {
NV_ERROR(dev, "OOM copying ctxprog\n");
release_firmware(fw);
return -ENOMEM;
}
memcpy(pgraph->ctxprog, fw->data, fw->size);
cp = pgraph->ctxprog;
if (le32_to_cpu(cp->signature) != 0x5043564e ||
@ -97,14 +96,13 @@ nouveau_grctx_prog_load(struct drm_device *dev)
return ret;
}
pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!pgraph->ctxvals) {
NV_ERROR(dev, "OOM copying ctxvals\n");
release_firmware(fw);
nouveau_grctx_fini(dev);
return -ENOMEM;
}
memcpy(pgraph->ctxvals, fw->data, fw->size);
cv = (void *)pgraph->ctxvals;
if (le32_to_cpu(cv->signature) != 0x5643564e ||


@ -254,16 +254,27 @@ struct nouveau_i2c_chan *
nouveau_i2c_find(struct drm_device *dev, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
if (index >= DCB_MAX_NUM_I2C_ENTRIES)
return NULL;
if (!bios->dcb.i2c[index].chan) {
if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
return NULL;
if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
uint32_t reg = 0xe500, val;
if (i2c->port_type == 6) {
reg += i2c->read * 0x50;
val = 0x2002;
} else {
reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
val = 0xe001;
}
nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val);
}
return bios->dcb.i2c[index].chan;
if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
return NULL;
return i2c->chan;
}


@ -1204,7 +1204,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t status, fbdev_flags = 0;
uint32_t status;
unsigned long flags;
status = nv_rd32(dev, NV03_PMC_INTR_0);
@ -1213,11 +1213,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
if (dev_priv->fbdev_info) {
fbdev_flags = dev_priv->fbdev_info->flags;
dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
}
if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
nouveau_fifo_irq_handler(dev);
status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
@ -1247,9 +1242,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
if (status)
NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
if (dev_priv->fbdev_info)
dev_priv->fbdev_info->flags = fbdev_flags;
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return IRQ_HANDLED;


@ -826,6 +826,7 @@
#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK128(i,l) (0x0061c128 + (i) * 0x800 + (l) * 0x80)
#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)


@ -34,6 +34,7 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_fbcon.h"
#include "nv50_display.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
@ -515,8 +516,10 @@ nouveau_card_init(struct drm_device *dev)
dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_helper_initial_config(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
nouveau_fbcon_init(dev);
drm_kms_helper_poll_init(dev);
}
return 0;
@ -563,6 +566,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
nouveau_backlight_exit(dev);
if (dev_priv->channel) {
@ -637,6 +641,48 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
#endif
}
static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
{
struct pci_dev *pdev = dev->pdev;
struct apertures_struct *aper = alloc_apertures(3);
if (!aper)
return NULL;
aper->ranges[0].base = pci_resource_start(pdev, 1);
aper->ranges[0].size = pci_resource_len(pdev, 1);
aper->count = 1;
if (pci_resource_len(pdev, 2)) {
aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
aper->count++;
}
if (pci_resource_len(pdev, 3)) {
aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
aper->count++;
}
return aper;
}
static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
bool primary = false;
dev_priv->apertures = nouveau_get_apertures(dev);
if (!dev_priv->apertures)
return -ENOMEM;
#ifdef CONFIG_X86
primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
return 0;
}
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
struct drm_nouveau_private *dev_priv;
@ -724,6 +770,12 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
int ret = nouveau_remove_conflicting_drivers(dev);
if (ret)
return ret;
}
/* map larger RAMIN aperture on NV40 cards */
dev_priv->ramin = NULL;
if (dev_priv->card_type >= NV_40) {
@ -794,6 +846,8 @@ int nouveau_unload(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_kms_helper_poll_fini(dev);
nouveau_fbcon_fini(dev);
if (dev_priv->card_type >= NV_50)
nv50_display_destroy(dev);
else


@ -30,8 +30,8 @@
void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@ -57,8 +57,8 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@ -91,8 +91,8 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t fg;
@ -179,8 +179,8 @@ nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
int
nv04_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
const int sub = NvSubCtxSurf2D;
@ -236,7 +236,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
if (ret)
return ret;
ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
0x009f : 0x005f, NvImageBlit);
if (ret)
return ret;


@ -532,9 +532,82 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
return 0;
}
static int
nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
/*
* Software methods, why they are needed, and how they all work:
*
* NV04 and NV05 keep most of the state in PGRAPH context itself, but some
* 2d engine settings are kept inside the grobjs themselves. The grobjs are
* 3 words long on both. grobj format on NV04 is:
*
* word 0:
* - bits 0-7: class
* - bit 12: color key active
* - bit 13: clip rect active
* - bit 14: if set, destination surface is swizzled and taken from buffer 5
* [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
* from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
* NV03_CONTEXT_SURFACE_DST].
* - bits 15-17: 2d operation [aka patch config]
* - bit 24: patch valid [enables rendering using this object]
* - bit 25: surf3d valid [for tex_tri and multitex_tri only]
* word 1:
* - bits 0-1: mono format
* - bits 8-13: color format
* - bits 16-31: DMA_NOTIFY instance
* word 2:
* - bits 0-15: DMA_A instance
* - bits 16-31: DMA_B instance
*
* On NV05 it's:
*
* word 0:
* - bits 0-7: class
* - bit 12: color key active
* - bit 13: clip rect active
* - bit 14: if set, destination surface is swizzled and taken from buffer 5
* [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
* from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
* NV03_CONTEXT_SURFACE_DST].
* - bits 15-17: 2d operation [aka patch config]
* - bits 20-22: dither mode
* - bit 24: patch valid [enables rendering using this object]
* - bit 25: surface_dst/surface_color/surf2d/surf3d valid
* - bit 26: surface_src/surface_zeta valid
* - bit 27: pattern valid
* - bit 28: rop valid
* - bit 29: beta1 valid
* - bit 30: beta4 valid
* word 1:
* - bits 0-1: mono format
* - bits 8-13: color format
* - bits 16-31: DMA_NOTIFY instance
* word 2:
* - bits 0-15: DMA_A instance
* - bits 16-31: DMA_B instance
*
* NV05 will set/unset the relevant valid bits when you poke the relevant
* object-binding methods with object of the proper type, or with the NULL
* type. It'll only allow rendering using the grobj if all needed objects
* are bound. The needed set of objects depends on selected operation: for
* example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
*
* NV04 doesn't have these methods implemented at all, and doesn't have the
* relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
* is set. So we have to emulate them in software, internally keeping the
* same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
* but the last word isn't actually used for anything, we abuse it for this
* purpose.
*
* Actually, NV05 can optionally check bit 24 too, but we disable this since
* there's no use for it.
*
* For unknown reasons, NV04 implements surf3d binding in hardware as an
* exception. Also for unknown reasons, NV04 doesn't implement the clipping
* methods on the surf3d object, so we have to emulate them too.
*/
static void
nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
{
struct drm_device *dev = chan->dev;
uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
@ -542,42 +615,509 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
uint32_t tmp;
tmp = nv_ri32(dev, instance);
tmp &= ~0x00038000;
tmp |= ((data & 7) << 15);
tmp &= ~mask;
tmp |= value;
nv_wi32(dev, instance, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
}
static void
nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
{
struct drm_device *dev = chan->dev;
uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
uint32_t tmp, ctx1;
int class, op, valid = 1;
ctx1 = nv_ri32(dev, instance);
class = ctx1 & 0xff;
op = (ctx1 >> 15) & 7;
tmp = nv_ri32(dev, instance + 0xc);
tmp &= ~mask;
tmp |= value;
nv_wi32(dev, instance + 0xc, tmp);
/* check for valid surf2d/surf_dst/surf_color */
if (!(tmp & 0x02000000))
valid = 0;
/* check for valid surf_src/surf_zeta */
if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
valid = 0;
switch (op) {
/* SRCCOPY_AND, SRCCOPY: no extra objects required */
case 0:
case 3:
break;
/* ROP_AND: requires pattern and rop */
case 1:
if (!(tmp & 0x18000000))
valid = 0;
break;
/* BLEND_AND: requires beta1 */
case 2:
if (!(tmp & 0x20000000))
valid = 0;
break;
/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
case 4:
case 5:
if (!(tmp & 0x40000000))
valid = 0;
break;
}
nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
}
static int
nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
if (data > 5)
return 1;
/* Old versions of the objects only accept first three operations. */
if (data > 2 && grclass < 0x40)
return 1;
nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
/* changing operation changes set of objects needed for validation */
nv04_graph_set_ctx_val(chan, 0, 0);
return 0;
}
static int
nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
uint32_t min = data & 0xffff, max;
uint32_t w = data >> 16;
if (min & 0x8000)
/* too large */
return 1;
if (w & 0x8000)
/* yes, it accepts negative for some reason. */
w |= 0xffff0000;
max = min + w;
max &= 0x3ffff;
nv_wr32(chan->dev, 0x40053c, min);
nv_wr32(chan->dev, 0x400544, max);
return 0;
}
static int
nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
uint32_t min = data & 0xffff, max;
uint32_t w = data >> 16;
if (min & 0x8000)
/* too large */
return 1;
if (w & 0x8000)
/* yes, it accepts negative for some reason. */
w |= 0xffff0000;
max = min + w;
max &= 0x3ffff;
nv_wr32(chan->dev, 0x400540, min);
nv_wr32(chan->dev, 0x400548, max);
return 0;
}
static int
nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx1(chan, 0x00004000, 0);
nv04_graph_set_ctx_val(chan, 0x02000000, 0);
return 0;
case 0x42:
nv04_graph_set_ctx1(chan, 0x00004000, 0);
nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx1(chan, 0x00004000, 0);
nv04_graph_set_ctx_val(chan, 0x02000000, 0);
return 0;
case 0x42:
nv04_graph_set_ctx1(chan, 0x00004000, 0);
nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
return 0;
case 0x52:
nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x08000000, 0);
return 0;
case 0x18:
nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x08000000, 0);
return 0;
case 0x44:
nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x10000000, 0);
return 0;
case 0x43:
nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x20000000, 0);
return 0;
case 0x12:
nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x40000000, 0);
return 0;
case 0x72:
nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x02000000, 0);
return 0;
case 0x58:
nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x04000000, 0);
return 0;
case 0x59:
nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x02000000, 0);
return 0;
case 0x5a:
nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx_val(chan, 0x04000000, 0);
return 0;
case 0x5b:
nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx1(chan, 0x2000, 0);
return 0;
case 0x19:
nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
return 0;
}
return 1;
}
static int
nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
switch (nv_ri32(chan->dev, data << 4) & 0xff) {
case 0x30:
nv04_graph_set_ctx1(chan, 0x1000, 0);
return 0;
/* Yes, for some reason even the old versions of objects
* accept 0x57 and not 0x17. Consistency be damned.
*/
case 0x57:
nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
return 0;
}
return 1;
}
static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
{ 0x0150, nv04_graph_mthd_set_ref },
{}
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
{ 0x0184, nv04_graph_mthd_bind_nv01_patt },
{ 0x0188, nv04_graph_mthd_bind_rop },
{ 0x018c, nv04_graph_mthd_bind_beta1 },
{ 0x0190, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_beta4 },
{ 0x0198, nv04_graph_mthd_bind_surf2d },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
{ 0x0184, nv04_graph_mthd_bind_chroma },
{ 0x0188, nv04_graph_mthd_bind_clip },
{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
{ 0x0190, nv04_graph_mthd_bind_rop },
{ 0x0194, nv04_graph_mthd_bind_beta1 },
{ 0x0198, nv04_graph_mthd_bind_surf_dst },
{ 0x019c, nv04_graph_mthd_bind_surf_src },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
{ 0x0184, nv04_graph_mthd_bind_chroma },
{ 0x0188, nv04_graph_mthd_bind_clip },
{ 0x018c, nv04_graph_mthd_bind_nv04_patt },
{ 0x0190, nv04_graph_mthd_bind_rop },
{ 0x0194, nv04_graph_mthd_bind_beta1 },
{ 0x0198, nv04_graph_mthd_bind_beta4 },
{ 0x019c, nv04_graph_mthd_bind_surf2d },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
{ 0x0188, nv04_graph_mthd_bind_chroma },
{ 0x018c, nv04_graph_mthd_bind_clip },
{ 0x0190, nv04_graph_mthd_bind_nv04_patt },
{ 0x0194, nv04_graph_mthd_bind_rop },
{ 0x0198, nv04_graph_mthd_bind_beta1 },
{ 0x019c, nv04_graph_mthd_bind_beta4 },
{ 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
{ 0x03e4, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
{ 0x0184, nv04_graph_mthd_bind_chroma },
{ 0x0188, nv04_graph_mthd_bind_clip },
{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
{ 0x0190, nv04_graph_mthd_bind_rop },
{ 0x0194, nv04_graph_mthd_bind_beta1 },
{ 0x0198, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
{ 0x0184, nv04_graph_mthd_bind_chroma },
{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
{ 0x0184, nv04_graph_mthd_bind_chroma },
{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_beta4 },
{ 0x0198, nv04_graph_mthd_bind_surf2d },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_surf_dst },
{ 0x0304, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_beta4 },
{ 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
{ 0x0304, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
{ 0x0184, nv04_graph_mthd_bind_clip },
{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_surf_dst },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
{ 0x0184, nv04_graph_mthd_bind_clip },
{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
{ 0x018c, nv04_graph_mthd_bind_rop },
{ 0x0190, nv04_graph_mthd_bind_beta1 },
{ 0x0194, nv04_graph_mthd_bind_beta4 },
{ 0x0198, nv04_graph_mthd_bind_surf2d },
{ 0x02fc, nv04_graph_mthd_set_operation },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
{ 0x0188, nv04_graph_mthd_bind_clip },
{ 0x018c, nv04_graph_mthd_bind_surf_color },
{ 0x0190, nv04_graph_mthd_bind_surf_zeta },
{},
};
static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
{ 0x02f8, nv04_graph_mthd_surf3d_clip_h },
{ 0x02fc, nv04_graph_mthd_surf3d_clip_v },
{},
};
struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
{ 0x0039, false, NULL },
{ 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
{ 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
{ 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
{ 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
{ 0x0038, false, NULL }, /* dvd subpicture */
{ 0x0039, false, NULL }, /* m2mf */
{ 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
{ 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
{ 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
{ 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
{ 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
{ 0x0064, false, NULL }, /* nv05 iifc */
{ 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
{ 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
{ 0x0065, false, NULL }, /* nv05 ifc */
{ 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
{ 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
{ 0x0066, false, NULL }, /* nv05 sifc */
{ 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
{ 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
{ 0x0030, false, NULL }, /* null */
{ 0x0042, false, NULL }, /* surf2d */
{ 0x0043, false, NULL }, /* rop */
{ 0x0012, false, NULL }, /* beta1 */
{ 0x0072, false, NULL }, /* beta4 */
{ 0x0019, false, NULL }, /* cliprect */
{ 0x0044, false, NULL }, /* pattern */
{ 0x0018, false, NULL }, /* nv01 pattern */
{ 0x0044, false, NULL }, /* nv04 pattern */
{ 0x0052, false, NULL }, /* swzsurf */
{ 0x0053, false, NULL }, /* surf3d */
{ 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
{ 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
{ 0x0054, false, NULL }, /* tex_tri */
{ 0x0055, false, NULL }, /* multitex_tri */
{ 0x0017, false, NULL }, /* nv01 chroma */
{ 0x0057, false, NULL }, /* nv04 chroma */
{ 0x0058, false, NULL }, /* surf_dst */
{ 0x0059, false, NULL }, /* surf_src */
{ 0x005a, false, NULL }, /* surf_color */
{ 0x005b, false, NULL }, /* surf_zeta */
{ 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
{ 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
{ 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
{ 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
{ 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
{ 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
{ 0x506e, true, nv04_graph_mthds_sw },
{}
};
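
The binding/validation scheme described in the long comment block above can be condensed into a small standalone illustration. The following userspace sketch is not driver code; the CTX_VAL_* names are invented here for readability, but the bit values and the checks mirror what nv04_graph_set_ctx_val() does when deciding whether to set the patch-valid bit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* ctx_val bits as used by nv04_graph_set_ctx_val() above;
 * macro names are made up here for clarity */
#define CTX_VAL_SURF2D	0x02000000 /* surface_dst/surface_color/surf2d/surf3d */
#define CTX_VAL_SURFSRC	0x04000000 /* surface_src/surface_zeta */
#define CTX_VAL_PATTERN	0x08000000
#define CTX_VAL_ROP	0x10000000
#define CTX_VAL_BETA1	0x20000000
#define CTX_VAL_BETA4	0x40000000

/* true when the selected 2d operation has every object it needs bound */
static bool grobj_valid(int class, int op, uint32_t ctx_val)
{
	/* every operation needs a destination surface */
	if (!(ctx_val & CTX_VAL_SURF2D))
		return false;
	/* nv01 imageblit (0x1f) and nv03 tex_tri (0x48) need a source too */
	if ((class == 0x1f || class == 0x48) && !(ctx_val & CTX_VAL_SURFSRC))
		return false;
	switch (op) {
	case 0: /* SRCCOPY_AND */
	case 3: /* SRCCOPY: no extra objects required */
		return true;
	case 1: /* ROP_AND: pattern and rop (mask 0x18000000, as the driver tests) */
		return ctx_val & (CTX_VAL_PATTERN | CTX_VAL_ROP);
	case 2: /* BLEND_AND: beta1 */
		return ctx_val & CTX_VAL_BETA1;
	case 4: /* SRCCOPY_PREMULT */
	case 5: /* BLEND_PREMULT: beta4 */
		return ctx_val & CTX_VAL_BETA4;
	}
	return false;
}

int main(void)
{
	/* ROP_AND with only a destination bound: rendering stays disabled */
	printf("%d\n", grobj_valid(0x5f, 1, CTX_VAL_SURF2D));
	/* ...and becomes valid once pattern and rop are bound as well */
	printf("%d\n", grobj_valid(0x5f, 1,
	       CTX_VAL_SURF2D | CTX_VAL_PATTERN | CTX_VAL_ROP));
	return 0;
}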


@ -115,11 +115,6 @@
/* TODO:
* - get vs count from 0x1540
* - document unimplemented bits compared to nvidia
* - nsource handling
* - R0 & 0x0200 handling
* - single-vs handling
* - 400314 bit 0
*/
static int


@ -0,0 +1,87 @@
/*
* Copyright 2010 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "drm_fixed.h"
#include "nouveau_drv.h"
#include "nouveau_hw.h"
int
nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
int *N1, int *M1, int *N2, int *M2, int *P)
{
struct nouveau_pll_vals pll_vals;
int ret;
ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals);
if (ret <= 0)
return ret;
*N1 = pll_vals.N1;
*M1 = pll_vals.M1;
*N2 = pll_vals.N2;
*M2 = pll_vals.M2;
*P = pll_vals.log2P;
return ret;
}
int
nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
int *N, int *fN, int *M, int *P)
{
fixed20_12 fb_div, a, b;
*P = pll->vco1.maxfreq / clk;
if (*P > pll->max_p)
*P = pll->max_p;
if (*P < pll->min_p)
*P = pll->min_p;
/* *M = ceil(refclk / pll->vco.max_inputfreq); */
a.full = dfixed_const(pll->refclk);
b.full = dfixed_const(pll->vco1.max_inputfreq);
a.full = dfixed_div(a, b);
a.full = dfixed_ceil(a);
*M = dfixed_trunc(a);
/* fb_div = (vco * *M) / refclk; */
fb_div.full = dfixed_const(clk * *P);
fb_div.full = dfixed_mul(fb_div, a);
a.full = dfixed_const(pll->refclk);
fb_div.full = dfixed_div(fb_div, a);
/* *N = floor(fb_div); */
a.full = dfixed_floor(fb_div);
*N = dfixed_trunc(fb_div);
/* *fN = (fmod(fb_div, 1.0) * 8192) - 4096; */
b.full = dfixed_const(8192);
a.full = dfixed_mul(a, b);
fb_div.full = dfixed_mul(fb_div, b);
fb_div.full = fb_div.full - a.full;
*fN = dfixed_trunc(fb_div) - 4096;
*fN &= 0xffff;
return clk;
}
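
For readers checking the dfixed arithmetic in nv50_calc_pll2() above, the same fractional-feedback-divider computation in plain floating point looks like the sketch below. It is userspace code, and the refclk, input-frequency limit and target clock are made-up example values, not real vbios limits:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical values -- the real ones come from the vbios pll limits */
	double refclk = 27000.0;	/* kHz */
	double max_inputfreq = 13500.0;	/* kHz, vco1.max_inputfreq */
	int clk = 165000;		/* target clock, kHz */
	int P = 2;			/* post divider, already clamped */

	int M = (int)ceil(refclk / max_inputfreq);
	double fb_div = (double)clk * P * M / refclk;
	int N = (int)floor(fb_div);
	/* fractional part in 1/8192 steps, centred on 4096, 16 bits */
	int fN = ((int)(fmod(fb_div, 1.0) * 8192) - 4096) & 0xffff;

	printf("M=%d N=%d fN=0x%04x\n", M, N, fN);
	return 0;
}

With these example numbers M=2, fb_div=24.444..., so N=24 and fN=0xfe38, i.e. a feedback divider slightly below 24.5.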


@ -264,32 +264,40 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
int
nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
{
uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
struct nouveau_pll_vals pll;
struct pll_lims limits;
uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
struct pll_lims pll;
uint32_t reg1, reg2;
int ret;
int ret, N1, M1, N2, M2, P;
ret = get_pll_limits(dev, pll_reg, &limits);
ret = get_pll_limits(dev, reg, &pll);
if (ret)
return ret;
ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
if (ret <= 0)
return ret;
if (pll.vco2.maxfreq) {
ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
if (ret <= 0)
return 0;
if (limits.vco2.maxfreq) {
reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
nv_wr32(dev, pll_reg, 0x10000611);
nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
nv_wr32(dev, pll_reg + 8,
reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
pclk, ret, N1, M1, N2, M2, P);
reg1 = nv_rd32(dev, reg + 4) & 0xff00ff00;
reg2 = nv_rd32(dev, reg + 8) & 0x8000ff00;
nv_wr32(dev, reg, 0x10000611);
nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
} else {
reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
nv_wr32(dev, pll_reg, 0x50000610);
nv_wr32(dev, pll_reg + 4, reg1 |
(pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
if (ret <= 0)
return 0;
NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
pclk, ret, N1, N2, M1, P);
reg1 = nv_rd32(dev, reg + 4) & 0xffc00000;
nv_wr32(dev, reg, 0x50000610);
nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
nv_wr32(dev, reg + 8, N2);
}
return 0;


@ -29,6 +29,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "drm_crtc_helper.h"
static void
@ -782,6 +783,37 @@ ack:
nv_wr32(dev, 0x610030, 0x80000000);
}
static void
nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
{
int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
struct drm_encoder *encoder;
uint32_t tmp, unk0 = 0, unk1 = 0;
if (dcb->type != OUTPUT_DP)
return;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
if (nv_encoder->dcb == dcb) {
unk0 = nv_encoder->dp.unk0;
unk1 = nv_encoder->dp.unk1;
break;
}
}
if (unk0 || unk1) {
tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
tmp &= 0xfffffe03;
nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);
tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
tmp &= 0xfef080c0;
nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
}
}
static void
nv50_display_unk20_handler(struct drm_device *dev)
{
@ -805,6 +837,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
nouveau_bios_run_display_table(dev, dcbent, script, pclk);
nv50_display_unk20_dp_hack(dev, dcbent);
tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
tmp &= ~0x000000f;
nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
@ -945,6 +979,8 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
if (dev_priv->chipset >= 0x90)
nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
drm_helper_hpd_irq_event(dev);
}
void


@ -6,8 +6,8 @@
void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@ -49,8 +49,8 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
@ -84,8 +84,8 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
uint32_t width, dwords, *data = (uint32_t *)image->data;
@ -152,8 +152,8 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int
nv50_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbcon_par *par = info->par;
struct drm_device *dev = par->dev;
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
struct nouveau_gpuobj *eng2d = NULL;


@ -321,18 +321,23 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
encoder->possible_clones = 0;
if (nv_encoder->dcb->type == OUTPUT_DP) {
uint32_t mc, or = nv_encoder->or;
int or = nv_encoder->or, link = !(entry->dpconf.sor.link & 1);
uint32_t tmp;
if (dev_priv->chipset < 0x90 ||
dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
else
mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
tmp = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
switch ((mc & 0x00000f00) >> 8) {
switch ((tmp & 0x00000f00) >> 8) {
case 8:
case 9:
nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16;
nv_encoder->dp.mc_unknown = (tmp & 0x000f0000) >> 16;
tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
nv_encoder->dp.unk0 = tmp & 0x000001fc;
tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
nv_encoder->dp.unk1 = tmp & 0x010f7f3f;
break;
default:
break;


@ -5742,6 +5742,9 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
#define ATOM_PP_THERMALCONTROLLER_RV770 8
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
typedef struct _ATOM_PPLIB_STATE
{
@ -5749,6 +5752,26 @@ typedef struct _ATOM_PPLIB_STATE
UCHAR ucClockStateIndices[1]; // variable-sized
} ATOM_PPLIB_STATE;
typedef struct _ATOM_PPLIB_FANTABLE
{
UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
UCHAR ucTHyst; // Temperature hysteresis. Integer.
USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
USHORT usTMed; // The middle temperature where we change slopes.
USHORT usTHigh; // The high point above TMed for adjusting the second slope.
USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
USHORT usPWMMed; // The PWM value (in percent) at TMed.
USHORT usPWMHigh; // The PWM value at THigh.
} ATOM_PPLIB_FANTABLE;
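
As an illustration of how the two slopes fit together, a driver could interpolate a PWM value from this table roughly as follows. This is a hypothetical sketch, not actual radeon code; it assumes usTMin/usTMed/usTHigh are absolute temperatures in 0.01 centigrades with TMin < TMed < THigh, and that the usPWM* fields are in 0.01 percent, per the comments above:

/* piecewise-linear fan curve: flat at PWMMin up to TMin, first slope
 * to TMed, second slope to THigh, flat at PWMHigh beyond that */
static int fan_pwm_from_temp(const ATOM_PPLIB_FANTABLE *t, int temp)
{
	if (temp <= t->usTMin)
		return t->usPWMMin;
	if (temp <= t->usTMed)
		return t->usPWMMin + (t->usPWMMed - t->usPWMMin) *
			(temp - t->usTMin) / (t->usTMed - t->usTMin);
	if (temp <= t->usTHigh)
		return t->usPWMMed + (t->usPWMHigh - t->usPWMMed) *
			(temp - t->usTMed) / (t->usTHigh - t->usTMed);
	return t->usPWMHigh;
}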
typedef struct _ATOM_PPLIB_EXTENDEDHEADER
{
USHORT usSize;
ULONG ulMaxEngineClock; // For Overdrive.
ULONG ulMaxMemoryClock; // For Overdrive.
// Add extra system parameters here, always adjust size to include all fields.
} ATOM_PPLIB_EXTENDEDHEADER;
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
@ -5762,6 +5785,12 @@ typedef struct _ATOM_PPLIB_STATE
#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state.
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
@ -5797,6 +5826,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
} ATOM_PPLIB_POWERPLAYTABLE;
typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
{
ATOM_PPLIB_POWERPLAYTABLE basicTable;
UCHAR ucNumCustomThermalPolicy;
USHORT usCustomThermalPolicyArrayOffset;
}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
{
ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
USHORT usFormatID; // To be used ONLY by PPGen.
USHORT usFanTableOffset;
USHORT usExtendendedHeaderOffset;
} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@ -5816,7 +5860,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
// remaining 3 bits are reserved
#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
@ -5840,9 +5886,15 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
//memory related flags
#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
//M3 Arb //2bits, current 3 sets of parameters in total
#define ATOM_PPLIB_M3ARB_MASK 0x00060000
#define ATOM_PPLIB_M3ARB_SHIFT 17
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
@ -5860,6 +5912,9 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
{
USHORT usEngineClockLow;
@ -5882,6 +5937,23 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
{
USHORT usEngineClockLow;
UCHAR ucEngineClockHigh;
USHORT usMemoryClockLow;
UCHAR ucMemoryClockHigh;
USHORT usVDDC;
USHORT usVDDCI;
USHORT usUnused;
ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO


@ -26,7 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon_fixed.h"
#include <drm/drm_fixed.h>
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"
@ -245,25 +245,27 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
/* XXX re-enable when interrupt support is added */
if (!ASIC_IS_DCE4(rdev))
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
/* XXX re-enable when interrupt support is added */
if (!ASIC_IS_DCE4(rdev))
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
}
}
@ -1160,6 +1162,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
/* adjust pm to upcoming mode change */
radeon_pm_compute_clocks(rdev);
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;


@ -351,7 +351,7 @@ retry:
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
if (ASIC_IS_DCE4(rdev))
args.v2.ucHPD_ID = chan->rec.hpd_id;
args.v2.ucHPD_ID = chan->rec.hpd;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

File diff suppressed because it is too large.


@ -164,8 +164,12 @@
#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
#define EVERGREEN_CRTC_V_BLANK_START_END 0x6e34
#define EVERGREEN_CRTC_CONTROL 0x6e70
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define EVERGREEN_CRTC_STATUS 0x6e8c
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0


@ -0,0 +1,556 @@
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#ifndef EVERGREEND_H
#define EVERGREEND_H
#define EVERGREEN_MAX_SH_GPRS 256
#define EVERGREEN_MAX_TEMP_GPRS 16
#define EVERGREEN_MAX_SH_THREADS 256
#define EVERGREEN_MAX_SH_STACK_ENTRIES 4096
#define EVERGREEN_MAX_FRC_EOV_CNT 16384
#define EVERGREEN_MAX_BACKENDS 8
#define EVERGREEN_MAX_BACKENDS_MASK 0xFF
#define EVERGREEN_MAX_SIMDS 16
#define EVERGREEN_MAX_SIMDS_MASK 0xFFFF
#define EVERGREEN_MAX_PIPES 8
#define EVERGREEN_MAX_PIPES_MASK 0xFF
#define EVERGREEN_MAX_LDS_NUM 0xFFFF
/* Registers */
#define RCU_IND_INDEX 0x100
#define RCU_IND_DATA 0x104
#define GRBM_GFX_INDEX 0x802C
#define INSTANCE_INDEX(x) ((x) << 0)
#define SE_INDEX(x) ((x) << 16)
#define INSTANCE_BROADCAST_WRITES (1 << 30)
#define SE_BROADCAST_WRITES (1 << 31)
#define RLC_GFX_INDEX 0x3fC4
#define CC_GC_SHADER_PIPE_CONFIG 0x8950
#define WRITE_DIS (1 << 0)
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
#define GB_ADDR_CONFIG 0x98F8
#define NUM_PIPES(x) ((x) << 0)
#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
#define NUM_SHADER_ENGINES(x) ((x) << 12)
#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
#define NUM_GPUS(x) ((x) << 20)
#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
#define ROW_SIZE(x) ((x) << 28)
#define GB_BACKEND_MAP 0x98FC
#define DMIF_ADDR_CONFIG 0xBD4
#define HDP_ADDR_CONFIG 0x2F48
#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
#define CGTS_SYS_TCC_DISABLE 0x3F90
#define CGTS_TCC_DISABLE 0x9148
#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
#define CGTS_USER_TCC_DISABLE 0x914C
#define CONFIG_MEMSIZE 0x5428
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
#define CP_ME_RAM_DATA 0xC160
#define CP_ME_RAM_RADDR 0xC158
#define CP_ME_RAM_WADDR 0xC15C
#define CP_MEQ_THRESHOLDS 0x8764
#define STQ_SPLIT(x) ((x) << 0)
#define CP_PERFMON_CNTL 0x87FC
#define CP_PFP_UCODE_ADDR 0xC150
#define CP_PFP_UCODE_DATA 0xC154
#define CP_QUEUE_THRESHOLDS 0x8760
#define ROQ_IB1_START(x) ((x) << 0)
#define ROQ_IB2_START(x) ((x) << 8)
#define CP_RB_BASE 0xC100
#define CP_RB_CNTL 0xC104
#define RB_BUFSZ(x) ((x) << 0)
#define RB_BLKSZ(x) ((x) << 8)
#define RB_NO_UPDATE (1 << 27)
#define RB_RPTR_WR_ENA (1 << 31)
#define BUF_SWAP_32BIT (2 << 16)
#define CP_RB_RPTR 0x8700
#define CP_RB_RPTR_ADDR 0xC10C
#define CP_RB_RPTR_ADDR_HI 0xC110
#define CP_RB_RPTR_WR 0xC108
#define CP_RB_WPTR 0xC114
#define CP_RB_WPTR_ADDR 0xC118
#define CP_RB_WPTR_ADDR_HI 0xC11C
#define CP_RB_WPTR_DELAY 0x8704
#define CP_SEM_WAIT_TIMER 0x85BC
#define CP_DEBUG 0xC1FC
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
#define INACTIVE_SIMDS(x) ((x) << 16)
#define INACTIVE_SIMDS_MASK 0x00FF0000
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
#define GRBM_SOFT_RESET 0x8020
#define SOFT_RESET_CP (1 << 0)
#define SOFT_RESET_CB (1 << 1)
#define SOFT_RESET_DB (1 << 3)
#define SOFT_RESET_PA (1 << 5)
#define SOFT_RESET_SC (1 << 6)
#define SOFT_RESET_SPI (1 << 8)
#define SOFT_RESET_SH (1 << 9)
#define SOFT_RESET_SX (1 << 10)
#define SOFT_RESET_TC (1 << 11)
#define SOFT_RESET_TA (1 << 12)
#define SOFT_RESET_VC (1 << 13)
#define SOFT_RESET_VGT (1 << 14)
#define GRBM_STATUS 0x8010
#define CMDFIFO_AVAIL_MASK 0x0000000F
#define SRBM_RQ_PENDING (1 << 5)
#define CF_RQ_PENDING (1 << 7)
#define PF_RQ_PENDING (1 << 8)
#define GRBM_EE_BUSY (1 << 10)
#define SX_CLEAN (1 << 11)
#define DB_CLEAN (1 << 12)
#define CB_CLEAN (1 << 13)
#define TA_BUSY (1 << 14)
#define VGT_BUSY_NO_DMA (1 << 16)
#define VGT_BUSY (1 << 17)
#define SX_BUSY (1 << 20)
#define SH_BUSY (1 << 21)
#define SPI_BUSY (1 << 22)
#define SC_BUSY (1 << 24)
#define PA_BUSY (1 << 25)
#define DB_BUSY (1 << 26)
#define CP_COHERENCY_BUSY (1 << 28)
#define CP_BUSY (1 << 29)
#define CB_BUSY (1 << 30)
#define GUI_ACTIVE (1 << 31)
#define GRBM_STATUS_SE0 0x8014
#define GRBM_STATUS_SE1 0x8018
#define SE_SX_CLEAN (1 << 0)
#define SE_DB_CLEAN (1 << 1)
#define SE_CB_CLEAN (1 << 2)
#define SE_TA_BUSY (1 << 25)
#define SE_SX_BUSY (1 << 26)
#define SE_SPI_BUSY (1 << 27)
#define SE_SH_BUSY (1 << 28)
#define SE_SC_BUSY (1 << 29)
#define SE_DB_BUSY (1 << 30)
#define SE_CB_BUSY (1 << 31)
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x00003000
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
#define NOOFRANK_SHIFT 2
#define NOOFRANK_MASK 0x00000004
#define NOOFROWS_SHIFT 3
#define NOOFROWS_MASK 0x00000038
#define NOOFCOLS_SHIFT 6
#define NOOFCOLS_MASK 0x000000C0
#define CHANSIZE_SHIFT 8
#define CHANSIZE_MASK 0x00000100
#define BURSTLENGTH_SHIFT 9
#define BURSTLENGTH_MASK 0x00000200
#define CHANSIZE_OVERRIDE (1 << 11)
#define MC_VM_AGP_TOP 0x2028
#define MC_VM_AGP_BOT 0x202C
#define MC_VM_AGP_BASE 0x2030
#define MC_VM_FB_LOCATION 0x2024
#define MC_VM_MB_L1_TLB0_CNTL 0x2234
#define MC_VM_MB_L1_TLB1_CNTL 0x2238
#define MC_VM_MB_L1_TLB2_CNTL 0x223C
#define MC_VM_MB_L1_TLB3_CNTL 0x2240
#define ENABLE_L1_TLB (1 << 0)
#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
#define EFFECTIVE_L1_TLB_SIZE(x) ((x)<<15)
#define EFFECTIVE_L1_QUEUE_SIZE(x) ((x)<<18)
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
#define PA_SC_AA_CONFIG 0x28C04
#define PA_SC_CLIPRECT_RULE 0x2820C
#define PA_SC_EDGERULE 0x28230
#define PA_SC_FIFO_SIZE 0x8BCC
#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
#define PA_SC_LINE_STIPPLE 0x28A0C
#define PA_SC_LINE_STIPPLE_STATE 0x8B10
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
#define SCRATCH_REG3 0x850C
#define SCRATCH_REG4 0x8510
#define SCRATCH_REG5 0x8514
#define SCRATCH_REG6 0x8518
#define SCRATCH_REG7 0x851C
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define NUMBER_OF_SETS(x) ((x) << 1)
#define FLUSH_ALL_ON_EVENT (1 << 10)
#define STALL_ON_EVENT (1 << 11)
#define SMX_EVENT_CTL 0xA02C
#define ES_FLUSH_CTL(x) ((x) << 0)
#define GS_FLUSH_CTL(x) ((x) << 3)
#define ACK_FLUSH_CTL(x) ((x) << 6)
#define SYNC_FLUSH_CTL (1 << 8)
#define SPI_CONFIG_CNTL 0x9100
#define GPR_WRITE_PRIORITY(x) ((x) << 0)
#define SPI_CONFIG_CNTL_1 0x913C
#define VTX_DONE_DELAY(x) ((x) << 0)
#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
#define SPI_INPUT_Z 0x286D8
#define SPI_PS_IN_CONTROL_0 0x286CC
#define NUM_INTERP(x) ((x)<<0)
#define POSITION_ENA (1<<8)
#define POSITION_CENTROID (1<<9)
#define POSITION_ADDR(x) ((x)<<10)
#define PARAM_GEN(x) ((x)<<15)
#define PARAM_GEN_ADDR(x) ((x)<<19)
#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
#define PERSP_GRADIENT_ENA (1<<28)
#define LINEAR_GRADIENT_ENA (1<<29)
#define POSITION_SAMPLE (1<<30)
#define BARYC_AT_SAMPLE_ENA (1<<31)
#define SQ_CONFIG 0x8C00
#define VC_ENABLE (1 << 0)
#define EXPORT_SRC_C (1 << 1)
#define CS_PRIO(x) ((x) << 18)
#define LS_PRIO(x) ((x) << 20)
#define HS_PRIO(x) ((x) << 22)
#define PS_PRIO(x) ((x) << 24)
#define VS_PRIO(x) ((x) << 26)
#define GS_PRIO(x) ((x) << 28)
#define ES_PRIO(x) ((x) << 30)
#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
#define NUM_PS_GPRS(x) ((x) << 0)
#define NUM_VS_GPRS(x) ((x) << 16)
#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
#define SQ_GPR_RESOURCE_MGMT_2 0x8C08
#define NUM_GS_GPRS(x) ((x) << 0)
#define NUM_ES_GPRS(x) ((x) << 16)
#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
#define NUM_HS_GPRS(x) ((x) << 0)
#define NUM_LS_GPRS(x) ((x) << 16)
#define SQ_THREAD_RESOURCE_MGMT 0x8C18
#define NUM_PS_THREADS(x) ((x) << 0)
#define NUM_VS_THREADS(x) ((x) << 8)
#define NUM_GS_THREADS(x) ((x) << 16)
#define NUM_ES_THREADS(x) ((x) << 24)
#define SQ_THREAD_RESOURCE_MGMT_2 0x8C1C
#define NUM_HS_THREADS(x) ((x) << 0)
#define NUM_LS_THREADS(x) ((x) << 8)
#define SQ_STACK_RESOURCE_MGMT_1 0x8C20
#define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
#define SQ_STACK_RESOURCE_MGMT_2 0x8C24
#define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
#define SQ_STACK_RESOURCE_MGMT_3 0x8C28
#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
#define SQ_LDS_RESOURCE_MGMT 0x8E2C
#define SQ_MS_FIFO_SIZES 0x8CF0
#define CACHE_FIFO_SIZE(x) ((x) << 0)
#define FETCH_FIFO_HIWATER(x) ((x) << 8)
#define DONE_FIFO_HIWATER(x) ((x) << 16)
#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
#define SX_DEBUG_1 0x9058
#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
#define SX_EXPORT_BUFFER_SIZES 0x900C
#define COLOR_BUFFER_SIZE(x) ((x) << 0)
#define POSITION_BUFFER_SIZE(x) ((x) << 8)
#define SMX_BUFFER_SIZE(x) ((x) << 16)
#define SX_MISC 0x28350
#define CB_PERF_CTR0_SEL_0 0x9A20
#define CB_PERF_CTR0_SEL_1 0x9A24
#define CB_PERF_CTR1_SEL_0 0x9A28
#define CB_PERF_CTR1_SEL_1 0x9A2C
#define CB_PERF_CTR2_SEL_0 0x9A30
#define CB_PERF_CTR2_SEL_1 0x9A34
#define CB_PERF_CTR3_SEL_0 0x9A38
#define CB_PERF_CTR3_SEL_1 0x9A3C
#define TA_CNTL_AUX 0x9508
#define DISABLE_CUBE_WRAP (1 << 0)
#define DISABLE_CUBE_ANISO (1 << 1)
#define SYNC_GRADIENT (1 << 24)
#define SYNC_WALKER (1 << 25)
#define SYNC_ALIGNER (1 << 26)
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x) << 0)
#define VC_ONLY 0
#define TC_ONLY 1
#define VC_AND_TC 2
#define AUTO_INVLD_EN(x) ((x) << 6)
#define NO_AUTO 0
#define ES_AUTO 1
#define GS_AUTO 2
#define ES_AND_GS_AUTO 3
#define VGT_GS_VERTEX_REUSE 0x88D4
#define VGT_NUM_INSTANCES 0x8974
#define VGT_OUT_DEALLOC_CNTL 0x28C5C
#define DEALLOC_DIST_MASK 0x0000007F
#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
#define VTX_REUSE_DEPTH_MASK 0x000000FF
#define VM_CONTEXT0_CNTL 0x1410
#define ENABLE_CONTEXT (1 << 0)
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
#define RESPONSE_TYPE_SHIFT 4
#define VM_L2_CNTL 0x1400
#define ENABLE_L2_CACHE (1 << 0)
#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
#define VM_L2_CNTL2 0x1404
#define INVALIDATE_ALL_L1_TLBS (1 << 0)
#define INVALIDATE_L2_CACHE (1 << 1)
#define VM_L2_CNTL3 0x1408
#define BANK_SELECT(x) ((x) << 0)
#define CACHE_UPDATE_MODE(x) ((x) << 6)
#define VM_L2_STATUS 0x140C
#define L2_BUSY (1 << 0)
#define WAIT_UNTIL 0x8040
#define SRBM_STATUS 0x0E50
#define SRBM_SOFT_RESET 0x0E60
#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
#define SOFT_RESET_BIF (1 << 1)
#define SOFT_RESET_CG (1 << 2)
#define SOFT_RESET_DC (1 << 5)
#define SOFT_RESET_GRBM (1 << 8)
#define SOFT_RESET_HDP (1 << 9)
#define SOFT_RESET_IH (1 << 10)
#define SOFT_RESET_MC (1 << 11)
#define SOFT_RESET_RLC (1 << 13)
#define SOFT_RESET_ROM (1 << 14)
#define SOFT_RESET_SEM (1 << 15)
#define SOFT_RESET_VMC (1 << 17)
#define SOFT_RESET_TST (1 << 21)
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
#define IH_RB_BASE 0x3e04
#define IH_RB_RPTR 0x3e08
#define IH_RB_WPTR 0x3e0c
# define RB_OVERFLOW (1 << 0)
# define WPTR_OFFSET_MASK 0x3fffc
#define IH_RB_WPTR_ADDR_HI 0x3e10
#define IH_RB_WPTR_ADDR_LO 0x3e14
#define IH_CNTL 0x3e18
# define ENABLE_INTR (1 << 0)
# define IH_MC_SWAP(x) ((x) << 2)
# define IH_MC_SWAP_NONE 0
# define IH_MC_SWAP_16BIT 1
# define IH_MC_SWAP_32BIT 2
# define IH_MC_SWAP_64BIT 3
# define RPTR_REARM (1 << 4)
# define MC_WRREQ_CREDIT(x) ((x) << 15)
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
# define SCRATCH_INT_ENABLE (1 << 25)
# define TIME_STAMP_INT_ENABLE (1 << 26)
# define IB2_INT_ENABLE (1 << 29)
# define IB1_INT_ENABLE (1 << 30)
# define RB_INT_ENABLE (1 << 31)
#define CP_INT_STATUS 0xc128
# define SCRATCH_INT_STAT (1 << 25)
# define TIME_STAMP_INT_STAT (1 << 26)
# define IB2_INT_STAT (1 << 29)
# define IB1_INT_STAT (1 << 30)
# define RB_INT_STAT (1 << 31)
#define GRBM_INT_CNTL 0x8060
# define RDERR_INT_ENABLE (1 << 0)
# define GUI_IDLE_INT_ENABLE (1 << 19)
/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
#define CRTC_STATUS_FRAME_COUNT 0x6e98
/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
#define VLINE_STATUS 0x6bb8
# define VLINE_OCCURRED (1 << 0)
# define VLINE_ACK (1 << 4)
# define VLINE_STAT (1 << 12)
# define VLINE_INTERRUPT (1 << 16)
# define VLINE_INTERRUPT_TYPE (1 << 17)
/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
#define VBLANK_STATUS 0x6bbc
# define VBLANK_OCCURRED (1 << 0)
# define VBLANK_ACK (1 << 4)
# define VBLANK_STAT (1 << 12)
# define VBLANK_INTERRUPT (1 << 16)
# define VBLANK_INTERRUPT_TYPE (1 << 17)
/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
#define INT_MASK 0x6b40
# define VBLANK_INT_MASK (1 << 0)
# define VLINE_INT_MASK (1 << 4)
#define DISP_INTERRUPT_STATUS 0x60f4
# define LB_D1_VLINE_INTERRUPT (1 << 2)
# define LB_D1_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD1_INTERRUPT (1 << 17)
# define DC_HPD1_RX_INTERRUPT (1 << 18)
# define DACA_AUTODETECT_INTERRUPT (1 << 22)
# define DACB_AUTODETECT_INTERRUPT (1 << 23)
# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
# define LB_D2_VLINE_INTERRUPT (1 << 2)
# define LB_D2_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD2_INTERRUPT (1 << 17)
# define DC_HPD2_RX_INTERRUPT (1 << 18)
# define DISP_TIMER_INTERRUPT (1 << 24)
#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
# define LB_D3_VLINE_INTERRUPT (1 << 2)
# define LB_D3_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD3_INTERRUPT (1 << 17)
# define DC_HPD3_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
# define LB_D4_VLINE_INTERRUPT (1 << 2)
# define LB_D4_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD4_INTERRUPT (1 << 17)
# define DC_HPD4_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
# define LB_D5_VLINE_INTERRUPT (1 << 2)
# define LB_D5_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD5_INTERRUPT (1 << 17)
# define DC_HPD5_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6050
# define LB_D6_VLINE_INTERRUPT (1 << 2)
# define LB_D6_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD6_INTERRUPT (1 << 17)
# define DC_HPD6_RX_INTERRUPT (1 << 18)
/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
#define GRPH_INT_STATUS 0x6858
# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
# define GRPH_PFLIP_INT_CLEAR (1 << 8)
/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
#define GRPH_INT_CONTROL 0x685c
# define GRPH_PFLIP_INT_MASK (1 << 0)
# define GRPH_PFLIP_INT_TYPE (1 << 8)
#define DACA_AUTODETECT_INT_CONTROL 0x66c8
#define DACB_AUTODETECT_INT_CONTROL 0x67c8
#define DC_HPD1_INT_STATUS 0x601c
#define DC_HPD2_INT_STATUS 0x6028
#define DC_HPD3_INT_STATUS 0x6034
#define DC_HPD4_INT_STATUS 0x6040
#define DC_HPD5_INT_STATUS 0x604c
#define DC_HPD6_INT_STATUS 0x6058
# define DC_HPDx_INT_STATUS (1 << 0)
# define DC_HPDx_SENSE (1 << 1)
# define DC_HPDx_RX_INT_STATUS (1 << 8)
#define DC_HPD1_INT_CONTROL 0x6020
#define DC_HPD2_INT_CONTROL 0x602c
#define DC_HPD3_INT_CONTROL 0x6038
#define DC_HPD4_INT_CONTROL 0x6044
#define DC_HPD5_INT_CONTROL 0x6050
#define DC_HPD6_INT_CONTROL 0x605c
# define DC_HPDx_INT_ACK (1 << 0)
# define DC_HPDx_INT_POLARITY (1 << 8)
# define DC_HPDx_INT_EN (1 << 16)
# define DC_HPDx_RX_INT_ACK (1 << 20)
# define DC_HPDx_RX_INT_EN (1 << 24)
#define DC_HPD1_CONTROL 0x6024
#define DC_HPD2_CONTROL 0x6030
#define DC_HPD3_CONTROL 0x603c
#define DC_HPD4_CONTROL 0x6048
#define DC_HPD5_CONTROL 0x6054
#define DC_HPD6_CONTROL 0x6060
# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
#endif
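
Most of these new Evergreen definitions are field-packing macros in the usual radeon style: FIELD(x) shifts a value into its bit position so a full register word can be composed with bitwise OR. A hypothetical sketch of composing GB_ADDR_CONFIG from the macros above (the field values are illustrative, not real Evergreen tuning; mirroring the word into the DMIF/HDP copies is an assumption about how init code typically uses these three registers):

/* Sketch: build one address-config word from the field macros and
 * write it via the WREG32 helper used throughout this driver. */
u32 gb_addr_config = NUM_PIPES(2) |
		     PIPE_INTERLEAVE_SIZE(1) |
		     BANK_INTERLEAVE_SIZE(0) |
		     NUM_SHADER_ENGINES(0) |
		     ROW_SIZE(1);

WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);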

View File

@ -37,6 +37,7 @@
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
#include "atom.h"
#include <linux/firmware.h>
#include <linux/platform_device.h>
@ -67,6 +68,264 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
switch (rdev->pm.dynpm_planned_action) {
case DYNPM_ACTION_MINIMUM:
rdev->pm.requested_power_state_index = 0;
rdev->pm.dynpm_can_downclock = false;
break;
case DYNPM_ACTION_DOWNCLOCK:
if (rdev->pm.current_power_state_index == 0) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
rdev->pm.dynpm_can_downclock = false;
} else {
if (rdev->pm.active_crtc_count > 1) {
for (i = 0; i < rdev->pm.num_power_states; i++) {
if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
continue;
else if (i >= rdev->pm.current_power_state_index) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
break;
} else {
rdev->pm.requested_power_state_index = i;
break;
}
}
} else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index - 1;
}
/* don't use the power state if crtcs are active and its NO_DISPLAY flag is set */
if ((rdev->pm.active_crtc_count > 0) &&
(rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
RADEON_PM_MODE_NO_DISPLAY)) {
rdev->pm.requested_power_state_index++;
}
break;
case DYNPM_ACTION_UPCLOCK:
if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
rdev->pm.dynpm_can_upclock = false;
} else {
if (rdev->pm.active_crtc_count > 1) {
for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
continue;
else if (i <= rdev->pm.current_power_state_index) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
break;
} else {
rdev->pm.requested_power_state_index = i;
break;
}
}
} else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index + 1;
}
break;
case DYNPM_ACTION_DEFAULT:
rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.dynpm_can_upclock = false;
break;
case DYNPM_ACTION_NONE:
default:
DRM_ERROR("Requested mode for not defined action\n");
return;
}
/* only one clock mode per power state */
rdev->pm.requested_clock_mode_index = 0;
DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].mclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
pcie_lanes);
}
void r100_pm_init_profile(struct radeon_device *rdev)
{
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
void r100_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
tmp = RREG32(voltage->gpio.reg);
if (voltage->active_high)
tmp |= voltage->gpio.mask;
else
tmp &= ~(voltage->gpio.mask);
WREG32(voltage->gpio.reg, tmp);
if (voltage->delay)
udelay(voltage->delay);
} else {
tmp = RREG32(voltage->gpio.reg);
if (voltage->active_high)
tmp &= ~voltage->gpio.mask;
else
tmp |= voltage->gpio.mask;
WREG32(voltage->gpio.reg, tmp);
if (voltage->delay)
udelay(voltage->delay);
}
}
sclk_cntl = RREG32_PLL(SCLK_CNTL);
sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
else
sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
} else
sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
if (voltage->delay) {
sclk_more_cntl |= VOLTAGE_DROP_SYNC;
switch (voltage->delay) {
case 33:
sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
break;
case 66:
sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
break;
case 99:
sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
break;
case 132:
sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
break;
}
} else
sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
} else
sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
sclk_cntl &= ~FORCE_HDP;
else
sclk_cntl |= FORCE_HDP;
WREG32_PLL(SCLK_CNTL, sclk_cntl);
WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
/* set pcie lanes */
if ((rdev->flags & RADEON_IS_PCIE) &&
!(rdev->flags & RADEON_IS_IGP) &&
rdev->asic->set_pcie_lanes &&
(ps->pcie_lanes !=
rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
radeon_set_pcie_lanes(rdev,
ps->pcie_lanes);
DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
}
}
void r100_pm_prepare(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
/* disable any active CRTCs */
list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (radeon_crtc->enabled) {
if (radeon_crtc->crtc_id) {
tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
} else {
tmp = RREG32(RADEON_CRTC_GEN_CNTL);
tmp |= RADEON_CRTC_DISP_REQ_EN_B;
WREG32(RADEON_CRTC_GEN_CNTL, tmp);
}
}
}
}
void r100_pm_finish(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
/* enable any active CRTCs */
list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (radeon_crtc->enabled) {
if (radeon_crtc->crtc_id) {
tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
} else {
tmp = RREG32(RADEON_CRTC_GEN_CNTL);
tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
WREG32(RADEON_CRTC_GEN_CNTL, tmp);
}
}
}
}
bool r100_gui_idle(struct radeon_device *rdev)
{
if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
return false;
else
return true;
}
/* hpd for digital panel detect/disconnect */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@ -254,6 +513,9 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.sw_int) {
tmp |= RADEON_SW_INT_ENABLE;
}
if (rdev->irq.gui_idle) {
tmp |= RADEON_GUI_IDLE_MASK;
}
if (rdev->irq.crtc_vblank_int[0]) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
@ -288,6 +550,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
/* the interrupt works, but the status bit is permanently asserted */
if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
if (!rdev->irq.gui_idle_acked)
irq_mask |= RADEON_GUI_IDLE_STAT;
}
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
}
@ -299,6 +567,9 @@ int r100_irq_process(struct radeon_device *rdev)
uint32_t status, msi_rearm;
bool queue_hotplug = false;
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
status = r100_irq_ack(rdev);
if (!status) {
return IRQ_NONE;
@ -311,6 +582,12 @@ int r100_irq_process(struct radeon_device *rdev)
if (status & RADEON_SW_INT_TEST) {
radeon_fence_process(rdev);
}
/* gui idle interrupt */
if (status & RADEON_GUI_IDLE_STAT) {
rdev->irq.gui_idle_acked = true;
rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 0);
@ -332,6 +609,8 @@ int r100_irq_process(struct radeon_device *rdev)
}
status = r100_irq_ack(rdev);
}
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
@ -663,26 +942,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
if (r100_debugfs_cp_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for CP !\n");
}
/* Reset CP */
tmp = RREG32(RADEON_CP_CSQ_STAT);
if ((tmp & (1 << 31))) {
DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
tmp = RREG32(RADEON_RBBM_SOFT_RESET);
mdelay(2);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
tmp = RREG32(RADEON_RBBM_SOFT_RESET);
mdelay(2);
tmp = RREG32(RADEON_CP_CSQ_STAT);
if ((tmp & (1 << 31))) {
DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
}
} else {
DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
}
if (!rdev->me_fw) {
r = r100_cp_init_microcode(rdev);
if (r) {
@ -787,39 +1046,6 @@ void r100_cp_disable(struct radeon_device *rdev)
}
}
int r100_cp_reset(struct radeon_device *rdev)
{
uint32_t tmp;
bool reinit_cp;
int i;
reinit_cp = rdev->cp.ready;
rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
(void)RREG32(RADEON_RBBM_SOFT_RESET);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & (1 << 16))) {
DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
tmp);
if (reinit_cp) {
return r100_cp_init(rdev, rdev->cp.ring_size);
}
return 0;
}
DRM_UDELAY(1);
}
tmp = RREG32(RADEON_RBBM_STATUS);
DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
return -1;
}
void r100_cp_commit(struct radeon_device *rdev)
{
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@ -1733,76 +1959,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
void r100_gpu_init(struct radeon_device *rdev)
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
/* TODO: anythings to do here ? pipes ? */
r100_hdp_reset(rdev);
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
}
void r100_hdp_reset(struct radeon_device *rdev)
/**
* r100_gpu_cp_is_lockup() - check whether the CP is locked up by tracking its progress
* @rdev: radeon device structure
* @lockup: r100_gpu_lockup structure holding CP lockup tracking information
* @cp: radeon_cp structure holding CP information
*
* We don't need to initialize the lockup tracking information: either the CP
* rptr will have moved to a different value, or jiffies will have wrapped
* around, and both cases force (re)initialization of the tracking information.
*
* A possible false positive is a call made a long while after the last one
* with last_cp_rptr still equal to the current CP rptr; unlikely, but it can
* happen. To avoid it, if more than 3 seconds have elapsed since the last
* call we return false and update the tracking information. Consequently the
* caller must invoke r100_gpu_cp_is_lockup several times within 3 seconds for
* a lockup to be reported; the fencing code should be cautious about that.
*
* The caller should also write to the ring to force the CP to do something,
* so we don't get a false positive when the CP simply has nothing to do.
*
**/
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
uint32_t tmp;
unsigned long cjiffies, elapsed;
tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
tmp |= (7 << 28);
WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
(void)RREG32(RADEON_HOST_PATH_CNTL);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
WREG32(RADEON_HOST_PATH_CNTL, tmp);
(void)RREG32(RADEON_HOST_PATH_CNTL);
cjiffies = jiffies;
if (!time_after(cjiffies, lockup->last_jiffies)) {
/* likely a wrap around */
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
return false;
}
if (cp->rptr != lockup->last_cp_rptr) {
/* CP is still working no lockup */
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
return false;
}
elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
if (elapsed >= 3000) {
/* very likely the improbable case where the current
* rptr equals an rptr recorded a long while ago; this
* is most likely a false positive, so update the
* tracking information, which forces us to be called
* again at a later point
*/
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = jiffies;
return false;
}
if (elapsed >= 1000) {
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
/* give a chance to the GPU ... */
return false;
}
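
Per the comment above, a lockup is only ever reported when the check is polled repeatedly while rptr stays frozen; one isolated call merely refreshes the tracking state. A hedged sketch of the polling pattern a fence-wait loop might follow (fence_signaled() and the 100 ms period are illustrative, not the driver's actual fence code):

/* Sketch: poll the lockup check from a wait loop. */
while (!fence_signaled(fence)) {
	/* r100_gpu_is_lockup() below writes NOPs to the ring itself,
	 * satisfying the "force CP activity" requirement. */
	if (r100_gpu_is_lockup(rdev)) {
		/* rptr never moved across polls inside the 3 s
		 * window: treat it as a real lockup and reset */
		r100_asic_reset(rdev);
		break;
	}
	msleep(100);	/* re-poll well inside the 3 second window */
}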
int r100_rb2d_reset(struct radeon_device *rdev)
bool r100_gpu_is_lockup(struct radeon_device *rdev)
{
uint32_t tmp;
int i;
u32 rbbm_status;
int r;
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
(void)RREG32(RADEON_RBBM_SOFT_RESET);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
/* Wait to prevent race in RBBM_STATUS */
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
}
void r100_bm_disable(struct radeon_device *rdev)
{
u32 tmp;
/* disable bus mastering */
tmp = RREG32(R_000030_BUS_CNTL);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
mdelay(1);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
mdelay(1);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
mdelay(1);
pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
mdelay(1);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & (1 << 26))) {
DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
tmp);
return 0;
}
DRM_UDELAY(1);
}
tmp = RREG32(RADEON_RBBM_STATUS);
DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
return -1;
}
int r100_gpu_reset(struct radeon_device *rdev)
int r100_asic_reset(struct radeon_device *rdev)
{
uint32_t status;
struct r100_mc_save save;
u32 status, tmp;
/* reset order likely matter */
status = RREG32(RADEON_RBBM_STATUS);
/* reset HDP */
r100_hdp_reset(rdev);
/* reset rb2d */
if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
r100_rb2d_reset(rdev);
r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
/* TODO: reset 3D engine */
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
WREG32(RADEON_CP_CSQ_CNTL, 0);
tmp = RREG32(RADEON_CP_RB_CNTL);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
WREG32(RADEON_CP_RB_CNTL, tmp);
/* save PCI state */
pci_save_state(rdev->pdev);
/* disable bus mastering */
r100_bm_disable(rdev);
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
S_0000F0_SOFT_RESET_RE(1) |
S_0000F0_SOFT_RESET_PP(1) |
S_0000F0_SOFT_RESET_RB(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset CP */
status = RREG32(RADEON_RBBM_STATUS);
if (status & (1 << 16)) {
r100_cp_reset(rdev);
}
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
pci_restore_state(rdev->pdev);
r100_enable_bm(rdev);
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
return -1;
}
DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
r100_mc_resume(rdev, &save);
dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
@ -2002,11 +2315,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
else
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
/* FIXME remove this once we support unmappable VRAM */
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
}
void r100_vga_set_state(struct radeon_device *rdev, bool state)
@ -2335,53 +2643,53 @@ void r100_bandwidth_update(struct radeon_device *rdev)
fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
fixed20_12 memtcas_ff[8] = {
fixed_init(1),
fixed_init(2),
fixed_init(3),
fixed_init(0),
fixed_init_half(1),
fixed_init_half(2),
fixed_init(0),
dfixed_init(1),
dfixed_init(2),
dfixed_init(3),
dfixed_init(0),
dfixed_init_half(1),
dfixed_init_half(2),
dfixed_init(0),
};
fixed20_12 memtcas_rs480_ff[8] = {
fixed_init(0),
fixed_init(1),
fixed_init(2),
fixed_init(3),
fixed_init(0),
fixed_init_half(1),
fixed_init_half(2),
fixed_init_half(3),
dfixed_init(0),
dfixed_init(1),
dfixed_init(2),
dfixed_init(3),
dfixed_init(0),
dfixed_init_half(1),
dfixed_init_half(2),
dfixed_init_half(3),
};
fixed20_12 memtcas2_ff[8] = {
fixed_init(0),
fixed_init(1),
fixed_init(2),
fixed_init(3),
fixed_init(4),
fixed_init(5),
fixed_init(6),
fixed_init(7),
dfixed_init(0),
dfixed_init(1),
dfixed_init(2),
dfixed_init(3),
dfixed_init(4),
dfixed_init(5),
dfixed_init(6),
dfixed_init(7),
};
fixed20_12 memtrbs[8] = {
fixed_init(1),
fixed_init_half(1),
fixed_init(2),
fixed_init_half(2),
fixed_init(3),
fixed_init_half(3),
fixed_init(4),
fixed_init_half(4)
dfixed_init(1),
dfixed_init_half(1),
dfixed_init(2),
dfixed_init_half(2),
dfixed_init(3),
dfixed_init_half(3),
dfixed_init(4),
dfixed_init_half(4)
};
fixed20_12 memtrbs_r4xx[8] = {
fixed_init(4),
fixed_init(5),
fixed_init(6),
fixed_init(7),
fixed_init(8),
fixed_init(9),
fixed_init(10),
fixed_init(11)
dfixed_init(4),
dfixed_init(5),
dfixed_init(6),
dfixed_init(7),
dfixed_init(8),
dfixed_init(9),
dfixed_init(10),
dfixed_init(11)
};
fixed20_12 min_mem_eff;
fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
@ -2412,7 +2720,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
}
}
min_mem_eff.full = rfixed_const_8(0);
min_mem_eff.full = dfixed_const_8(0);
/* get modes */
if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
@ -2433,28 +2741,28 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mclk_ff = rdev->pm.mclk;
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
temp_ff.full = rfixed_const(temp);
mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
temp_ff.full = dfixed_const(temp);
mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
pix_clk.full = 0;
pix_clk2.full = 0;
peak_disp_bw.full = 0;
if (mode1) {
temp_ff.full = rfixed_const(1000);
pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
pix_clk.full = rfixed_div(pix_clk, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes1);
peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
temp_ff.full = dfixed_const(1000);
pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
pix_clk.full = dfixed_div(pix_clk, temp_ff);
temp_ff.full = dfixed_const(pixel_bytes1);
peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
}
if (mode2) {
temp_ff.full = rfixed_const(1000);
pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes2);
peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
temp_ff.full = dfixed_const(1000);
pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
temp_ff.full = dfixed_const(pixel_bytes2);
peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
}
mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
if (peak_disp_bw.full >= mem_bw.full) {
DRM_ERROR("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
@ -2496,9 +2804,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mem_tras = ((temp >> 12) & 0xf) + 4;
}
/* convert to FF */
trcd_ff.full = rfixed_const(mem_trcd);
trp_ff.full = rfixed_const(mem_trp);
tras_ff.full = rfixed_const(mem_tras);
trcd_ff.full = dfixed_const(mem_trcd);
trp_ff.full = dfixed_const(mem_trp);
tras_ff.full = dfixed_const(mem_tras);
/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
@ -2516,7 +2824,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/* extra cas latency stored in bits 23-25 0-4 clocks */
data = (temp >> 23) & 0x7;
if (data < 5)
tcas_ff.full += rfixed_const(data);
tcas_ff.full += dfixed_const(data);
}
if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
@ -2553,72 +2861,72 @@ void r100_bandwidth_update(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP) {
fixed20_12 agpmode_ff;
agpmode_ff.full = rfixed_const(radeon_agpmode);
temp_ff.full = rfixed_const_666(16);
sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
agpmode_ff.full = dfixed_const(radeon_agpmode);
temp_ff.full = dfixed_const_666(16);
sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
}
/* TODO PCIE lanes may affect this - agpmode == 16?? */
if (ASIC_IS_R300(rdev)) {
sclk_delay_ff.full = rfixed_const(250);
sclk_delay_ff.full = dfixed_const(250);
} else {
if ((rdev->family == CHIP_RV100) ||
rdev->flags & RADEON_IS_IGP) {
if (rdev->mc.vram_is_ddr)
sclk_delay_ff.full = rfixed_const(41);
sclk_delay_ff.full = dfixed_const(41);
else
sclk_delay_ff.full = rfixed_const(33);
sclk_delay_ff.full = dfixed_const(33);
} else {
if (rdev->mc.vram_width == 128)
sclk_delay_ff.full = rfixed_const(57);
sclk_delay_ff.full = dfixed_const(57);
else
sclk_delay_ff.full = rfixed_const(41);
sclk_delay_ff.full = dfixed_const(41);
}
}
mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
if (rdev->mc.vram_is_ddr) {
if (rdev->mc.vram_width == 32) {
k1.full = rfixed_const(40);
k1.full = dfixed_const(40);
c = 3;
} else {
k1.full = rfixed_const(20);
k1.full = dfixed_const(20);
c = 1;
}
} else {
k1.full = rfixed_const(40);
k1.full = dfixed_const(40);
c = 3;
}
temp_ff.full = rfixed_const(2);
mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
temp_ff.full = rfixed_const(c);
mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
temp_ff.full = rfixed_const(4);
mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
temp_ff.full = dfixed_const(2);
mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
temp_ff.full = dfixed_const(c);
mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
temp_ff.full = dfixed_const(4);
mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
mc_latency_mclk.full += k1.full;
mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
/*
HW cursor time assuming worst case of full size colour cursor.
*/
temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
temp_ff.full += trcd_ff.full;
if (temp_ff.full < tras_ff.full)
temp_ff.full = tras_ff.full;
cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
temp_ff.full = rfixed_const(cur_size);
cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
temp_ff.full = dfixed_const(cur_size);
cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
/*
Find the total latency for the display data.
*/
disp_latency_overhead.full = rfixed_const(8);
disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
disp_latency_overhead.full = dfixed_const(8);
disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
@ -2646,16 +2954,16 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the drain rate of the display buffer.
*/
temp_ff.full = rfixed_const((16/pixel_bytes1));
disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
temp_ff.full = dfixed_const((16/pixel_bytes1));
disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
/*
Find the critical point of the display buffer.
*/
crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
crit_point_ff.full += rfixed_const_half(0);
crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
crit_point_ff.full += dfixed_const_half(0);
critical_point = rfixed_trunc(crit_point_ff);
critical_point = dfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point = 0;
@ -2726,8 +3034,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
Find the drain rate of the display buffer.
*/
temp_ff.full = rfixed_const((16/pixel_bytes2));
disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
temp_ff.full = dfixed_const((16/pixel_bytes2));
disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
@ -2748,8 +3056,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
critical_point2 = 0;
else {
temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
temp_ff.full = rfixed_const(temp);
temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
temp_ff.full = dfixed_const(temp);
temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
if (sclk_ff.full < temp_ff.full)
temp_ff.full = sclk_ff.full;
@ -2757,15 +3065,15 @@ void r100_bandwidth_update(struct radeon_device *rdev)
if (mode1) {
temp_ff.full = read_return_rate.full - disp_drain_rate.full;
time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
} else {
time_disp1_drop_priority.full = 0;
}
crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
crit_point_ff.full += rfixed_const_half(0);
crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
crit_point_ff.full += dfixed_const_half(0);
critical_point2 = rfixed_trunc(crit_point_ff);
critical_point2 = dfixed_trunc(crit_point_ff);
if (rdev->disp_priority == 2) {
critical_point2 = 0;
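
The long series of changes above is a mechanical rename: every rfixed_* helper becomes its dfixed_* equivalent from the new shared <drm/drm_fixed.h> (see the include change at the top of this commit), with the 20.12 fixed-point semantics unchanged. A small standalone illustration of that arithmetic, reimplementing the helpers in plain C rather than quoting the kernel header:

#include <stdint.h>
#include <stdio.h>

/* 20.12 fixed point: value = full / 4096 */
typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 dfixed_const(uint32_t a) { return (fixed20_12){ a << 12 }; }
static fixed20_12 dfixed_div(fixed20_12 a, fixed20_12 b)
{
	return (fixed20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) };
}
static uint32_t dfixed_trunc(fixed20_12 a) { return a.full >> 12; }

int main(void)
{
	/* e.g. disp_drain_rate = pix_clk / (16 / pixel_bytes) */
	fixed20_12 pix_clk = dfixed_const(135);		/* 135 MHz pixel clock */
	fixed20_12 group   = dfixed_const(16 / 4);	/* 4 bytes per pixel */
	fixed20_12 drain   = dfixed_div(pix_clk, group);

	printf("drain rate: %u\n", dfixed_trunc(drain));	/* prints 33 */
	return 0;
}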
@ -3399,7 +3707,7 @@ static int r100_startup(struct radeon_device *rdev)
/* Resume clock */
r100_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
r100_gpu_init(rdev);
// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
@ -3436,7 +3744,7 @@ int r100_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r100_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -3462,7 +3770,6 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@ -3505,7 +3812,7 @@ int r100_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@ -3518,8 +3825,6 @@ int r100_init(struct radeon_device *rdev)
r100_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);

View File

@ -74,6 +74,134 @@
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
#define S_0000F0_SOFT_RESET_SE(x) (((x) & 0x1) << 2)
#define G_0000F0_SOFT_RESET_SE(x) (((x) >> 2) & 0x1)
#define C_0000F0_SOFT_RESET_SE 0xFFFFFFFB
#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
#define R_000030_BUS_CNTL 0x000030
#define S_000030_BUS_DBL_RESYNC(x) (((x) & 0x1) << 0)
#define G_000030_BUS_DBL_RESYNC(x) (((x) >> 0) & 0x1)
#define C_000030_BUS_DBL_RESYNC 0xFFFFFFFE
#define S_000030_BUS_MSTR_RESET(x) (((x) & 0x1) << 1)
#define G_000030_BUS_MSTR_RESET(x) (((x) >> 1) & 0x1)
#define C_000030_BUS_MSTR_RESET 0xFFFFFFFD
#define S_000030_BUS_FLUSH_BUF(x) (((x) & 0x1) << 2)
#define G_000030_BUS_FLUSH_BUF(x) (((x) >> 2) & 0x1)
#define C_000030_BUS_FLUSH_BUF 0xFFFFFFFB
#define S_000030_BUS_STOP_REQ_DIS(x) (((x) & 0x1) << 3)
#define G_000030_BUS_STOP_REQ_DIS(x) (((x) >> 3) & 0x1)
#define C_000030_BUS_STOP_REQ_DIS 0xFFFFFFF7
#define S_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 4)
#define G_000030_BUS_PM4_READ_COMBINE_EN(x) (((x) >> 4) & 0x1)
#define C_000030_BUS_PM4_READ_COMBINE_EN 0xFFFFFFEF
#define S_000030_BUS_WRT_COMBINE_EN(x) (((x) & 0x1) << 5)
#define G_000030_BUS_WRT_COMBINE_EN(x) (((x) >> 5) & 0x1)
#define C_000030_BUS_WRT_COMBINE_EN 0xFFFFFFDF
#define S_000030_BUS_MASTER_DIS(x) (((x) & 0x1) << 6)
#define G_000030_BUS_MASTER_DIS(x) (((x) >> 6) & 0x1)
#define C_000030_BUS_MASTER_DIS 0xFFFFFFBF
#define S_000030_BIOS_ROM_WRT_EN(x) (((x) & 0x1) << 7)
#define G_000030_BIOS_ROM_WRT_EN(x) (((x) >> 7) & 0x1)
#define C_000030_BIOS_ROM_WRT_EN 0xFFFFFF7F
#define S_000030_BM_DAC_CRIPPLE(x) (((x) & 0x1) << 8)
#define G_000030_BM_DAC_CRIPPLE(x) (((x) >> 8) & 0x1)
#define C_000030_BM_DAC_CRIPPLE 0xFFFFFEFF
#define S_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) & 0x1) << 9)
#define G_000030_BUS_NON_PM4_READ_COMBINE_EN(x) (((x) >> 9) & 0x1)
#define C_000030_BUS_NON_PM4_READ_COMBINE_EN 0xFFFFFDFF
#define S_000030_BUS_XFERD_DISCARD_EN(x) (((x) & 0x1) << 10)
#define G_000030_BUS_XFERD_DISCARD_EN(x) (((x) >> 10) & 0x1)
#define C_000030_BUS_XFERD_DISCARD_EN 0xFFFFFBFF
#define S_000030_BUS_SGL_READ_DISABLE(x) (((x) & 0x1) << 11)
#define G_000030_BUS_SGL_READ_DISABLE(x) (((x) >> 11) & 0x1)
#define C_000030_BUS_SGL_READ_DISABLE 0xFFFFF7FF
#define S_000030_BIOS_DIS_ROM(x) (((x) & 0x1) << 12)
#define G_000030_BIOS_DIS_ROM(x) (((x) >> 12) & 0x1)
#define C_000030_BIOS_DIS_ROM 0xFFFFEFFF
#define S_000030_BUS_PCI_READ_RETRY_EN(x) (((x) & 0x1) << 13)
#define G_000030_BUS_PCI_READ_RETRY_EN(x) (((x) >> 13) & 0x1)
#define C_000030_BUS_PCI_READ_RETRY_EN 0xFFFFDFFF
#define S_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) & 0x1) << 14)
#define G_000030_BUS_AGP_AD_STEPPING_EN(x) (((x) >> 14) & 0x1)
#define C_000030_BUS_AGP_AD_STEPPING_EN 0xFFFFBFFF
#define S_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) & 0x1) << 15)
#define G_000030_BUS_PCI_WRT_RETRY_EN(x) (((x) >> 15) & 0x1)
#define C_000030_BUS_PCI_WRT_RETRY_EN 0xFFFF7FFF
#define S_000030_BUS_RETRY_WS(x) (((x) & 0xF) << 16)
#define G_000030_BUS_RETRY_WS(x) (((x) >> 16) & 0xF)
#define C_000030_BUS_RETRY_WS 0xFFF0FFFF
#define S_000030_BUS_MSTR_RD_MULT(x) (((x) & 0x1) << 20)
#define G_000030_BUS_MSTR_RD_MULT(x) (((x) >> 20) & 0x1)
#define C_000030_BUS_MSTR_RD_MULT 0xFFEFFFFF
#define S_000030_BUS_MSTR_RD_LINE(x) (((x) & 0x1) << 21)
#define G_000030_BUS_MSTR_RD_LINE(x) (((x) >> 21) & 0x1)
#define C_000030_BUS_MSTR_RD_LINE 0xFFDFFFFF
#define S_000030_BUS_SUSPEND(x) (((x) & 0x1) << 22)
#define G_000030_BUS_SUSPEND(x) (((x) >> 22) & 0x1)
#define C_000030_BUS_SUSPEND 0xFFBFFFFF
#define S_000030_LAT_16X(x) (((x) & 0x1) << 23)
#define G_000030_LAT_16X(x) (((x) >> 23) & 0x1)
#define C_000030_LAT_16X 0xFF7FFFFF
#define S_000030_BUS_RD_DISCARD_EN(x) (((x) & 0x1) << 24)
#define G_000030_BUS_RD_DISCARD_EN(x) (((x) >> 24) & 0x1)
#define C_000030_BUS_RD_DISCARD_EN 0xFEFFFFFF
#define S_000030_ENFRCWRDY(x) (((x) & 0x1) << 25)
#define G_000030_ENFRCWRDY(x) (((x) >> 25) & 0x1)
#define C_000030_ENFRCWRDY 0xFDFFFFFF
#define S_000030_BUS_MSTR_WS(x) (((x) & 0x1) << 26)
#define G_000030_BUS_MSTR_WS(x) (((x) >> 26) & 0x1)
#define C_000030_BUS_MSTR_WS 0xFBFFFFFF
#define S_000030_BUS_PARKING_DIS(x) (((x) & 0x1) << 27)
#define G_000030_BUS_PARKING_DIS(x) (((x) >> 27) & 0x1)
#define C_000030_BUS_PARKING_DIS 0xF7FFFFFF
#define S_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) & 0x1) << 28)
#define G_000030_BUS_MSTR_DISCONNECT_EN(x) (((x) >> 28) & 0x1)
#define C_000030_BUS_MSTR_DISCONNECT_EN 0xEFFFFFFF
#define S_000030_SERR_EN(x) (((x) & 0x1) << 29)
#define G_000030_SERR_EN(x) (((x) >> 29) & 0x1)
#define C_000030_SERR_EN 0xDFFFFFFF
#define S_000030_BUS_READ_BURST(x) (((x) & 0x1) << 30)
#define G_000030_BUS_READ_BURST(x) (((x) >> 30) & 0x1)
#define C_000030_BUS_READ_BURST 0xBFFFFFFF
#define S_000030_BUS_RDY_READ_DLY(x) (((x) & 0x1) << 31)
#define G_000030_BUS_RDY_READ_DLY(x) (((x) >> 31) & 0x1)
#define C_000030_BUS_RDY_READ_DLY 0x7FFFFFFF
#define R_000040_GEN_INT_CNTL 0x000040
#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0)
#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1)
@ -710,5 +838,41 @@
#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
#define C_00000D_FORCE_RB 0xEFFFFFFF
/* PLL regs */
#define SCLK_CNTL 0xd
#define FORCE_HDP (1 << 17)
#define CLK_PWRMGT_CNTL 0x14
#define GLOBAL_PMAN_EN (1 << 10)
#define DISP_PM (1 << 20)
#define PLL_PWRMGT_CNTL 0x15
#define MPLL_TURNOFF (1 << 0)
#define SPLL_TURNOFF (1 << 1)
#define PPLL_TURNOFF (1 << 2)
#define P2PLL_TURNOFF (1 << 3)
#define TVPLL_TURNOFF (1 << 4)
#define MOBILE_SU (1 << 16)
#define SU_SCLK_USE_BCLK (1 << 17)
#define SCLK_CNTL2 0x1e
#define REDUCED_SPEED_SCLK_MODE (1 << 16)
#define REDUCED_SPEED_SCLK_SEL(x) ((x) << 17)
#define MCLK_MISC 0x1f
#define EN_MCLK_TRISTATE_IN_SUSPEND (1 << 18)
#define SCLK_MORE_CNTL 0x35
#define REDUCED_SPEED_SCLK_EN (1 << 16)
#define IO_CG_VOLTAGE_DROP (1 << 17)
#define VOLTAGE_DELAY_SEL(x) ((x) << 20)
#define VOLTAGE_DROP_SYNC (1 << 19)
/* mmreg */
#define DISP_PWR_MAN 0xd08
#define DISP_D3_GRPH_RST (1 << 18)
#define DISP_D3_SUBPIC_RST (1 << 19)
#define DISP_D3_OV0_RST (1 << 20)
#define DISP_D1D2_GRPH_RST (1 << 21)
#define DISP_D1D2_SUBPIC_RST (1 << 22)
#define DISP_D1D2_OV0_RST (1 << 23)
#define DISP_DVO_ENABLE_RST (1 << 24)
#define TV_ENABLE_RST (1 << 25)
#define AUTO_PWRUP_EN (1 << 26)
#endif
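
The R_/S_/G_/C_ quartet above follows the radeon shifted-register convention: R_* is the register offset, S_*(x) packs a field value, G_*(x) extracts it, and C_* is the mask with that field cleared. A read-modify-write of a single field then looks like this sketch (RREG32/WREG32 and DRM_INFO as used throughout the diff):

/* Sketch: flip one BUS_CNTL field using the accessor macros above. */
u32 tmp = RREG32(R_000030_BUS_CNTL);

tmp &= C_000030_BUS_MASTER_DIS;		/* clear the field */
tmp |= S_000030_BUS_MASTER_DIS(1);	/* set it: disable bus mastering */
WREG32(R_000030_BUS_CNTL, tmp);

/* read back and check */
if (G_000030_BUS_MASTER_DIS(RREG32(R_000030_BUS_CNTL)))
	DRM_INFO("bus mastering disabled\n");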

View File

@ -27,8 +27,9 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
@ -151,6 +152,10 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
u32 tmp;
int r;
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
@ -323,7 +328,6 @@ void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
r100_hdp_reset(rdev);
if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
(rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
/* r300,r350 */
@ -375,89 +379,85 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
int r300_ga_reset(struct radeon_device *rdev)
bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
uint32_t tmp;
bool reinit_cp;
int i;
u32 rbbm_status;
int r;
reinit_cp = rdev->cp.ready;
rdev->cp.ready = false;
for (i = 0; i < rdev->usec_timeout; i++) {
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
(void)RREG32(RADEON_RBBM_SOFT_RESET);
udelay(200);
WREG32(RADEON_RBBM_SOFT_RESET, 0);
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
tmp = RREG32(RADEON_RBBM_STATUS);
if (tmp & ((1 << 20) | (1 << 26))) {
DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
/* GA still busy soft reset it */
WREG32(0x429C, 0x200);
WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
WREG32(R300_RE_SCISSORS_TL, 0);
WREG32(R300_RE_SCISSORS_BR, 0);
WREG32(0x24AC, 0);
}
/* Wait to prevent race in RBBM_STATUS */
mdelay(1);
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & ((1 << 20) | (1 << 26)))) {
break;
}
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
return false;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
if (!(tmp & ((1 << 20) | (1 << 26)))) {
DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
tmp);
if (reinit_cp) {
return r100_cp_init(rdev, rdev->cp.ring_size);
}
return 0;
}
DRM_UDELAY(1);
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
tmp = RREG32(RADEON_RBBM_STATUS);
DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
return -1;
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
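The new lockup check replaces the old blocking reset path with a progress heuristic: if the GUI is idle the tracker is refreshed and no lockup is reported; otherwise a couple of PACKET2 NOPs are pushed to force CP activity and the read pointer is compared against the tracker. A minimal stand-alone model of that heuristic (hypothetical names, wall-clock time instead of the driver's jiffies-based timeout):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

struct lockup_tracker {
	uint32_t last_rptr;     /* CP read pointer at last observed progress */
	time_t   last_progress; /* when progress was last observed */
};

static bool cp_is_lockup(struct lockup_tracker *t, uint32_t rptr,
			 unsigned timeout_sec)
{
	time_t now = time(NULL);

	if (rptr != t->last_rptr) {
		/* the CP consumed something: record progress, no lockup */
		t->last_rptr = rptr;
		t->last_progress = now;
		return false;
	}
	/* no progress: only declare a lockup after the timeout elapses */
	return (now - t->last_progress) > (time_t)timeout_sec;
}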
int r300_gpu_reset(struct radeon_device *rdev)
int r300_asic_reset(struct radeon_device *rdev)
{
uint32_t status;
struct r100_mc_save save;
u32 status, tmp;
/* reset order likely matters */
status = RREG32(RADEON_RBBM_STATUS);
/* reset HDP */
r100_hdp_reset(rdev);
/* reset rb2d */
if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
r100_rb2d_reset(rdev);
}
/* reset GA */
if (status & ((1 << 20) | (1 << 26))) {
r300_ga_reset(rdev);
}
/* reset CP */
status = RREG32(RADEON_RBBM_STATUS);
if (status & (1 << 16)) {
r100_cp_reset(rdev);
r100_mc_stop(rdev, &save);
status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(status)) {
return 0;
}
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* stop CP */
WREG32(RADEON_CP_CSQ_CNTL, 0);
tmp = RREG32(RADEON_CP_RB_CNTL);
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
WREG32(RADEON_CP_RB_CNTL, tmp);
/* save PCI state */
pci_save_state(rdev->pdev);
/* disable bus mastering */
r100_bm_disable(rdev);
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* resetting the CP seems to be problematic: sometimes it ends up
* hard locking the computer, but it's necessary for a successful
* reset; more testing & playing is needed on R3XX/R4XX to find a
* reliable solution (if any)
*/
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
pci_restore_state(rdev->pdev);
r100_enable_bm(rdev);
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
return -1;
}
DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
r100_mc_resume(rdev, &save);
dev_info(rdev->dev, "GPU reset succeed\n");
return 0;
}
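The reset path above repeats one MMIO handshake several times: write the soft-reset bits, read the register back to post the write, wait, then clear the bits and wait again before checking status. A stand-alone model of that handshake (stubbed register I/O; names hypothetical, not the driver's API):

#include <stdint.h>
#include <unistd.h>

static uint32_t fake_mmio[0x1000];                    /* stand-in for the MMIO BAR */
static void wreg32(uint32_t r, uint32_t v) { fake_mmio[r] = v; }
static uint32_t rreg32(uint32_t r) { return fake_mmio[r]; }

static void soft_reset_blocks(uint32_t reset_reg, uint32_t bits)
{
	wreg32(reset_reg, bits);      /* assert reset on the selected blocks */
	(void)rreg32(reset_reg);      /* read back to flush the posted write */
	usleep(500 * 1000);           /* 500 ms settle time, as above */
	wreg32(reset_reg, 0);         /* release the blocks from reset */
	(void)rreg32(reset_reg);
	usleep(1000);                 /* brief settle before status checks */
}

/* e.g. soft_reset_blocks(0xF0, (1 << 2) | (1 << 13)) mirrors the VAP|GA pass */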
/*
* r300,r350,rv350,rv380 VRAM info
*/
@ -1316,7 +1316,7 @@ int r300_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r300_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -1344,7 +1344,6 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@ -1387,7 +1386,7 @@ int r300_init(struct radeon_device *rdev)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@ -1400,8 +1399,6 @@ int r300_init(struct radeon_device *rdev)
r300_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);

View File

@ -209,7 +209,52 @@
#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
#define G_0000F0_SOFT_RESET_CP(x) (((x) >> 0) & 0x1)
#define C_0000F0_SOFT_RESET_CP 0xFFFFFFFE
#define S_0000F0_SOFT_RESET_HI(x) (((x) & 0x1) << 1)
#define G_0000F0_SOFT_RESET_HI(x) (((x) >> 1) & 0x1)
#define C_0000F0_SOFT_RESET_HI 0xFFFFFFFD
#define S_0000F0_SOFT_RESET_VAP(x) (((x) & 0x1) << 2)
#define G_0000F0_SOFT_RESET_VAP(x) (((x) >> 2) & 0x1)
#define C_0000F0_SOFT_RESET_VAP 0xFFFFFFFB
#define S_0000F0_SOFT_RESET_RE(x) (((x) & 0x1) << 3)
#define G_0000F0_SOFT_RESET_RE(x) (((x) >> 3) & 0x1)
#define C_0000F0_SOFT_RESET_RE 0xFFFFFFF7
#define S_0000F0_SOFT_RESET_PP(x) (((x) & 0x1) << 4)
#define G_0000F0_SOFT_RESET_PP(x) (((x) >> 4) & 0x1)
#define C_0000F0_SOFT_RESET_PP 0xFFFFFFEF
#define S_0000F0_SOFT_RESET_E2(x) (((x) & 0x1) << 5)
#define G_0000F0_SOFT_RESET_E2(x) (((x) >> 5) & 0x1)
#define C_0000F0_SOFT_RESET_E2 0xFFFFFFDF
#define S_0000F0_SOFT_RESET_RB(x) (((x) & 0x1) << 6)
#define G_0000F0_SOFT_RESET_RB(x) (((x) >> 6) & 0x1)
#define C_0000F0_SOFT_RESET_RB 0xFFFFFFBF
#define S_0000F0_SOFT_RESET_HDP(x) (((x) & 0x1) << 7)
#define G_0000F0_SOFT_RESET_HDP(x) (((x) >> 7) & 0x1)
#define C_0000F0_SOFT_RESET_HDP 0xFFFFFF7F
#define S_0000F0_SOFT_RESET_MC(x) (((x) & 0x1) << 8)
#define G_0000F0_SOFT_RESET_MC(x) (((x) >> 8) & 0x1)
#define C_0000F0_SOFT_RESET_MC 0xFFFFFEFF
#define S_0000F0_SOFT_RESET_AIC(x) (((x) & 0x1) << 9)
#define G_0000F0_SOFT_RESET_AIC(x) (((x) >> 9) & 0x1)
#define C_0000F0_SOFT_RESET_AIC 0xFFFFFDFF
#define S_0000F0_SOFT_RESET_VIP(x) (((x) & 0x1) << 10)
#define G_0000F0_SOFT_RESET_VIP(x) (((x) >> 10) & 0x1)
#define C_0000F0_SOFT_RESET_VIP 0xFFFFFBFF
#define S_0000F0_SOFT_RESET_DISP(x) (((x) & 0x1) << 11)
#define G_0000F0_SOFT_RESET_DISP(x) (((x) >> 11) & 0x1)
#define C_0000F0_SOFT_RESET_DISP 0xFFFFF7FF
#define S_0000F0_SOFT_RESET_CG(x) (((x) & 0x1) << 12)
#define G_0000F0_SOFT_RESET_CG(x) (((x) >> 12) & 0x1)
#define C_0000F0_SOFT_RESET_CG 0xFFFFEFFF
#define S_0000F0_SOFT_RESET_GA(x) (((x) & 0x1) << 13)
#define G_0000F0_SOFT_RESET_GA(x) (((x) >> 13) & 0x1)
#define C_0000F0_SOFT_RESET_GA 0xFFFFDFFF
#define S_0000F0_SOFT_RESET_IDCT(x) (((x) & 0x1) << 14)
#define G_0000F0_SOFT_RESET_IDCT(x) (((x) >> 14) & 0x1)
#define C_0000F0_SOFT_RESET_IDCT 0xFFFFBFFF
#define R_00000D_SCLK_CNTL 0x00000D
#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)

View File

@ -36,6 +36,35 @@
#include "r420d.h"
#include "r420_reg_safe.h"
void r420_pm_init_profile(struct radeon_device *rdev)
{
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
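Each of these pm_init_profile routines just fills a fixed table indexed by profile: for every profile it records which power-state index and clock-mode index to use with displays off and on (sh/mh likely standing for single head/multi head, as in the commit log). A toy stand-alone version of the structure being filled (reduced, hypothetical types):

#include <stdio.h>

enum {
	PM_PROFILE_DEFAULT_IDX,
	PM_PROFILE_LOW_SH_IDX,   /* low power, single head */
	PM_PROFILE_HIGH_SH_IDX,  /* high power, single head */
	PM_PROFILE_LOW_MH_IDX,   /* low power, multi head */
	PM_PROFILE_HIGH_MH_IDX,  /* high power, multi head */
	PM_PROFILE_MAX
};

struct pm_profile {
	int dpms_off_ps_idx, dpms_on_ps_idx; /* power state indices */
	int dpms_off_cm_idx, dpms_on_cm_idx; /* clock mode indices */
};

int main(void)
{
	struct pm_profile profiles[PM_PROFILE_MAX] = { {0, 0, 0, 0} };
	int default_ps = 0; /* stand-in for default_power_state_index */

	profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = default_ps;
	profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx  = default_ps;
	profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx   = 1; /* as in r420 above */
	printf("low-sh on: ps=%d\n", profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx);
	return 0;
}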
static void r420_set_reg_safe(struct radeon_device *rdev)
{
rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
@ -241,7 +270,7 @@ int r420_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
r420_clock_resume(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -274,7 +303,6 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@ -322,7 +350,7 @@ int r420_init(struct radeon_device *rdev)
}
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@ -334,8 +362,6 @@ int r420_init(struct radeon_device *rdev)
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);

View File

@ -347,9 +347,11 @@
#define AVIVO_D1CRTC_CONTROL 0x6080
# define AVIVO_CRTC_EN (1 << 0)
# define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
@ -488,6 +490,7 @@
#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
#define AVIVO_D2CRTC_STATUS_POSITION 0x68a0
#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4
#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4

View File

@ -53,7 +53,6 @@ static void r520_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
r100_hdp_reset(rdev);
rv515_vga_render_disable(rdev);
/*
* DST_PIPE_CONFIG 0x170C
@ -209,7 +208,7 @@ int r520_resume(struct radeon_device *rdev)
/* Resume clock before doing reset */
rv515_clock_startup(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
@ -246,7 +245,7 @@ int r520_init(struct radeon_device *rdev)
return -EINVAL;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_gpu_reset(rdev)) {
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
@ -262,8 +261,6 @@ int r520_init(struct radeon_device *rdev)
}
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
/* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);

View File

@ -44,6 +44,9 @@
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@ -68,6 +71,18 @@ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@ -75,6 +90,401 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
/* power state array is low to high, default is first */
if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
int min_power_state_index = 0;
if (rdev->pm.num_power_states > 2)
min_power_state_index = 1;
switch (rdev->pm.dynpm_planned_action) {
case DYNPM_ACTION_MINIMUM:
rdev->pm.requested_power_state_index = min_power_state_index;
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_downclock = false;
break;
case DYNPM_ACTION_DOWNCLOCK:
if (rdev->pm.current_power_state_index == min_power_state_index) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
rdev->pm.dynpm_can_downclock = false;
} else {
if (rdev->pm.active_crtc_count > 1) {
for (i = 0; i < rdev->pm.num_power_states; i++) {
if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
continue;
else if (i >= rdev->pm.current_power_state_index) {
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index;
break;
} else {
rdev->pm.requested_power_state_index = i;
break;
}
}
} else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index - 1;
}
rdev->pm.requested_clock_mode_index = 0;
/* don't use the power state if crtcs are active and no display flag is set */
if ((rdev->pm.active_crtc_count > 0) &&
(rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].flags &
RADEON_PM_MODE_NO_DISPLAY)) {
rdev->pm.requested_power_state_index++;
}
break;
case DYNPM_ACTION_UPCLOCK:
if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
rdev->pm.dynpm_can_upclock = false;
} else {
if (rdev->pm.active_crtc_count > 1) {
for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
continue;
else if (i <= rdev->pm.current_power_state_index) {
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index;
break;
} else {
rdev->pm.requested_power_state_index = i;
break;
}
}
} else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index + 1;
}
rdev->pm.requested_clock_mode_index = 0;
break;
case DYNPM_ACTION_DEFAULT:
rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_upclock = false;
break;
case DYNPM_ACTION_NONE:
default:
DRM_ERROR("Requested mode for not defined action\n");
return;
}
} else {
/* XXX select a power state based on AC/DC, single/dualhead, etc. */
/* for now just select the first power state and switch between clock modes */
/* power state array is low to high, default is first (0) */
if (rdev->pm.active_crtc_count > 1) {
rdev->pm.requested_power_state_index = -1;
/* start at 1 as we don't want the default mode */
for (i = 1; i < rdev->pm.num_power_states; i++) {
if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
continue;
else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
(rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
rdev->pm.requested_power_state_index = i;
break;
}
}
/* if nothing selected, grab the default state. */
if (rdev->pm.requested_power_state_index == -1)
rdev->pm.requested_power_state_index = 0;
} else
rdev->pm.requested_power_state_index = 1;
switch (rdev->pm.dynpm_planned_action) {
case DYNPM_ACTION_MINIMUM:
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_downclock = false;
break;
case DYNPM_ACTION_DOWNCLOCK:
if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
if (rdev->pm.current_clock_mode_index == 0) {
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_downclock = false;
} else
rdev->pm.requested_clock_mode_index =
rdev->pm.current_clock_mode_index - 1;
} else {
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_downclock = false;
}
/* don't use the power state if crtcs are active and no display flag is set */
if ((rdev->pm.active_crtc_count > 0) &&
(rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].flags &
RADEON_PM_MODE_NO_DISPLAY)) {
rdev->pm.requested_clock_mode_index++;
}
break;
case DYNPM_ACTION_UPCLOCK:
if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
if (rdev->pm.current_clock_mode_index ==
(rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
rdev->pm.dynpm_can_upclock = false;
} else
rdev->pm.requested_clock_mode_index =
rdev->pm.current_clock_mode_index + 1;
} else {
rdev->pm.requested_clock_mode_index =
rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
rdev->pm.dynpm_can_upclock = false;
}
break;
case DYNPM_ACTION_DEFAULT:
rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_upclock = false;
break;
case DYNPM_ACTION_NONE:
default:
DRM_ERROR("Requested mode for not defined action\n");
return;
}
}
DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].mclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
pcie_lanes);
}
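The up- and downclock walks above share one rule: with more than one active CRTC, states flagged single-display-only must be skipped, and the chosen index is clamped so it never moves past the current state in the wrong direction. A distilled stand-alone version of the upclock walk (hypothetical names, not the driver's API):

#include <stdbool.h>

#define STATE_SINGLE_DISPLAY_ONLY 0x1u

/* Return the power-state index to request when upclocking. */
static int pick_upclock_state(const unsigned *state_flags, int num_states,
			      int current, int active_crtcs)
{
	if (active_crtcs <= 1)
		return current + 1 < num_states ? current + 1 : current;

	/* multi-head: take the highest state usable with several displays */
	for (int i = num_states - 1; i >= 0; i--) {
		if (state_flags[i] & STATE_SINGLE_DISPLAY_ONLY)
			continue;
		/* never move below the current state while upclocking */
		return i <= current ? current : i;
	}
	return current;
}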
static int r600_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance)
{
int i;
int found_instance = -1;
for (i = 0; i < rdev->pm.num_power_states; i++) {
if (rdev->pm.power_state[i].type == ps_type) {
found_instance++;
if (found_instance == instance)
return i;
}
}
/* return default if no match */
return rdev->pm.default_power_state_index;
}
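This helper linearly scans the power-state array for the nth state of a given type, falling back to the default index; its use is visible in the profile setup that follows, e.g. (shape copied from the code below, not a new API):

	/* first battery-type state for the low single-head profile */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
		r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);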
void rs780_pm_init_profile(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states == 2) {
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
} else if (rdev->pm.num_power_states == 3) {
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
} else {
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
}
void r600_pm_init_profile(struct radeon_device *rdev)
{
if (rdev->family == CHIP_R600) {
/* XXX */
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
} else {
if (rdev->pm.num_power_states < 4) {
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
} else {
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
/* low sh */
if (rdev->flags & RADEON_IS_MOBILITY) {
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
} else {
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
}
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
/* low mh */
if (rdev->flags & RADEON_IS_MOBILITY) {
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
} else {
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
}
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
}
}
}
void r600_pm_misc(struct radeon_device *rdev)
{
}
bool r600_gui_idle(struct radeon_device *rdev)
{
if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
return false;
else
return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
@ -714,11 +1124,6 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
/* FIXME remove this once we support unmappable VRAM */
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
r600_vram_gtt_location(rdev, &rdev->mc);
if (rdev->flags & RADEON_IS_IGP)
@ -750,7 +1155,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
u32 srbm_reset = 0;
u32 tmp;
dev_info(rdev->dev, "GPU softreset \n");
@ -765,7 +1169,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Disable CP parsing/prefetching */
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
/* Check if any of the rendering block is busy and reset it */
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@ -784,72 +1188,56 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
S_008020_SOFT_RESET_VGT(1);
dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
udelay(50);
RREG32(R_008020_GRBM_SOFT_RESET);
mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
}
/* Reset CP (we always reset CP) */
tmp = S_008020_SOFT_RESET_CP(1);
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
udelay(50);
RREG32(R_008020_GRBM_SOFT_RESET);
mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
(void)RREG32(R_008020_GRBM_SOFT_RESET);
/* Reset others GPU block if necessary */
if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_IH(1);
if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_MC(1);
if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
udelay(50);
WREG32(R_000E60_SRBM_SOFT_RESET, 0);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
udelay(50);
WREG32(R_000E60_SRBM_SOFT_RESET, 0);
(void)RREG32(R_000E60_SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
mdelay(1);
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
/* After reset we need to reinit the asic as the GPU often ends up
* in an incoherent state.
*/
atom_asic_init(rdev->mode_info.atom_context);
rv515_mc_resume(rdev, &save);
return 0;
}
int r600_gpu_reset(struct radeon_device *rdev)
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
int r;
srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
int r600_asic_reset(struct radeon_device *rdev)
{
return r600_gpu_soft_reset(rdev);
}
@ -1467,10 +1855,31 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "RV710";
rlc_chip_name = "R700";
break;
case CHIP_CEDAR:
chip_name = "CEDAR";
rlc_chip_name = "CEDAR";
break;
case CHIP_REDWOOD:
chip_name = "REDWOOD";
rlc_chip_name = "REDWOOD";
break;
case CHIP_JUNIPER:
chip_name = "JUNIPER";
rlc_chip_name = "JUNIPER";
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
chip_name = "CYPRESS";
rlc_chip_name = "CYPRESS";
break;
default: BUG();
}
if (rdev->family >= CHIP_RV770) {
if (rdev->family >= CHIP_CEDAR) {
pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
} else if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
rlc_req_size = R700_RLC_UCODE_SIZE * 4;
@ -1584,12 +1993,15 @@ int r600_cp_start(struct radeon_device *rdev)
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
if (rdev->family < CHIP_RV770) {
radeon_ring_write(rdev, 0x3);
radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
} else {
if (rdev->family >= CHIP_CEDAR) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
} else if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
} else {
radeon_ring_write(rdev, 0x3);
radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
}
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
@ -2051,8 +2463,6 @@ int r600_init(struct radeon_device *rdev)
r = radeon_clocks_init(rdev);
if (r)
return r;
/* Initialize power management */
radeon_pm_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
@ -2117,7 +2527,6 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
@ -2290,10 +2699,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
}
}
static void r600_rlc_stop(struct radeon_device *rdev)
void r600_rlc_stop(struct radeon_device *rdev)
{
if (rdev->family >= CHIP_RV770) {
if ((rdev->family >= CHIP_RV770) &&
(rdev->family <= CHIP_RV740)) {
/* r7xx asics need to soft reset RLC before halting */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
RREG32(SRBM_SOFT_RESET);
@ -2330,7 +2740,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
WREG32(RLC_UCODE_CNTL, 0);
fw_data = (const __be32 *)rdev->rlc_fw->data;
if (rdev->family >= CHIP_RV770) {
if (rdev->family >= CHIP_CEDAR) {
for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
} else if (rdev->family >= CHIP_RV770) {
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@ -2360,7 +2775,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
rdev->ih.enabled = true;
}
static void r600_disable_interrupts(struct radeon_device *rdev)
void r600_disable_interrupts(struct radeon_device *rdev)
{
u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
u32 ih_cntl = RREG32(IH_CNTL);
@ -2475,7 +2890,10 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(IH_CNTL, ih_cntl);
/* force the active interrupt state to all disabled */
r600_disable_interrupt_state(rdev);
if (rdev->family >= CHIP_CEDAR)
evergreen_disable_interrupt_state(rdev);
else
r600_disable_interrupt_state(rdev);
/* enable irqs */
r600_enable_interrupts(rdev);
@ -2485,7 +2903,7 @@ int r600_irq_init(struct radeon_device *rdev)
void r600_irq_suspend(struct radeon_device *rdev)
{
r600_disable_interrupts(rdev);
r600_irq_disable(rdev);
r600_rlc_stop(rdev);
}
@ -2500,6 +2918,8 @@ int r600_irq_set(struct radeon_device *rdev)
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi1, hdmi2;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
@ -2513,7 +2933,9 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
if (ASIC_IS_DCE3(rdev)) {
hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@ -2523,6 +2945,7 @@ int r600_irq_set(struct radeon_device *rdev)
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
}
} else {
hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@ -2564,10 +2987,25 @@ int r600_irq_set(struct radeon_device *rdev)
DRM_DEBUG("r600_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hdmi[0]) {
DRM_DEBUG("r600_irq_set: hdmi 1\n");
hdmi1 |= R600_HDMI_INT_EN;
}
if (rdev->irq.hdmi[1]) {
DRM_DEBUG("r600_irq_set: hdmi 2\n");
hdmi2 |= R600_HDMI_INT_EN;
}
if (rdev->irq.gui_idle) {
DRM_DEBUG("gui idle\n");
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
}
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@ -2577,6 +3015,7 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD6_INT_CONTROL, hpd6);
}
} else {
WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
@ -2660,6 +3099,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
}
if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
}
if (ASIC_IS_DCE3(rdev)) {
if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
}
} else {
if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
}
}
}
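The ack path relies on WREG32_P(reg, val, mask), radeon's masked read-modify-write helper: bits covered by mask are preserved and the remaining bits are replaced by val. A stand-alone model of that semantic (stubbed I/O; a sketch, not the driver's definition):

#include <stdint.h>

static uint32_t fake_reg;
static uint32_t rreg32(void) { return fake_reg; }
static void wreg32(uint32_t v) { fake_reg = v; }

static void wreg32_p(uint32_t val, uint32_t mask)
{
	uint32_t tmp = rreg32();
	tmp &= mask;          /* keep the bits selected by mask */
	tmp |= val & ~mask;   /* merge the new bits into the rest */
	wreg32(tmp);
}

/* e.g. wreg32_p(HDMI_INT_ACK, ~HDMI_INT_ACK) sets only the ack bit */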
void r600_irq_disable(struct radeon_device *rdev)
@ -2713,6 +3164,8 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
* 19 1 FP Hot plug detection B
* 19 2 DAC A auto-detection
* 19 3 DAC B auto-detection
* 21 4 HDMI block A
* 21 5 HDMI block B
* 176 - CP_INT RB
* 177 - CP_INT IB1
* 178 - CP_INT IB2
@ -2852,6 +3305,10 @@ restart_ih:
break;
}
break;
case 21: /* HDMI */
DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
r600_audio_schedule_polling(rdev);
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@ -2861,6 +3318,11 @@ restart_ih:
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: CP EOP\n");
rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;

Some files were not shown because too many files have changed in this diff.