linux-2.6/drivers/gpu/drm/radeon/r420.c
Commit 4c712e6c7e by Dave Airlie: drm/radeon/kms: check/restore sanity before doing anything else with GPU.
On systems using kexec, the new kernel is booted straight from the old kernel without any warning to the graphics driver, so the GPU is left as-is in a running state while the CPU side is completely reset.

Without commenting on the sanity of using kexec on live systems, we should at least try not to crash the GPU. This patch resets to 0 three registers that could otherwise cause bad things to happen to the running system.

This allows kexec to work on a Power6/RN50 system.

Signed-off-by: Dave Airlie <airlied@redhat.com>
2010-08-02 10:00:09 +10:00
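The sanity restore the commit describes is not in this file; r420_init() below calls r100_restore_sanity(), which lives in the shared r100 code. As a rough, from-memory sketch of the idea (the register names are recalled, not quoted from the commit, and may differ in detail): each register that would let a still-running command processor scribble over the new kernel is read and, if non-zero, forced back to 0.

void r100_restore_sanity(struct radeon_device *rdev)
{
        u32 tmp;

        /* Illustrative only: register names recalled from the r100 code,
         * not taken verbatim from the commit. */
        tmp = RREG32(RADEON_CP_CSQ_CNTL);
        if (tmp)
                WREG32(RADEON_CP_CSQ_CNTL, 0);
        tmp = RREG32(RADEON_CP_RB_CNTL);
        if (tmp)
                WREG32(RADEON_CP_RB_CNTL, 0);
        tmp = RREG32(RADEON_SCRATCH_UMSK);
        if (tmp)
                WREG32(RADEON_SCRATCH_UMSK, 0);
}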


/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
#include "r420_reg_safe.h"
void r420_pm_init_profile(struct radeon_device *rdev)
{
        /* default */
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
        /* low sh */
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
        /* mid sh */
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
        /* high sh */
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
        /* low mh */
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
        /* mid mh */
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
        /* high mh */
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
static void r420_set_reg_safe(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}
void r420_pipes_init(struct radeon_device *rdev)
{
        unsigned tmp;
        unsigned gb_pipe_select;
        unsigned num_pipes;

        /* GA_ENHANCE workaround TCL deadlock issue */
        WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
                (1 << 2) | (1 << 3));
        /* add idle wait as per freedesktop.org bug 24041 */
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                        "programming pipes. Bad things might happen.\n");
        }
        /* get max number of pipes (0x402C is R400_GB_PIPE_SELECT) */
        gb_pipe_select = RREG32(0x402C);
        num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
        /* SE chips have 1 pipe */
        if ((rdev->pdev->device == 0x5e4c) ||
            (rdev->pdev->device == 0x5e4f))
                num_pipes = 1;
        rdev->num_gb_pipes = num_pipes;
        tmp = 0;
        switch (num_pipes) {
        default:
                /* force to 1 pipe */
                num_pipes = 1;
                /* fall through */
        case 1:
                tmp = (0 << 1);
                break;
        case 2:
                tmp = (3 << 1);
                break;
        case 3:
                tmp = (6 << 1);
                break;
        case 4:
                tmp = (7 << 1);
                break;
        }
        WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
        WREG32(R300_GB_TILE_CONFIG, tmp);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                        "programming pipes. Bad things might happen.\n");
        }

        tmp = RREG32(R300_DST_PIPE_CONFIG);
        WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

        WREG32(R300_RB2D_DSTCACHE_MODE,
                RREG32(R300_RB2D_DSTCACHE_MODE) |
                R300_DC_AUTOFLUSH_ENABLE |
                R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                        "programming pipes. Bad things might happen.\n");
        }

        if (rdev->family == CHIP_RV530) {
                tmp = RREG32(RV530_GB_PIPE_SELECT2);
                if ((tmp & 3) == 3)
                        rdev->num_z_pipes = 2;
                else
                        rdev->num_z_pipes = 1;
        } else
                rdev->num_z_pipes = 1;

        DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
}
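/*
 * MC (memory controller) registers are not mapped into the normal register
 * aperture; they are reached indirectly through the MC_IND_INDEX/MC_IND_DATA
 * register pair, with the write-enable bit set for stores.
 */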
u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
{
        u32 r;

        WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
        r = RREG32(R_0001FC_MC_IND_DATA);
        return r;
}

void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
        WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
                S_0001F8_MC_IND_WR_EN(1));
        WREG32(R_0001FC_MC_IND_DATA, v);
}
static void r420_debugfs(struct radeon_device *rdev)
{
        if (r100_debugfs_rbbm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for RBBM !\n");
        }
        if (r420_debugfs_pipes_info_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for pipes !\n");
        }
}
static void r420_clock_resume(struct radeon_device *rdev)
{
        u32 sclk_cntl;

        if (radeon_dynclks != -1 && radeon_dynclks)
                radeon_atom_set_clock_gating(rdev, 1);
        sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
        sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
        if (rdev->family == CHIP_R420)
                sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
        WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
}
static void r420_cp_errata_init(struct radeon_device *rdev)
{
        /* RV410 and R420 can lock up if CP DMA to host memory happens
         * while the 2D engine is busy.
         *
         * The proper workaround is to queue a RESYNC at the beginning
         * of the CP init, apparently.
         */
        radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
        radeon_ring_lock(rdev, 8);
        radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
        radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
        radeon_ring_write(rdev, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev);
}
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
        /* Catch the RESYNC we dispatched all the way back,
         * at the very beginning of the CP init.
         */
        radeon_ring_lock(rdev, 8);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
        radeon_ring_unlock_commit(rdev);
        radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
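/*
 * Bring up everything needed for acceleration: common registers, memory
 * controller, clocks, GART, pipes, IRQs, the CP ring, writeback and the IB
 * pool. Shared by the init and resume paths.
 */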
static int r420_startup(struct radeon_device *rdev)
{
        int r;

        /* set common regs */
        r100_set_common_regs(rdev);
        /* program mc */
        r300_mc_program(rdev);
        /* Resume clock */
        r420_clock_resume(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_enable(rdev);
                if (r)
                        return r;
        }
        r420_pipes_init(rdev);
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r420_cp_errata_init(rdev);
        r = r100_wb_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        }
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}
int r420_resume(struct radeon_device *rdev)
{
        /* Make sure GART is not working */
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        /* Resume clock before doing reset */
        r420_clock_resume(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* post the card */
        if (rdev->is_atom_bios) {
                atom_asic_init(rdev->mode_info.atom_context);
        } else {
                radeon_combios_asic_init(rdev->ddev);
        }
        /* Resume clock after posting */
        r420_clock_resume(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return r420_startup(rdev);
}
int r420_suspend(struct radeon_device *rdev)
{
        r420_cp_errata_fini(rdev);
        r100_cp_disable(rdev);
        r100_wb_disable(rdev);
        r100_irq_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        return 0;
}
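/*
 * Tear down, roughly in reverse order, everything set up by r420_init().
 */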
void r420_fini(struct radeon_device *rdev)
{
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_fini(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        if (rdev->is_atom_bios) {
                radeon_atombios_fini(rdev);
        } else {
                radeon_combios_fini(rdev);
        }
        kfree(rdev->bios);
        rdev->bios = NULL;
}
int r420_init(struct radeon_device *rdev)
{
        int r;

        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA; need to use VGA request */
        /* restore some registers to sane defaults: a previous kernel (e.g. on
         * kexec) may have left the GPU running */
        r100_restore_sanity(rdev);
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r) {
                        return r;
                }
        } else {
                r = radeon_combios_init(rdev);
                if (r) {
                        return r;
                }
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if card is posted and, if not, post it */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        radeon_agp_disable(rdev);
                }
        }
        /* initialize memory controller */
        r300_mc_init(rdev);
        r420_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r) {
                return r;
        }
        r = radeon_irq_kms_init(rdev);
        if (r) {
                return r;
        }
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r) {
                return r;
        }
        if (rdev->family == CHIP_R420)
                r100_enable_bm(rdev);
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_init(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_init(rdev);
                if (r)
                        return r;
        }
        r420_set_reg_safe(rdev);
        rdev->accel_working = true;
        r = r420_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
                radeon_agp_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(R400_GB_PIPE_SELECT);
        seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
        tmp = RREG32(R300_GB_TILE_CONFIG);
        seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
        tmp = RREG32(R300_DST_PIPE_CONFIG);
        seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list r420_pipes_info_list[] = {
        {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
};
#endif

int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
#else
        return 0;
#endif
}