drm/radeon: fix & improve ih ring handling v3
The spinlock was actually there to protect the rptr, but rptr was read outside of the locked area. Also we don't really need a spinlock here; an atomic should do quite fine since we only need to prevent it from being reentrant. v2: Keep the spinlock.... v3: Back to an atomic again after finding & fixing the real bug. Signed-off-by: Christian Koenig <christian.koenig@amd.com>
This commit is contained in:
parent
6823d74003
commit
c20dc3698d
|
@ -2676,7 +2676,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
|
||||||
u32 rptr;
|
u32 rptr;
|
||||||
u32 src_id, src_data;
|
u32 src_id, src_data;
|
||||||
u32 ring_index;
|
u32 ring_index;
|
||||||
unsigned long flags;
|
|
||||||
bool queue_hotplug = false;
|
bool queue_hotplug = false;
|
||||||
bool queue_hdmi = false;
|
bool queue_hdmi = false;
|
||||||
|
|
||||||
|
@ -2684,22 +2683,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
|
||||||
return IRQ_NONE;
|
return IRQ_NONE;
|
||||||
|
|
||||||
wptr = evergreen_get_ih_wptr(rdev);
|
wptr = evergreen_get_ih_wptr(rdev);
|
||||||
|
|
||||||
|
restart_ih:
|
||||||
|
/* is somebody else already processing irqs? */
|
||||||
|
if (atomic_xchg(&rdev->ih.lock, 1))
|
||||||
|
return IRQ_NONE;
|
||||||
|
|
||||||
rptr = rdev->ih.rptr;
|
rptr = rdev->ih.rptr;
|
||||||
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
|
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
|
||||||
|
|
||||||
spin_lock_irqsave(&rdev->ih.lock, flags);
|
|
||||||
if (rptr == wptr) {
|
|
||||||
spin_unlock_irqrestore(&rdev->ih.lock, flags);
|
|
||||||
return IRQ_NONE;
|
|
||||||
}
|
|
||||||
restart_ih:
|
|
||||||
/* Order reading of wptr vs. reading of IH ring data */
|
/* Order reading of wptr vs. reading of IH ring data */
|
||||||
rmb();
|
rmb();
|
||||||
|
|
||||||
/* display interrupts */
|
/* display interrupts */
|
||||||
evergreen_irq_ack(rdev);
|
evergreen_irq_ack(rdev);
|
||||||
|
|
||||||
rdev->ih.wptr = wptr;
|
|
||||||
while (rptr != wptr) {
|
while (rptr != wptr) {
|
||||||
/* wptr/rptr are in bytes! */
|
/* wptr/rptr are in bytes! */
|
||||||
ring_index = rptr / 4;
|
ring_index = rptr / 4;
|
||||||
|
@ -2998,17 +2996,19 @@ restart_ih:
|
||||||
rptr += 16;
|
rptr += 16;
|
||||||
rptr &= rdev->ih.ptr_mask;
|
rptr &= rdev->ih.ptr_mask;
|
||||||
}
|
}
|
||||||
/* make sure wptr hasn't changed while processing */
|
|
||||||
wptr = evergreen_get_ih_wptr(rdev);
|
|
||||||
if (wptr != rdev->ih.wptr)
|
|
||||||
goto restart_ih;
|
|
||||||
if (queue_hotplug)
|
if (queue_hotplug)
|
||||||
schedule_work(&rdev->hotplug_work);
|
schedule_work(&rdev->hotplug_work);
|
||||||
if (queue_hdmi)
|
if (queue_hdmi)
|
||||||
schedule_work(&rdev->audio_work);
|
schedule_work(&rdev->audio_work);
|
||||||
rdev->ih.rptr = rptr;
|
rdev->ih.rptr = rptr;
|
||||||
WREG32(IH_RB_RPTR, rdev->ih.rptr);
|
WREG32(IH_RB_RPTR, rdev->ih.rptr);
|
||||||
spin_unlock_irqrestore(&rdev->ih.lock, flags);
|
atomic_set(&rdev->ih.lock, 0);
|
||||||
|
|
||||||
|
/* make sure wptr hasn't changed while processing */
|
||||||
|
wptr = evergreen_get_ih_wptr(rdev);
|
||||||
|
if (wptr != rptr)
|
||||||
|
goto restart_ih;
|
||||||
|
|
||||||
return IRQ_HANDLED;
|
return IRQ_HANDLED;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -2858,7 +2858,6 @@ void r600_disable_interrupts(struct radeon_device *rdev)
|
||||||
WREG32(IH_RB_RPTR, 0);
|
WREG32(IH_RB_RPTR, 0);
|
||||||
WREG32(IH_RB_WPTR, 0);
|
WREG32(IH_RB_WPTR, 0);
|
||||||
rdev->ih.enabled = false;
|
rdev->ih.enabled = false;
|
||||||
rdev->ih.wptr = 0;
|
|
||||||
rdev->ih.rptr = 0;
|
rdev->ih.rptr = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3310,7 +3309,6 @@ int r600_irq_process(struct radeon_device *rdev)
|
||||||
u32 rptr;
|
u32 rptr;
|
||||||
u32 src_id, src_data;
|
u32 src_id, src_data;
|
||||||
u32 ring_index;
|
u32 ring_index;
|
||||||
unsigned long flags;
|
|
||||||
bool queue_hotplug = false;
|
bool queue_hotplug = false;
|
||||||
bool queue_hdmi = false;
|
bool queue_hdmi = false;
|
||||||
|
|
||||||
|
@ -3322,24 +3320,21 @@ int r600_irq_process(struct radeon_device *rdev)
|
||||||
RREG32(IH_RB_WPTR);
|
RREG32(IH_RB_WPTR);
|
||||||
|
|
||||||
wptr = r600_get_ih_wptr(rdev);
|
wptr = r600_get_ih_wptr(rdev);
|
||||||
|
|
||||||
|
restart_ih:
|
||||||
|
/* is somebody else already processing irqs? */
|
||||||
|
if (atomic_xchg(&rdev->ih.lock, 1))
|
||||||
|
return IRQ_NONE;
|
||||||
|
|
||||||
rptr = rdev->ih.rptr;
|
rptr = rdev->ih.rptr;
|
||||||
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
|
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
|
||||||
|
|
||||||
spin_lock_irqsave(&rdev->ih.lock, flags);
|
|
||||||
|
|
||||||
if (rptr == wptr) {
|
|
||||||
spin_unlock_irqrestore(&rdev->ih.lock, flags);
|
|
||||||
return IRQ_NONE;
|
|
||||||
}
|
|
||||||
|
|
||||||
restart_ih:
|
|
||||||
/* Order reading of wptr vs. reading of IH ring data */
|
/* Order reading of wptr vs. reading of IH ring data */
|
||||||
rmb();
|
rmb();
|
||||||
|
|
||||||
/* display interrupts */
|
/* display interrupts */
|
||||||
r600_irq_ack(rdev);
|
r600_irq_ack(rdev);
|
||||||
|
|
||||||
rdev->ih.wptr = wptr;
|
|
||||||
while (rptr != wptr) {
|
while (rptr != wptr) {
|
||||||
/* wptr/rptr are in bytes! */
|
/* wptr/rptr are in bytes! */
|
||||||
ring_index = rptr / 4;
|
ring_index = rptr / 4;
|
||||||
|
@ -3493,17 +3488,19 @@ restart_ih:
|
||||||
rptr += 16;
|
rptr += 16;
|
||||||
rptr &= rdev->ih.ptr_mask;
|
rptr &= rdev->ih.ptr_mask;
|
||||||
}
|
}
|
||||||
/* make sure wptr hasn't changed while processing */
|
|
||||||
wptr = r600_get_ih_wptr(rdev);
|
|
||||||
if (wptr != rdev->ih.wptr)
|
|
||||||
goto restart_ih;
|
|
||||||
if (queue_hotplug)
|
if (queue_hotplug)
|
||||||
schedule_work(&rdev->hotplug_work);
|
schedule_work(&rdev->hotplug_work);
|
||||||
if (queue_hdmi)
|
if (queue_hdmi)
|
||||||
schedule_work(&rdev->audio_work);
|
schedule_work(&rdev->audio_work);
|
||||||
rdev->ih.rptr = rptr;
|
rdev->ih.rptr = rptr;
|
||||||
WREG32(IH_RB_RPTR, rdev->ih.rptr);
|
WREG32(IH_RB_RPTR, rdev->ih.rptr);
|
||||||
spin_unlock_irqrestore(&rdev->ih.lock, flags);
|
atomic_set(&rdev->ih.lock, 0);
|
||||||
|
|
||||||
|
/* make sure wptr hasn't changed while processing */
|
||||||
|
wptr = r600_get_ih_wptr(rdev);
|
||||||
|
if (wptr != rptr)
|
||||||
|
goto restart_ih;
|
||||||
|
|
||||||
return IRQ_HANDLED;
|
return IRQ_HANDLED;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -738,11 +738,10 @@ struct r600_ih {
|
||||||
struct radeon_bo *ring_obj;
|
struct radeon_bo *ring_obj;
|
||||||
volatile uint32_t *ring;
|
volatile uint32_t *ring;
|
||||||
unsigned rptr;
|
unsigned rptr;
|
||||||
unsigned wptr;
|
|
||||||
unsigned ring_size;
|
unsigned ring_size;
|
||||||
uint64_t gpu_addr;
|
uint64_t gpu_addr;
|
||||||
uint32_t ptr_mask;
|
uint32_t ptr_mask;
|
||||||
spinlock_t lock;
|
atomic_t lock;
|
||||||
bool enabled;
|
bool enabled;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -731,8 +731,7 @@ int radeon_device_init(struct radeon_device *rdev,
|
||||||
radeon_mutex_init(&rdev->cs_mutex);
|
radeon_mutex_init(&rdev->cs_mutex);
|
||||||
mutex_init(&rdev->ring_lock);
|
mutex_init(&rdev->ring_lock);
|
||||||
mutex_init(&rdev->dc_hw_i2c_mutex);
|
mutex_init(&rdev->dc_hw_i2c_mutex);
|
||||||
if (rdev->family >= CHIP_R600)
|
atomic_set(&rdev->ih.lock, 0);
|
||||||
spin_lock_init(&rdev->ih.lock);
|
|
||||||
mutex_init(&rdev->gem.mutex);
|
mutex_init(&rdev->gem.mutex);
|
||||||
mutex_init(&rdev->pm.mutex);
|
mutex_init(&rdev->pm.mutex);
|
||||||
init_rwsem(&rdev->pm.mclk_lock);
|
init_rwsem(&rdev->pm.mclk_lock);
|
||||||
|
|
|
@ -2942,7 +2942,6 @@ static void si_disable_interrupts(struct radeon_device *rdev)
|
||||||
WREG32(IH_RB_RPTR, 0);
|
WREG32(IH_RB_RPTR, 0);
|
||||||
WREG32(IH_RB_WPTR, 0);
|
WREG32(IH_RB_WPTR, 0);
|
||||||
rdev->ih.enabled = false;
|
rdev->ih.enabled = false;
|
||||||
rdev->ih.wptr = 0;
|
|
||||||
rdev->ih.rptr = 0;
|
rdev->ih.rptr = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3359,29 +3358,27 @@ int si_irq_process(struct radeon_device *rdev)
|
||||||
u32 rptr;
|
u32 rptr;
|
||||||
u32 src_id, src_data, ring_id;
|
u32 src_id, src_data, ring_id;
|
||||||
u32 ring_index;
|
u32 ring_index;
|
||||||
unsigned long flags;
|
|
||||||
bool queue_hotplug = false;
|
bool queue_hotplug = false;
|
||||||
|
|
||||||
if (!rdev->ih.enabled || rdev->shutdown)
|
if (!rdev->ih.enabled || rdev->shutdown)
|
||||||
return IRQ_NONE;
|
return IRQ_NONE;
|
||||||
|
|
||||||
wptr = si_get_ih_wptr(rdev);
|
wptr = si_get_ih_wptr(rdev);
|
||||||
|
|
||||||
|
restart_ih:
|
||||||
|
/* is somebody else already processing irqs? */
|
||||||
|
if (atomic_xchg(&rdev->ih.lock, 1))
|
||||||
|
return IRQ_NONE;
|
||||||
|
|
||||||
rptr = rdev->ih.rptr;
|
rptr = rdev->ih.rptr;
|
||||||
DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
|
DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
|
||||||
|
|
||||||
spin_lock_irqsave(&rdev->ih.lock, flags);
|
|
||||||
if (rptr == wptr) {
|
|
||||||
spin_unlock_irqrestore(&rdev->ih.lock, flags);
|
|
||||||
return IRQ_NONE;
|
|
||||||
}
|
|
||||||
restart_ih:
|
|
||||||
/* Order reading of wptr vs. reading of IH ring data */
|
/* Order reading of wptr vs. reading of IH ring data */
|
||||||
rmb();
|
rmb();
|
||||||
|
|
||||||
/* display interrupts */
|
/* display interrupts */
|
||||||
si_irq_ack(rdev);
|
si_irq_ack(rdev);
|
||||||
|
|
||||||
rdev->ih.wptr = wptr;
|
|
||||||
while (rptr != wptr) {
|
while (rptr != wptr) {
|
||||||
/* wptr/rptr are in bytes! */
|
/* wptr/rptr are in bytes! */
|
||||||
ring_index = rptr / 4;
|
ring_index = rptr / 4;
|
||||||
|
@ -3632,15 +3629,17 @@ restart_ih:
|
||||||
rptr += 16;
|
rptr += 16;
|
||||||
rptr &= rdev->ih.ptr_mask;
|
rptr &= rdev->ih.ptr_mask;
|
||||||
}
|
}
|
||||||
/* make sure wptr hasn't changed while processing */
|
|
||||||
wptr = si_get_ih_wptr(rdev);
|
|
||||||
if (wptr != rdev->ih.wptr)
|
|
||||||
goto restart_ih;
|
|
||||||
if (queue_hotplug)
|
if (queue_hotplug)
|
||||||
schedule_work(&rdev->hotplug_work);
|
schedule_work(&rdev->hotplug_work);
|
||||||
rdev->ih.rptr = rptr;
|
rdev->ih.rptr = rptr;
|
||||||
WREG32(IH_RB_RPTR, rdev->ih.rptr);
|
WREG32(IH_RB_RPTR, rdev->ih.rptr);
|
||||||
spin_unlock_irqrestore(&rdev->ih.lock, flags);
|
atomic_set(&rdev->ih.lock, 0);
|
||||||
|
|
||||||
|
/* make sure wptr hasn't changed while processing */
|
||||||
|
wptr = si_get_ih_wptr(rdev);
|
||||||
|
if (wptr != rptr)
|
||||||
|
goto restart_ih;
|
||||||
|
|
||||||
return IRQ_HANDLED;
|
return IRQ_HANDLED;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Reference in New Issue