[PATCH] s390: fix endless retry loop in tape driver

If a tape device is assigned to another host, the interrupt for the assign
operation comes back with deferred condition code 1.  Under some conditions
this can lead to an endless loop of retries.  In the deferred condition code
handling, check whether the current request is still in I/O and prevent retries
when the request has already been cancelled.

Signed-off-by: Michael Holzheu <holzheu@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Authored by Michael Holzheu on 2006-03-24 03:15:28 -08:00; committed by Linus Torvalds
parent 4cd190a736
commit 5f38433885
3 changed files with 35 additions and 13 deletions
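
For illustration only, here is a stand-alone user-space sketch of the retry decision this patch changes. The TAPE_REQUEST_* status names are taken from the diffs below; struct fake_request, retry_deferred_cc() and main() are hypothetical stand-ins for the real request structure and interrupt-handler context. With the added status check, a deferred condition code on a request that has already been cancelled no longer re-queues it, which is what broke the endless retry loop.

/*
 * Stand-alone sketch (not kernel code) of the guarded retry decision.
 * TAPE_REQUEST_* values mirror the driver's request states; fake_request,
 * retry_deferred_cc() and main() are made up for this demonstration.
 */
#include <stdio.h>

enum tape_request_status {
	TAPE_REQUEST_QUEUED,	/* waiting to be (re)started            */
	TAPE_REQUEST_IN_IO,	/* currently active on the subchannel   */
	TAPE_REQUEST_DONE,	/* finished or already cancelled        */
};

struct fake_request {
	enum tape_request_status status;
};

/* Returns 1 if a deferred condition code would lead to a retry. */
static int retry_deferred_cc(struct fake_request *request, int deferred_cc)
{
	/* New behaviour: only a request that is still in I/O is retried. */
	if (deferred_cc && request->status == TAPE_REQUEST_IN_IO) {
		request->status = TAPE_REQUEST_QUEUED;	/* restart later */
		return 1;
	}
	return 0;	/* cancelled/completed requests are left alone */
}

int main(void)
{
	struct fake_request active    = { .status = TAPE_REQUEST_IN_IO };
	struct fake_request cancelled = { .status = TAPE_REQUEST_DONE };

	/* Before the patch both cases were retried, looping forever on the second. */
	printf("active request retried:    %d\n", retry_deferred_cc(&active, 1));
	printf("cancelled request retried: %d\n", retry_deferred_cc(&cancelled, 1));
	return 0;
}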

@@ -250,6 +250,7 @@ extern void tape_free_request(struct tape_request *);
 extern int tape_do_io(struct tape_device *, struct tape_request *);
 extern int tape_do_io_async(struct tape_device *, struct tape_request *);
 extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
+extern int tape_cancel_io(struct tape_device *, struct tape_request *);
 void tape_hotplug_event(struct tape_device *, int major, int action);
 
 static inline int

@@ -761,6 +761,13 @@ __tape_start_next_request(struct tape_device *device)
 		 */
 		if (request->status == TAPE_REQUEST_IN_IO)
 			return;
+		/*
+		 * Request has already been stopped. We have to wait until
+		 * the request is removed from the queue in the interrupt
+		 * handling.
+		 */
+		if (request->status == TAPE_REQUEST_DONE)
+			return;
 		/*
 		 * We wanted to cancel the request but the common I/O layer
@@ -1023,6 +1030,20 @@ tape_do_io_interruptible(struct tape_device *device,
 	return rc;
 }
 
+/*
+ * Stop running ccw.
+ */
+int
+tape_cancel_io(struct tape_device *device, struct tape_request *request)
+{
+	int rc;
+
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = __tape_cancel_io(device, request);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	return rc;
+}
+
 /*
  * Tape interrupt routine, called from the ccw_device layer
  */
@@ -1068,12 +1089,12 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 	 * error might still apply. So we just schedule the request to be
 	 * started later.
 	 */
-	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
-		PRINT_WARN("(%s): deferred cc=%i. restaring\n",
-			cdev->dev.bus_id,
-			irb->scsw.cc);
+	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
+	    (request->status == TAPE_REQUEST_IN_IO)) {
+		DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
+			device->cdev_id, irb->scsw.cc, irb->scsw.fctl);
 		request->status = TAPE_REQUEST_QUEUED;
-		schedule_work(&device->tape_dnr);
+		schedule_delayed_work(&device->tape_dnr, HZ);
 		return;
 	}
@@ -1287,4 +1308,5 @@ EXPORT_SYMBOL(tape_dump_sense_dbf);
 EXPORT_SYMBOL(tape_do_io);
 EXPORT_SYMBOL(tape_do_io_async);
 EXPORT_SYMBOL(tape_do_io_interruptible);
+EXPORT_SYMBOL(tape_cancel_io);
 EXPORT_SYMBOL(tape_mtop);

@@ -37,20 +37,19 @@ tape_std_assign_timeout(unsigned long data)
 {
 	struct tape_request *	request;
 	struct tape_device *	device;
+	int rc;
 
 	request = (struct tape_request *) data;
 	if ((device = request->device) == NULL)
 		BUG();
 
-	spin_lock_irq(get_ccwdev_lock(device->cdev));
-	if (request->callback != NULL) {
-		DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
+	DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
 			device->cdev_id);
-		PRINT_ERR("%s: Assignment timeout. Device busy.\n",
-			device->cdev->dev.bus_id);
-		ccw_device_clear(device->cdev, (long) request);
-	}
-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	rc = tape_cancel_io(device, request);
+	if(rc)
+		PRINT_ERR("(%s): Assign timeout: Cancel failed with rc = %i\n",
+			device->cdev->dev.bus_id, rc);
 }
 
 int