dect / linux-2.6

UBI: correct ubi_wl_flush locking

Commit "62f38455 UBI: modify ubi_wl_flush function to clear work queue for a lnum"
takes the 'work_sem' semaphore in write mode for the entire loop, which is not
very good because it will block other workers for potentially long time. We do
not need to have it in write mode - read mode is enough, and we do not need to
hole it over the entire loop. So this patch turns changes the locking: takes
'work_sem' in read mode and pushes it down to the loop.

Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Artem Bityutskiy 2012-06-07 15:15:30 +03:00
parent 818039c7d5
commit 12027f1b3f
1 changed file with 13 additions and 4 deletions


@@ -1262,11 +1262,11 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
 	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
 	       vol_id, lnum, ubi->works_count);
-	down_write(&ubi->work_sem);
 	while (found) {
 		struct ubi_work *wrk;
 		found = 0;
+		down_read(&ubi->work_sem);
 		spin_lock(&ubi->wl_lock);
 		list_for_each_entry(wrk, &ubi->works, list) {
 			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
@@ -1277,18 +1277,27 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
 				spin_unlock(&ubi->wl_lock);
 				err = wrk->func(ubi, wrk, 0);
-				if (err)
-					goto out;
+				if (err) {
+					up_read(&ubi->work_sem);
+					return err;
+				}
 				spin_lock(&ubi->wl_lock);
 				found = 1;
 				break;
 			}
 		}
 		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->work_sem);
 	}
-out:
+	/*
+	 * Make sure all the works which have been done in parallel are
+	 * finished.
+	 */
+	down_write(&ubi->work_sem);
 	up_write(&ubi->work_sem);
 	return err;
 }
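
For readers less familiar with the rw_semaphore idiom used here, below is a minimal user-space sketch of the same pattern, with POSIX pthread_rwlock_t standing in for the kernel's rw_semaphore, a mutex standing in for ubi->wl_lock, and a plain counter standing in for the work list. All identifiers (flush_works, do_one_work, work_sem, wl_lock, works_count) are illustrative, not actual UBI code.

/*
 * Sketch only, not UBI code: flush pending works while holding the
 * rwlock in read mode per iteration, then take/drop it in write mode
 * once at the end as a barrier for workers still running in parallel.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;
static int works_count = 3;	/* pretend three works are queued */

static void do_one_work(void)
{
	printf("executing one pending work\n");
}

static int flush_works(void)
{
	int found = 1;

	while (found) {
		found = 0;

		/* Read mode: parallel workers are not blocked while we flush. */
		pthread_rwlock_rdlock(&work_sem);
		pthread_mutex_lock(&wl_lock);
		if (works_count > 0) {
			works_count -= 1;
			pthread_mutex_unlock(&wl_lock);
			do_one_work();
			found = 1;
		} else {
			pthread_mutex_unlock(&wl_lock);
		}
		pthread_rwlock_unlock(&work_sem);
	}

	/*
	 * Taking and immediately dropping the lock in write mode waits for
	 * any worker that is still running under the read lock.
	 */
	pthread_rwlock_wrlock(&work_sem);
	pthread_rwlock_unlock(&work_sem);

	return 0;
}

int main(void)
{
	flush_works();
	printf("flush complete\n");
	return 0;
}

The empty write-lock/unlock pair at the end works as a barrier because a writer cannot acquire the lock until every reader (every worker that took it in read mode) has released it, which is exactly what the patch's final down_write()/up_write() on 'work_sem' relies on.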