author		Richard Weinberger <richard@nod.at>	2014-11-07 23:40:27 +0100
committer	Richard Weinberger <richard@nod.at>	2015-03-26 22:17:46 +0100
commit		691a870563b4fe9e014a309477fb8f9fe41b36bb (patch)
tree		39643a305695553da0c2efd655c980282fdb853f /drivers/mtd
parent		d59f21bebe9d0fda34027ff1afda4f2b0d5f1869 (diff)
UBI: Split __wl_get_peb()
Make it two functions, wl_get_wle() and wl_get_peb(). wl_get_peb() works exactly like __wl_get_peb(), but wl_get_wle() does not call produce_free_peb(). While refilling the fastmap user pool we cannot release ubi->wl_lock, which produce_free_peb() does. Hence the fastmap logic now uses wl_get_wle().

Signed-off-by: Richard Weinberger <richard@nod.at>
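For context, the locking constraint described above can be shown with a minimal sketch of a pool-refill loop. This is not the kernel's actual refill code: refill_wl_user_pool_sketch() is a hypothetical helper and the ubi_fm_pool field usage is an assumption for illustration; only wl_get_wle(), ubi->wl_lock, and the "must not drop the lock" rule come from the patch below.

/*
 * Hypothetical sketch of a fastmap pool refill. The point is the
 * locking shape: the whole loop runs under ubi->wl_lock, so it may
 * only call wl_get_wle(), which never drops the lock, and never
 * wl_get_peb(), whose produce_free_peb() path releases wl_lock.
 */
static void refill_wl_user_pool_sketch(struct ubi_device *ubi,
				       struct ubi_fm_pool *pool)
{
	struct ubi_wl_entry *e;

	spin_lock(&ubi->wl_lock);
	while (pool->size < pool->max_size) {
		e = wl_get_wle(ubi);	/* safe: does not release wl_lock */
		if (!e)
			break;		/* no free PEBs left */
		pool->pebs[pool->size++] = e->pnum;
	}
	spin_unlock(&ubi->wl_lock);
}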
Diffstat (limited to 'drivers/mtd')
-rw-r--r--	drivers/mtd/ubi/wl.c	61
1 file changed, 38 insertions(+), 23 deletions(-)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 2539a12140e7..9f57d6b7579b 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -496,13 +496,46 @@ out:
#endif
/**
- * __wl_get_peb - get a physical eraseblock.
+ * wl_get_wle - get a mean wl entry to be used by wl_get_peb() or
+ * refill_wl_user_pool().
+ * @ubi: UBI device description object
+ *
+ * This function returns a wear leveling entry in case of success and
+ * NULL in case of failure.
+ */
+static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_mean_wl_entry(ubi, &ubi->free);
+ if (!e) {
+ ubi_err(ubi, "no free eraseblocks");
+ return NULL;
+ }
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /*
+ * Remove the physical eraseblock from the free tree; the caller
+ * is responsible for protecting it from being moved for some time.
+ */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+ dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+
+ return e;
+}
+
+/**
+ * wl_get_peb - get a physical eraseblock.
* @ubi: UBI device description object
*
* This function returns a physical eraseblock in case of success and a
* negative error code in case of failure.
+ * It is the low-level component of ubi_wl_get_peb() in the non-fastmap
+ * case.
*/
-static int __wl_get_peb(struct ubi_device *ubi)
+static int wl_get_peb(struct ubi_device *ubi)
{
int err;
struct ubi_wl_entry *e;
@@ -521,27 +554,9 @@ retry:
goto retry;
}
- e = find_mean_wl_entry(ubi, &ubi->free);
- if (!e) {
- ubi_err(ubi, "no free eraseblocks");
- return -ENOSPC;
- }
-
- self_check_in_wl_tree(ubi, e, &ubi->free);
-
- /*
- * Move the physical eraseblock to the protection queue where it will
- * be protected from being moved for some time.
- */
- rb_erase(&e->u.rb, &ubi->free);
- ubi->free_count--;
- dbg_wl("PEB %d EC %d", e->pnum, e->ec);
-#ifndef CONFIG_MTD_UBI_FASTMAP
- /* We have to enqueue e only if fastmap is disabled,
- * is fastmap enabled prot_queue_add() will be called by
- * ubi_wl_get_peb() after removing e from the pool. */
+ e = wl_get_wle(ubi);
prot_queue_add(ubi, e);
-#endif
+
return e->pnum;
}
@@ -701,7 +716,7 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
int peb, err;
spin_lock(&ubi->wl_lock);
- peb = __wl_get_peb(ubi);
+ peb = wl_get_peb(ubi);
spin_unlock(&ubi->wl_lock);
if (peb < 0)