author     Linus Torvalds <torvalds@linux-foundation.org>  2020-06-10 13:24:40 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-10 13:24:40 -0700
commit     0e083da7c8a26bc2674d7155bb5a0676b7dbc8ba (patch)
tree       747e093807e77168a35aae565390a37eac1a6b8d
parent     6f51ab9440d131ae424cce27e3170746219f5142 (diff)
parent     4b68bf9a69d22dd512d61d5f0ba01b065b01ede6 (diff)
Merge tag 'for-linus-5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs
Pull UBI update from Richard Weinberger:
 "This contains a single change for UBI:

   - Select fastmap anchor PEBs considering wear level rules"

* tag 'for-linus-5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs:
  ubi: Select fastmap anchor PEBs considering wear level rules
-rw-r--r--  drivers/mtd/ubi/fastmap-wl.c  | 39
-rw-r--r--  drivers/mtd/ubi/fastmap.c     | 11
-rw-r--r--  drivers/mtd/ubi/ubi.h         |  4
-rw-r--r--  drivers/mtd/ubi/wl.c          | 28
4 files changed, 57 insertions(+), 25 deletions(-)
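This merge replaces UBI's single fastmap anchor slot with a two-slot scheme: fm_anchor holds the PEB the in-flight fastmap update is written to, while fm_next_anchor stages a candidate for the following update, and both are now chosen by wear level via ubi_wl_get_fm_peb(ubi, 1). That call boils down to: take the least-worn free PEB inside the anchor area (pnum < UBI_FM_MAX_START), as find_anchor_wl_entry() does over a tree of PEBs. The standalone C sketch below illustrates only that selection rule; the array-based free list, the function names, and the UBI_FM_MAX_START value are assumptions for illustration, not kernel code.

/* Sketch: pick the least-worn free PEB in the fastmap anchor area,
 * mirroring the idea behind find_anchor_wl_entry() on ubi->free.
 */
#include <stddef.h>
#include <stdio.h>

#define UBI_FM_MAX_START 64     /* assumed: anchor PEBs lie below this pnum */

struct peb {
        int pnum;               /* physical eraseblock number */
        int ec;                 /* erase counter (wear level) */
};

static struct peb *pick_anchor(struct peb *free_pebs, size_t n)
{
        struct peb *best = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
                if (free_pebs[i].pnum >= UBI_FM_MAX_START)
                        continue;       /* outside the anchor area */
                if (!best || free_pebs[i].ec < best->ec)
                        best = &free_pebs[i];
        }
        return best;
}

int main(void)
{
        struct peb free_pebs[] = {
                { .pnum = 3,  .ec = 120 },
                { .pnum = 17, .ec = 95 },
                { .pnum = 80, .ec = 10 },       /* least worn, but not in the anchor area */
        };
        struct peb *a = pick_anchor(free_pebs, 3);

        if (a)
                printf("anchor: PEB %d (ec=%d)\n", a->pnum, a->ec);
        return 0;
}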
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index b486250923c5..83afc00e365a 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -116,6 +116,21 @@ void ubi_refill_pools(struct ubi_device *ubi)
         wl_pool->size = 0;
         pool->size = 0;
 
+        if (ubi->fm_anchor) {
+                wl_tree_add(ubi->fm_anchor, &ubi->free);
+                ubi->free_count++;
+        }
+        if (ubi->fm_next_anchor) {
+                wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+                ubi->free_count++;
+        }
+
+        /* All available PEBs are in ubi->free, now is the time to get
+         * the best anchor PEBs.
+         */
+        ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
+        ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+
         for (;;) {
                 enough = 0;
                 if (pool->size < pool->max_size) {
@@ -271,26 +286,20 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
 {
         struct ubi_work *wrk;
-        struct ubi_wl_entry *anchor;
 
         spin_lock(&ubi->wl_lock);
 
-        /* Do we already have an anchor? */
-        if (ubi->fm_anchor) {
-                spin_unlock(&ubi->wl_lock);
-                return 0;
-        }
-
-        /* See if we can find an anchor PEB on the list of free PEBs */
-        anchor = ubi_wl_get_fm_peb(ubi, 1);
-        if (anchor) {
-                ubi->fm_anchor = anchor;
-                spin_unlock(&ubi->wl_lock);
-                return 0;
+        /* Do we have a next anchor? */
+        if (!ubi->fm_next_anchor) {
+                ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+                if (!ubi->fm_next_anchor)
+                        /* Tell wear leveling to produce a new anchor PEB */
+                        ubi->fm_do_produce_anchor = 1;
         }
 
-        /* No luck, trigger wear leveling to produce a new anchor PEB */
-        ubi->fm_do_produce_anchor = 1;
+        /* Do wear leveling to get a new anchor PEB or check the
+         * existing next anchor candidate.
+         */
         if (ubi->wl_scheduled) {
                 spin_unlock(&ubi->wl_lock);
                 return 0;
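The rewritten ubi_ensure_anchor_pebs() above no longer returns early once an anchor exists: it keeps a next-anchor candidate staged and then falls through to schedule the wear-leveling worker either way, to produce a new anchor PEB or to re-check the staged candidate. A condensed view of just the staging decision follows; the anchor_state type and its field names are hypothetical stand-ins for fields of struct ubi_device, and the scheduling step is omitted.

/* Hedged sketch of the staging decision in ubi_ensure_anchor_pebs(). */
#include <stdbool.h>

struct anchor_state {
        bool have_next_anchor;          /* ubi->fm_next_anchor != NULL */
        bool free_anchor_peb_available; /* ubi_wl_get_fm_peb(ubi, 1) would succeed */
        bool do_produce_anchor;         /* mirrors ubi->fm_do_produce_anchor */
};

static void ensure_next_anchor(struct anchor_state *s)
{
        if (s->have_next_anchor)
                return;                         /* candidate already staged */
        if (s->free_anchor_peb_available) {
                s->have_next_anchor = true;     /* take it from the free tree */
                return;
        }
        s->do_produce_anchor = true;            /* ask wear leveling to make one */
}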
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 53f448e7433a..022af59906aa 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1220,6 +1220,17 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
                 fm_pos += sizeof(*fec);
                 ubi_assert(fm_pos <= ubi->fm_size);
         }
+        if (ubi->fm_next_anchor) {
+                fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+                fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
+                set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
+                fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
+
+                free_peb_count++;
+                fm_pos += sizeof(*fec);
+                ubi_assert(fm_pos <= ubi->fm_size);
+        }
         fmh->free_peb_count = cpu_to_be32(free_peb_count);
 
         ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
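The hunk above also records the staged fm_next_anchor PEB in the fastmap's on-flash free-PEB list, so the candidate survives a later attach by fastmap. Each record stores the PEB number and erase counter big-endian, as the cpu_to_be32() calls show. Below is a userspace sketch of serializing one such record; the struct layout is an assumption modeled on the kernel's struct ubi_fm_ec, with htonl() standing in for cpu_to_be32().

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>  /* htonl(), userspace stand-in for cpu_to_be32() */

struct fm_ec_record {   /* assumed layout, modeled on struct ubi_fm_ec */
        uint32_t pnum;  /* physical eraseblock number, big-endian on flash */
        uint32_t ec;    /* erase counter, big-endian on flash */
};

/* Append one free-PEB record at *pos in the raw fastmap buffer. */
static void emit_free_peb(uint8_t *fm_raw, size_t *pos,
                          uint32_t pnum, uint32_t ec)
{
        struct fm_ec_record rec = {
                .pnum = htonl(pnum),
                .ec = htonl(ec),
        };

        memcpy(fm_raw + *pos, &rec, sizeof(rec));
        *pos += sizeof(rec);
}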
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index da0bee13fe7f..c2da77163f94 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -491,7 +491,8 @@ struct ubi_debug_info {
  * @fm_work: fastmap work queue
  * @fm_work_scheduled: non-zero if fastmap work was scheduled
  * @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The next anchor PEB to use for fastmap
+ * @fm_anchor: The new anchor PEB used during fastmap update
+ * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
  * @fm_do_produce_anchor: If true produce an anchor PEB in wl
  *
  * @used: RB-tree of used physical eraseblocks
@@ -602,6 +603,7 @@ struct ubi_device {
         int fm_work_scheduled;
         int fast_attach;
         struct ubi_wl_entry *fm_anchor;
+        struct ubi_wl_entry *fm_next_anchor;
         int fm_do_produce_anchor;
 
         /* Wear-leveling sub-system's stuff */
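The two pointers added to struct ubi_device split the anchor role: fm_anchor is consumed by the fastmap write in progress, while fm_next_anchor keeps a candidate warm for the following update. The sketch below gives a deliberately simplified view of that lifecycle; in the kernel, ubi_refill_pools() re-picks both slots from ubi->free rather than promoting one into the other, so treat this as an illustration only.

#include <stddef.h>

struct wl_entry;        /* opaque stand-in for the kernel's struct ubi_wl_entry */

struct anchor_slots {   /* hypothetical container for the two fields above */
        struct wl_entry *fm_anchor;             /* target of the update in flight */
        struct wl_entry *fm_next_anchor;        /* candidate for the next update */
};

/* Simplified handover once an update completes: promote the staged
 * candidate and leave the next slot to be restaged later (done by
 * ubi_ensure_anchor_pebs() in the real code).
 */
static void finish_fastmap_update(struct anchor_slots *s)
{
        s->fm_anchor = s->fm_next_anchor;
        s->fm_next_anchor = NULL;
}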
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 5146cce5fe32..27636063ed1b 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -687,20 +687,27 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
         }
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
+        e1 = find_anchor_wl_entry(&ubi->used);
+        if (e1 && ubi->fm_next_anchor &&
+            (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+                ubi->fm_do_produce_anchor = 1;
+                /* fm_next_anchor is no longer considered a good anchor
+                 * candidate.
+                 * NULL assignment also prevents multiple wear level checks
+                 * of this PEB.
+                 */
+                wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+                ubi->fm_next_anchor = NULL;
+                ubi->free_count++;
+        }
+
         if (ubi->fm_do_produce_anchor) {
-                e1 = find_anchor_wl_entry(&ubi->used);
                 if (!e1)
                         goto out_cancel;
                 e2 = get_peb_for_wl(ubi);
                 if (!e2)
                         goto out_cancel;
 
-                /*
-                 * Anchor move within the anchor area is useless.
-                 */
-                if (e2->pnum < UBI_FM_MAX_START)
-                        goto out_cancel;
-
                 self_check_in_wl_tree(ubi, e1, &ubi->used);
                 rb_erase(&e1->u.rb, &ubi->used);
                 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
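The block added at the top of this hunk carries the actual wear-level rule: a staged next-anchor candidate is retired once its erase counter runs UBI_WL_THRESHOLD or more ahead of the least-worn anchor-area PEB still in use (e1), at which point it goes back to the free tree and anchor production is re-triggered. The predicate alone, as a sketch; 4096 is the usual CONFIG_MTD_UBI_WL_THRESHOLD default but should be treated as an assumption here.

#include <stdbool.h>

#define UBI_WL_THRESHOLD 4096   /* assumed Kconfig default */

/* True when the staged candidate is so much more worn than the anchor
 * PEB currently in use that wear leveling should produce a fresher one.
 */
static bool next_anchor_too_worn(int used_anchor_ec, int candidate_ec)
{
        return candidate_ec - used_anchor_ec >= UBI_WL_THRESHOLD;
}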
@@ -1079,8 +1086,11 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
         if (!err) {
                 spin_lock(&ubi->wl_lock);
 
-                if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
-                        ubi->fm_anchor = e;
+                if (!ubi->fm_next_anchor && e->pnum < UBI_FM_MAX_START) {
+                        /* Abort anchor production, if needed it will be
+                         * enabled again in the wear leveling started below.
+                         */
+                        ubi->fm_next_anchor = e;
                         ubi->fm_do_produce_anchor = 0;
                 } else {
                         wl_tree_add(e, &ubi->free);