From 26f3751eb47c757c656333e74bdceccd8d286d76 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 2 Jul 2010 15:02:11 +0100 Subject: drm: use list_for_each_entry in drm_mm.c Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson Acked-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_mm.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/drm_mm.c') diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 2ac074c8f5d2..b75eb55f6084 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -332,7 +332,6 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, int best_match) { - struct list_head *list; const struct list_head *free_stack = &mm->fl_entry; struct drm_mm_node *entry; struct drm_mm_node *best; @@ -342,8 +341,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, best = NULL; best_size = ~0UL; - list_for_each(list, free_stack) { - entry = list_entry(list, struct drm_mm_node, fl_entry); + list_for_each_entry(entry, free_stack, fl_entry) { wasted = 0; if (entry->size < size) @@ -376,7 +374,6 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, unsigned long end, int best_match) { - struct list_head *list; const struct list_head *free_stack = &mm->fl_entry; struct drm_mm_node *entry; struct drm_mm_node *best; @@ -386,8 +383,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, best = NULL; best_size = ~0UL; - list_for_each(list, free_stack) { - entry = list_entry(list, struct drm_mm_node, fl_entry); + list_for_each_entry(entry, free_stack, fl_entry) { wasted = 0; if (entry->size < size) -- cgit v1.2.3 From ca31efa89ae16c66966b8d5a5df3ae5cbffa61de Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 2 Jul 2010 15:02:13 +0100 Subject: drm: kill dead code in drm_mm.c Signed-off-by: Daniel Vetter Acked-by: Thomas Hellstrom Signed-off-by: Chris Wilson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_mm.c | 45 --------------------------------------------- 1 file changed, 45 deletions(-) (limited to 'drivers/gpu/drm/drm_mm.c') diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index b75eb55f6084..a5a7a16c4301 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -48,36 +48,6 @@ #define MM_UNUSED_TARGET 4 -unsigned long drm_mm_tail_space(struct drm_mm *mm) -{ - struct list_head *tail_node; - struct drm_mm_node *entry; - - tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_mm_node, ml_entry); - if (!entry->free) - return 0; - - return entry->size; -} - -int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) -{ - struct list_head *tail_node; - struct drm_mm_node *entry; - - tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_mm_node, ml_entry); - if (!entry->free) - return -ENOMEM; - - if (entry->size <= size) - return -ENOMEM; - - entry->size -= size; - return 0; -} - static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) { struct drm_mm_node *child; @@ -152,21 +122,6 @@ static int drm_mm_create_tail_node(struct drm_mm *mm, return 0; } -int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic) -{ - struct list_head *tail_node; - struct drm_mm_node *entry; - - tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_mm_node, ml_entry); - if (!entry->free) { - return drm_mm_create_tail_node(mm, entry->start + entry->size, - size, 
atomic); - } - entry->size += size; - return 0; -} - static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, unsigned long size, int atomic) -- cgit v1.2.3 From d1024ce91ff4c2c4ccbf692d204c71cbf215157a Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 2 Jul 2010 15:02:14 +0100 Subject: drm: sane naming for drm_mm.c Yeah, I've kinda noticed that fl_entry is the free stack. Still give it (and the memory node list ml_entry) decent names. Signed-off-by: Daniel Vetter Acked-by: Thomas Hellstrom Signed-off-by: Chris Wilson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_mm.c | 72 +++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 37 deletions(-) (limited to 'drivers/gpu/drm/drm_mm.c') diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index a5a7a16c4301..d2267ffd2b7a 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) else { child = list_entry(mm->unused_nodes.next, - struct drm_mm_node, fl_entry); - list_del(&child->fl_entry); + struct drm_mm_node, free_stack); + list_del(&child->free_stack); --mm->num_unused; } spin_unlock(&mm->unused_lock); @@ -94,7 +94,7 @@ int drm_mm_pre_get(struct drm_mm *mm) return ret; } ++mm->num_unused; - list_add_tail(&node->fl_entry, &mm->unused_nodes); + list_add_tail(&node->free_stack, &mm->unused_nodes); } spin_unlock(&mm->unused_lock); return 0; @@ -116,8 +116,8 @@ static int drm_mm_create_tail_node(struct drm_mm *mm, child->start = start; child->mm = mm; - list_add_tail(&child->ml_entry, &mm->ml_entry); - list_add_tail(&child->fl_entry, &mm->fl_entry); + list_add_tail(&child->node_list, &mm->node_list); + list_add_tail(&child->free_stack, &mm->free_stack); return 0; } @@ -132,15 +132,15 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, if (unlikely(child == NULL)) return NULL; - INIT_LIST_HEAD(&child->fl_entry); + INIT_LIST_HEAD(&child->free_stack); child->free = 0; child->size = size; child->start = parent->start; child->mm = parent->mm; - list_add_tail(&child->ml_entry, &parent->ml_entry); - INIT_LIST_HEAD(&child->fl_entry); + list_add_tail(&child->node_list, &parent->node_list); + INIT_LIST_HEAD(&child->free_stack); parent->size -= size; parent->start += size; @@ -168,7 +168,7 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, } if (node->size == size) { - list_del_init(&node->fl_entry); + list_del_init(&node->free_stack); node->free = 0; } else { node = drm_mm_split_at_start(node, size, atomic); @@ -206,7 +206,7 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node, } if (node->size == size) { - list_del_init(&node->fl_entry); + list_del_init(&node->free_stack); node->free = 0; } else { node = drm_mm_split_at_start(node, size, atomic); @@ -228,8 +228,8 @@ void drm_mm_put_block(struct drm_mm_node *cur) { struct drm_mm *mm = cur->mm; - struct list_head *cur_head = &cur->ml_entry; - struct list_head *root_head = &mm->ml_entry; + struct list_head *cur_head = &cur->node_list; + struct list_head *root_head = &mm->node_list; struct drm_mm_node *prev_node = NULL; struct drm_mm_node *next_node; @@ -237,7 +237,7 @@ void drm_mm_put_block(struct drm_mm_node *cur) if (cur_head->prev != root_head) { prev_node = - list_entry(cur_head->prev, struct drm_mm_node, ml_entry); + list_entry(cur_head->prev, struct drm_mm_node, node_list); if (prev_node->free) { prev_node->size += cur->size; merged = 1; @@ -245,15 +245,15 @@ 
void drm_mm_put_block(struct drm_mm_node *cur) } if (cur_head->next != root_head) { next_node = - list_entry(cur_head->next, struct drm_mm_node, ml_entry); + list_entry(cur_head->next, struct drm_mm_node, node_list); if (next_node->free) { if (merged) { prev_node->size += next_node->size; - list_del(&next_node->ml_entry); - list_del(&next_node->fl_entry); + list_del(&next_node->node_list); + list_del(&next_node->free_stack); spin_lock(&mm->unused_lock); if (mm->num_unused < MM_UNUSED_TARGET) { - list_add(&next_node->fl_entry, + list_add(&next_node->free_stack, &mm->unused_nodes); ++mm->num_unused; } else @@ -268,12 +268,12 @@ void drm_mm_put_block(struct drm_mm_node *cur) } if (!merged) { cur->free = 1; - list_add(&cur->fl_entry, &mm->fl_entry); + list_add(&cur->free_stack, &mm->free_stack); } else { - list_del(&cur->ml_entry); + list_del(&cur->node_list); spin_lock(&mm->unused_lock); if (mm->num_unused < MM_UNUSED_TARGET) { - list_add(&cur->fl_entry, &mm->unused_nodes); + list_add(&cur->free_stack, &mm->unused_nodes); ++mm->num_unused; } else kfree(cur); @@ -287,7 +287,6 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, int best_match) { - const struct list_head *free_stack = &mm->fl_entry; struct drm_mm_node *entry; struct drm_mm_node *best; unsigned long best_size; @@ -296,7 +295,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, best = NULL; best_size = ~0UL; - list_for_each_entry(entry, free_stack, fl_entry) { + list_for_each_entry(entry, &mm->free_stack, free_stack) { wasted = 0; if (entry->size < size) @@ -329,7 +328,6 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, unsigned long end, int best_match) { - const struct list_head *free_stack = &mm->fl_entry; struct drm_mm_node *entry; struct drm_mm_node *best; unsigned long best_size; @@ -338,7 +336,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, best = NULL; best_size = ~0UL; - list_for_each_entry(entry, free_stack, fl_entry) { + list_for_each_entry(entry, &mm->free_stack, free_stack) { wasted = 0; if (entry->size < size) @@ -373,7 +371,7 @@ EXPORT_SYMBOL(drm_mm_search_free_in_range); int drm_mm_clean(struct drm_mm * mm) { - struct list_head *head = &mm->ml_entry; + struct list_head *head = &mm->node_list; return (head->next->next == head); } @@ -381,8 +379,8 @@ EXPORT_SYMBOL(drm_mm_clean); int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) { - INIT_LIST_HEAD(&mm->ml_entry); - INIT_LIST_HEAD(&mm->fl_entry); + INIT_LIST_HEAD(&mm->node_list); + INIT_LIST_HEAD(&mm->free_stack); INIT_LIST_HEAD(&mm->unused_nodes); mm->num_unused = 0; spin_lock_init(&mm->unused_lock); @@ -393,25 +391,25 @@ EXPORT_SYMBOL(drm_mm_init); void drm_mm_takedown(struct drm_mm * mm) { - struct list_head *bnode = mm->fl_entry.next; + struct list_head *bnode = mm->free_stack.next; struct drm_mm_node *entry; struct drm_mm_node *next; - entry = list_entry(bnode, struct drm_mm_node, fl_entry); + entry = list_entry(bnode, struct drm_mm_node, free_stack); - if (entry->ml_entry.next != &mm->ml_entry || - entry->fl_entry.next != &mm->fl_entry) { + if (entry->node_list.next != &mm->node_list || + entry->free_stack.next != &mm->free_stack) { DRM_ERROR("Memory manager not clean. 
Delaying takedown\n"); return; } - list_del(&entry->fl_entry); - list_del(&entry->ml_entry); + list_del(&entry->free_stack); + list_del(&entry->node_list); kfree(entry); spin_lock(&mm->unused_lock); - list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) { - list_del(&entry->fl_entry); + list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) { + list_del(&entry->free_stack); kfree(entry); --mm->num_unused; } @@ -426,7 +424,7 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) struct drm_mm_node *entry; int total_used = 0, total_free = 0, total = 0; - list_for_each_entry(entry, &mm->ml_entry, ml_entry) { + list_for_each_entry(entry, &mm->node_list, node_list) { printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n", prefix, entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used"); @@ -447,7 +445,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) struct drm_mm_node *entry; int total_used = 0, total_free = 0, total = 0; - list_for_each_entry(entry, &mm->ml_entry, ml_entry) { + list_for_each_entry(entry, &mm->node_list, node_list) { seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used"); total += entry->size; if (entry->free) -- cgit v1.2.3 From 7a6b2896f261894dde287d3faefa4b432cddca53 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 2 Jul 2010 15:02:15 +0100 Subject: drm_mm: extract check_free_mm_node There are already two copies of this logic. And the new scanning stuff will add some more. So extract it into a small helper function. Signed-off-by: Daniel Vetter Acked-by: Thomas Hellstrom Signed-off-by: Chris Wilson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_mm.c | 71 +++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 37 deletions(-) (limited to 'drivers/gpu/drm/drm_mm.c') diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index d2267ffd2b7a..fd86a6c13aac 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -283,6 +283,27 @@ void drm_mm_put_block(struct drm_mm_node *cur) EXPORT_SYMBOL(drm_mm_put_block); +static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size, + unsigned alignment) +{ + unsigned wasted = 0; + + if (entry->size < size) + return 0; + + if (alignment) { + register unsigned tmp = entry->start % alignment; + if (tmp) + wasted = alignment - tmp; + } + + if (entry->size >= size + wasted) { + return 1; + } + + return 0; +} + struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, int best_match) @@ -290,30 +311,20 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, struct drm_mm_node *entry; struct drm_mm_node *best; unsigned long best_size; - unsigned wasted; best = NULL; best_size = ~0UL; list_for_each_entry(entry, &mm->free_stack, free_stack) { - wasted = 0; - - if (entry->size < size) + if (!check_free_mm_node(entry, size, alignment)) continue; - if (alignment) { - register unsigned tmp = entry->start % alignment; - if (tmp) - wasted += alignment - tmp; - } + if (!best_match) + return entry; - if (entry->size >= size + wasted) { - if (!best_match) - return entry; - if (entry->size < best_size) { - best = entry; - best_size = entry->size; - } + if (entry->size < best_size) { + best = entry; + best_size = entry->size; } } @@ -331,37 +342,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, struct drm_mm_node *entry; struct drm_mm_node 
*best; unsigned long best_size; - unsigned wasted; best = NULL; best_size = ~0UL; list_for_each_entry(entry, &mm->free_stack, free_stack) { - wasted = 0; - - if (entry->size < size) + if (entry->start > end || (entry->start+entry->size) < start) continue; - if (entry->start > end || (entry->start+entry->size) < start) + if (!check_free_mm_node(entry, size, alignment)) continue; - if (entry->start < start) - wasted += start - entry->start; + if (!best_match) + return entry; - if (alignment) { - register unsigned tmp = (entry->start + wasted) % alignment; - if (tmp) - wasted += alignment - tmp; - } - - if (entry->size >= size + wasted && - (entry->start + wasted + size) <= end) { - if (!best_match) - return entry; - if (entry->size < best_size) { - best = entry; - best_size = entry->size; - } + if (entry->size < best_size) { + best = entry; + best_size = entry->size; } } -- cgit v1.2.3 From 709ea97145c125b3811ff70429e90ebdb0e832e5 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 2 Jul 2010 15:02:16 +0100 Subject: drm: implement helper functions for scanning lru list These helper functions can be used to efficiently scan the lru list for eviction. Eviction becomes a three-stage process: 1. Scanning through the lru list until a suitable hole has been found. 2. Scan backwards to restore drm_mm consistency and find out which objects fall into the hole. 3. Evict the objects that fall into the hole. These helper functions don't allocate any memory (at the price of not allowing any other concurrent operations). Hence this can also be used for ttm (which does lru scanning under a spinlock). Evicting objects in this fashion should be fairer than the current approach by i915 (scan the lru for an object large enough to contain the new object). It's also more efficient than the current approach used by ttm (unconditionally evict objects from the lru until there's enough free space). 
Signed-Off-by: Daniel Vetter Acked-by: Thomas Hellstrom Signed-off-by: Chris Wilson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_mm.c | 167 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 163 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/drm_mm.c') diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index fd86a6c13aac..da99edc50888 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -53,9 +53,9 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) struct drm_mm_node *child; if (atomic) - child = kmalloc(sizeof(*child), GFP_ATOMIC); + child = kzalloc(sizeof(*child), GFP_ATOMIC); else - child = kmalloc(sizeof(*child), GFP_KERNEL); + child = kzalloc(sizeof(*child), GFP_KERNEL); if (unlikely(child == NULL)) { spin_lock(&mm->unused_lock); @@ -85,7 +85,7 @@ int drm_mm_pre_get(struct drm_mm *mm) spin_lock(&mm->unused_lock); while (mm->num_unused < MM_UNUSED_TARGET) { spin_unlock(&mm->unused_lock); - node = kmalloc(sizeof(*node), GFP_KERNEL); + node = kzalloc(sizeof(*node), GFP_KERNEL); spin_lock(&mm->unused_lock); if (unlikely(node == NULL)) { @@ -134,7 +134,6 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, INIT_LIST_HEAD(&child->free_stack); - child->free = 0; child->size = size; child->start = parent->start; child->mm = parent->mm; @@ -235,6 +234,9 @@ void drm_mm_put_block(struct drm_mm_node *cur) int merged = 0; + BUG_ON(cur->scanned_block || cur->scanned_prev_free + || cur->scanned_next_free); + if (cur_head->prev != root_head) { prev_node = list_entry(cur_head->prev, struct drm_mm_node, node_list); @@ -312,6 +314,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, struct drm_mm_node *best; unsigned long best_size; + BUG_ON(mm->scanned_blocks); + best = NULL; best_size = ~0UL; @@ -343,6 +347,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, struct drm_mm_node *best; unsigned long best_size; + BUG_ON(mm->scanned_blocks); + best = NULL; best_size = ~0UL; @@ -366,6 +372,158 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, } EXPORT_SYMBOL(drm_mm_search_free_in_range); +/** + * Initializa lru scanning. + * + * This simply sets up the scanning routines with the parameters for the desired + * hole. + * + * Warning: As long as the scan list is non-empty, no other operations than + * adding/removing nodes to/from the scan list are allowed. + */ +void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, + unsigned alignment) +{ + mm->scan_alignment = alignment; + mm->scan_size = size; + mm->scanned_blocks = 0; + mm->scan_hit_start = 0; + mm->scan_hit_size = 0; +} +EXPORT_SYMBOL(drm_mm_init_scan); + +/** + * Add a node to the scan list that might be freed to make space for the desired + * hole. + * + * Returns non-zero, if a hole has been found, zero otherwise. 
+ */ +int drm_mm_scan_add_block(struct drm_mm_node *node) +{ + struct drm_mm *mm = node->mm; + struct list_head *prev_free, *next_free; + struct drm_mm_node *prev_node, *next_node; + + mm->scanned_blocks++; + + prev_free = next_free = NULL; + + BUG_ON(node->free); + node->scanned_block = 1; + node->free = 1; + + if (node->node_list.prev != &mm->node_list) { + prev_node = list_entry(node->node_list.prev, struct drm_mm_node, + node_list); + + if (prev_node->free) { + list_del(&prev_node->node_list); + + node->start = prev_node->start; + node->size += prev_node->size; + + prev_node->scanned_prev_free = 1; + + prev_free = &prev_node->free_stack; + } + } + + if (node->node_list.next != &mm->node_list) { + next_node = list_entry(node->node_list.next, struct drm_mm_node, + node_list); + + if (next_node->free) { + list_del(&next_node->node_list); + + node->size += next_node->size; + + next_node->scanned_next_free = 1; + + next_free = &next_node->free_stack; + } + } + + /* The free_stack list is not used for allocated objects, so these two + * pointers can be abused (as long as no allocations in this memory + * manager happens). */ + node->free_stack.prev = prev_free; + node->free_stack.next = next_free; + + if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) { + mm->scan_hit_start = node->start; + mm->scan_hit_size = node->size; + + return 1; + } + + return 0; +} +EXPORT_SYMBOL(drm_mm_scan_add_block); + +/** + * Remove a node from the scan list. + * + * Nodes _must_ be removed in the exact same order from the scan list as they + * have been added, otherwise the internal state of the memory manager will be + * corrupted. + * + * When the scan list is empty, the selected memory nodes can be freed. An + * immediatly following drm_mm_search_free with best_match = 0 will then return + * the just freed block (because its at the top of the free_stack list). + * + * Returns one if this block should be evicted, zero otherwise. Will always + * return zero when no hole has been found. + */ +int drm_mm_scan_remove_block(struct drm_mm_node *node) +{ + struct drm_mm *mm = node->mm; + struct drm_mm_node *prev_node, *next_node; + + mm->scanned_blocks--; + + BUG_ON(!node->scanned_block); + node->scanned_block = 0; + node->free = 0; + + prev_node = list_entry(node->free_stack.prev, struct drm_mm_node, + free_stack); + next_node = list_entry(node->free_stack.next, struct drm_mm_node, + free_stack); + + if (prev_node) { + BUG_ON(!prev_node->scanned_prev_free); + prev_node->scanned_prev_free = 0; + + list_add_tail(&prev_node->node_list, &node->node_list); + + node->start = prev_node->start + prev_node->size; + node->size -= prev_node->size; + } + + if (next_node) { + BUG_ON(!next_node->scanned_next_free); + next_node->scanned_next_free = 0; + + list_add(&next_node->node_list, &node->node_list); + + node->size -= next_node->size; + } + + INIT_LIST_HEAD(&node->free_stack); + + /* Only need to check for containement because start&size for the + * complete resulting free block (not just the desired part) is + * stored. 
*/ + if (node->start >= mm->scan_hit_start && + node->start + node->size + <= mm->scan_hit_start + mm->scan_hit_size) { + return 1; + } + + return 0; +} +EXPORT_SYMBOL(drm_mm_scan_remove_block); + int drm_mm_clean(struct drm_mm * mm) { struct list_head *head = &mm->node_list; @@ -380,6 +538,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) INIT_LIST_HEAD(&mm->free_stack); INIT_LIST_HEAD(&mm->unused_nodes); mm->num_unused = 0; + mm->scanned_blocks = 0; spin_lock_init(&mm->unused_lock); return drm_mm_create_tail_node(mm, start, size, 0); -- cgit v1.2.3
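For reference, the allocator side that the patches above keep refactoring boils down to a search-then-split sequence. The sketch below is illustrative only and not taken from any driver: drm_mm_get_block() is assumed to be the usual non-atomic wrapper around drm_mm_get_block_generic() from the drm_mm header, and the range size, block size, alignment and error handling are invented for the example.

#include <linux/errno.h>
#include <drm/drm_mm.h>         /* header location assumed: include/drm/drm_mm.h */

/* Illustrative only: carve one 64 KiB, 4 KiB-aligned block out of a
 * 16 MiB managed range, then release everything again. */
static int my_alloc_example(struct drm_mm *mm)
{
        struct drm_mm_node *hole, *node;
        int ret;

        ret = drm_mm_init(mm, 0, 16 * 1024 * 1024);
        if (ret)
                return ret;

        /* best_match = 0 means first fit; after an eviction scan this is
         * also what picks up the hole that was just opened. */
        hole = drm_mm_search_free(mm, 64 * 1024, 4096, 0);
        if (!hole) {
                drm_mm_takedown(mm);
                return -ENOSPC;         /* a real driver would try to evict here */
        }

        node = drm_mm_get_block(hole, 64 * 1024, 4096);
        if (!node) {
                drm_mm_takedown(mm);
                return -ENOMEM;
        }

        /* ... node->start is the offset of the block inside the range ... */

        drm_mm_put_block(node);         /* merges back into the free space */
        drm_mm_takedown(mm);
        return 0;
}

drm_mm_put_block() coalesces the released node with neighbouring free space (or stashes it on unused_nodes), which is exactly what the eviction helpers rely on.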
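The scanning helpers introduced in the last patch are easiest to follow from the caller's side. Below is a minimal driver-side sketch of the three-stage eviction described in the commit message; struct my_obj, its list members and my_unbind() are hypothetical driver bookkeeping, not part of drm_mm, and an actual driver would add reference counting and locking on top. The unwind in stage 2 walks the blocks newest-first: each drm_mm_scan_remove_block() re-links the node that a later drm_mm_scan_add_block() spliced out of the node list, so building the unwind list with list_add() and walking it head-to-tail gives the required order.

#include <linux/list.h>
#include <linux/errno.h>
#include <drm/drm_mm.h>         /* header location assumed: include/drm/drm_mm.h */

/* Hypothetical driver object -- only the fields the scan needs. */
struct my_obj {
        struct drm_mm_node *node;       /* this object's block in the drm_mm */
        struct list_head lru_link;      /* position on the driver's LRU */
        struct list_head scan_link;     /* scratch link used during the scan */
};

void my_unbind(struct my_obj *obj);     /* hypothetical; must end in drm_mm_put_block(obj->node) */

static int my_evict_something(struct drm_mm *mm, struct list_head *lru,
                              unsigned long size, unsigned alignment)
{
        struct my_obj *obj, *next;
        LIST_HEAD(unwind);
        LIST_HEAD(evict);
        int found = 0;

        /* Stage 1: feed LRU objects into the scan until a hole opens up. */
        drm_mm_init_scan(mm, size, alignment);
        list_for_each_entry(obj, lru, lru_link) {
                list_add(&obj->scan_link, &unwind);
                if (drm_mm_scan_add_block(obj->node)) {
                        found = 1;
                        break;
                }
        }

        /* Stage 2: unwind every scanned block, newest first (list_add()
         * above built the unwind list in exactly that order).  Blocks for
         * which drm_mm_scan_remove_block() returns 1 fall into the hole. */
        list_for_each_entry_safe(obj, next, &unwind, scan_link) {
                list_del(&obj->scan_link);
                if (drm_mm_scan_remove_block(obj->node))
                        list_add(&obj->scan_link, &evict);
        }

        if (!found)
                return -ENOSPC;

        /* Stage 3: evict; each my_unbind() ends in drm_mm_put_block(), so
         * the freed blocks coalesce into the hole found in stage 1.  An
         * immediately following drm_mm_search_free(mm, size, alignment, 0)
         * then returns that hole. */
        list_for_each_entry_safe(obj, next, &evict, scan_link) {
                list_del(&obj->scan_link);
                my_unbind(obj);
        }

        return 0;
}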