/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

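/*
 * mark_free - add a vma to the eviction scan
 *
 * Unpinned vmas are handed to the drm_mm scanner and kept on the caller's
 * unwind list so that the scan can be rolled back afterwards. Returns true
 * as soon as the scanner has found a hole of the requested size.
 */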
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @dev: drm_device
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 unsigned long start, unsigned long end,
			 unsigned flags)
{
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;
	int pass = 0;

	trace_i915_gem_evict(dev, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Clean active objects
	 *   3. Flushing list
	 *   4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

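	/* Arm the drm_mm eviction scanner. While the scan is in progress no
	 * other drm_mm operation may be performed on this address space, and
	 * every block added below must be removed again with
	 * drm_mm_scan_remove_block() before the scan is torn down.
	 */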
	INIT_LIST_HEAD(&unwind_list);
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else {
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
	}

search_again:
	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (flags & PIN_NONBLOCK)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips?
	 */
	if (flags & PIN_NONBLOCK)
		return -ENOSPC;

	/* Only idle the GPU and repeat the search once */
	if (pass++ == 0) {
		ret = i915_gpu_idle(dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev);
		goto search_again;
	}

	/* If we still have pending pageflip completions, drop
	 * back to userspace to give our workqueues time to
	 * acquire our locks and unpin the old scanouts.
	 */
	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
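		/* drm_mm_scan_remove_block() returns true if the node lies
		 * inside the hole found by the scan, i.e. the vma must be
		 * evicted to create the free space. Grab a reference first,
		 * as unbinding may drop the last reference to the object.
		 */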
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 *
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle must be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		ret = i915_gpu_idle(vm->dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(vm->dev);
	}

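	/* The _safe iterator is required as i915_vma_unbind() removes the
	 * vma from vm->inactive_list as it is evicted.
	 */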
	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
		if (vma->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}

/**
 * i915_gem_evict_everything - Try to evict all objects
 * @dev: Device to evict objects for
 *
 * This function tries to evict all gem objects from all address spaces. Used
 * by the shrinker as a last-ditch effort and for suspend, before releasing the
 * backing storage of all unbound objects.
 */
int
i915_gem_evict_everything(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm, *v;
	bool lists_empty = true;
	int ret;

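	/* If every address space is already devoid of active and inactive
	 * vmas there is nothing to evict, so report failure straight away.
	 */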
	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		lists_empty = (list_empty(&vm->inactive_list) &&
			       list_empty(&vm->active_list));
		if (!lists_empty)
			break;
	}

	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
		WARN_ON(i915_gem_evict_vm(vm, false));

	return 0;
}