/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 2008-2010, 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
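
/*
 * Add an unpinned object to the unwind list and register its GTT space
 * with the drm_mm eviction scanner; pinned objects are never eviction
 * candidates.
 */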
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
	if (obj->pin_count)
		return false;

	list_add(&obj->exec_list, unwind, (caddr_t)obj);
	return drm_mm_scan_add_block(obj->gtt_space);
}
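
/*
 * i915_gem_evict_something - scan the GTT in LRU order for a run of
 * objects that can be unbound to free a contiguous block of at least
 * min_size bytes with the requested alignment and cache level.
 * Returns -ENOSPC if no such run can be found.
 */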
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
			 unsigned alignment, unsigned cache_level,
			 bool mappable, bool nonblocking)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *    1. Inactive objects (already retired)
	 *    2. Clean active objects
	 *    3. Flushing list
	 *    4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
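	/*
	 * Initialize the drm_mm scanner; when the new binding must be
	 * CPU-mappable, restrict the search to the mappable aperture.
	 */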
	if (mappable)
		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
					    min_size, alignment, cache_level,
					    0, dev_priv->gtt.mappable_end);
	else
		drm_mm_init_scan(&dev_priv->mm.gtt_space,
				 min_size, alignment, cache_level);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj, struct drm_i915_gem_object,
			    &dev_priv->mm.inactive_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}
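
	/*
	 * In nonblocking mode we may not wait for rendering, so only
	 * already-idle objects are eviction candidates.
	 */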
	if (nonblocking)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(obj, struct drm_i915_gem_object,
			    &dev_priv->mm.active_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		ret = drm_mm_scan_remove_block(obj->gtt_space);
		BUG_ON(ret);

		list_del_init(&obj->exec_list);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while scanning,
	 * therefore store the to-be-evicted objects on a temporary list.
	 */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (drm_mm_scan_remove_block(obj->gtt_space)) {
			list_move(&obj->exec_list, &eviction_list, (caddr_t)obj);
			drm_gem_object_reference(&obj->base);
			continue;
		}
		list_del_init(&obj->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj = list_first_entry(&eviction_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (ret == 0)
			ret = i915_gem_object_unbind(obj, 1);

		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	return ret;
}
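
/*
 * i915_gem_evict_everything - evict every unpinned object from the GTT.
 * Idles the GPU, retires all outstanding requests and then unbinds each
 * object left on the inactive list.
 */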
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	bool lists_empty;
	int ret;

	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.active_list));
	if (lists_empty)
		return -ENOSPC;

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry_safe(obj, next, struct drm_i915_gem_object,
				 &dev_priv->mm.inactive_list, mm_list)
		/* LINTED */
		if (obj->pin_count == 0)
			WARN_ON(i915_gem_object_unbind(obj, true));

	return 0;
}