Lines Matching refs:obj

42 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
43 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
44 static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
49 struct drm_i915_gem_object *obj,
53 struct drm_i915_gem_object *obj);
54 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
58 int i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
60 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
62 if (obj->tiling_mode)
63 i915_gem_release_mmap(obj);
68 obj->fence_dirty = false;
69 obj->fence_reg = I915_FENCE_REG_NONE;
128 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
130 return obj->gtt_space && !obj->active;
166 struct drm_i915_gem_object *obj;
171 list_for_each_entry(obj, struct drm_i915_gem_object, &dev_priv->mm.bound_list, global_list)
172 if (obj->pin_count)
173 pinned += obj->gtt_space->size;
187 void i915_gem_object_free(struct drm_i915_gem_object *obj)
197 struct drm_i915_gem_object *obj;
206 obj = i915_gem_alloc_object(dev, size);
207 if (obj == NULL)
210 ret = drm_gem_handle_create(file, &obj->base, &handle);
212 drm_gem_object_release(&obj->base);
213 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
214 i915_gem_object_free(obj);
219 drm_gem_object_unreference(&obj->base);
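
The matches at source lines 197-219 cover the object-creation helper. Read together they suggest the standard GEM create flow: allocate a backing object, create a userspace handle for it, and on handle-creation failure tear the object down by hand because no handle ever owned it. A minimal sketch of that flow, reconstructed only from the matched calls (the function name i915_gem_create, its argument list, and the error handling between the calls are assumptions):

    static int
    i915_gem_create(struct drm_file *file, struct drm_device *dev,
        uint64_t size, uint32_t *handle_p)          /* signature assumed */
    {
        struct drm_i915_gem_object *obj;
        uint32_t handle;
        int ret;

        obj = i915_gem_alloc_object(dev, size);
        if (obj == NULL)
            return -ENOMEM;

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        if (ret) {
            /* No handle was created, so unwind the allocation manually. */
            drm_gem_object_release(&obj->base);
            i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
            i915_gem_object_free(obj);
            return ret;
        }

        /* The handle now holds a reference; drop the allocation reference. */
        drm_gem_object_unreference(&obj->base);
        *handle_p = handle;
        return 0;
    }
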
307 struct drm_i915_gem_object *obj,
324 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
326 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
331 if (obj->cache_level == I915_CACHE_NONE)
333 if (obj->gtt_space) {
334 ret = i915_gem_object_set_to_gtt_domain(obj, false);
340 ret = i915_gem_object_get_pages(obj);
344 i915_gem_object_pin_pages(obj);
351 i915_gem_clflush_object(obj);
374 slow_shmem_bit17_copy(obj->page_list[shmem_page_index],
387 obj->base.kaddr + args->offset,
392 i915_gem_object_unpin_pages(obj);
406 struct drm_i915_gem_object *obj;
416 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
417 if (&obj->base == NULL) {
423 if (args->offset > obj->base.size ||
424 args->size > obj->base.size - args->offset) {
432 ret = i915_gem_shmem_pread(dev, obj, args, file);
434 TRACE_GEM_OBJ_HISTORY(obj, "pread");
437 drm_gem_object_unreference(&obj->base);
445 struct drm_i915_gem_object *obj,
452 ret = i915_gem_object_pin(obj, 0, true, true);
456 ret = i915_gem_object_set_to_gtt_domain(obj, true);
460 ret = i915_gem_object_put_fence(obj);
465 ret = DRM_COPY_FROM_USER(obj->base.kaddr + args->offset, user_data, args->size);
472 i915_gem_object_unpin(obj);
479 struct drm_i915_gem_object *obj,
504 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
506 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
511 if (obj->cache_level == I915_CACHE_NONE)
513 if (obj->gtt_space) {
514 ret = i915_gem_object_set_to_gtt_domain(obj, true);
521 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
522 && obj->cache_level == I915_CACHE_NONE)
525 ret = i915_gem_object_get_pages(obj);
529 i915_gem_object_pin_pages(obj);
532 i915_gem_clflush_object(obj);
535 obj->dirty = 1;
558 slow_shmem_bit17_copy(obj->page_list[shmem_page_index],
570 ret = DRM_COPY_FROM_USER(obj->base.kaddr + args->offset,
577 i915_gem_object_unpin_pages(obj);
594 struct drm_i915_gem_object *obj;
604 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
605 if (&obj->base == NULL) {
611 if (args->offset > obj->base.size ||
612 args->size > obj->base.size - args->offset) {
620 TRACE_GEM_OBJ_HISTORY(obj, "pwrite");
628 if (obj->phys_obj) {
629 ret = i915_gem_phys_pwrite(dev, obj, args, file);
633 if (obj->cache_level == I915_CACHE_NONE &&
634 obj->tiling_mode == I915_TILING_NONE &&
635 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
636 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
642 if (obj->is_cursor)
643 i915_gem_clflush_object(obj);
647 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
650 drm_gem_object_unreference(&obj->base);
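
The pwrite ioctl matches (source lines 594-650) show the same lookup / validate / dispatch / unreference shape as pread, plus a three-way choice of write path. A condensed sketch of the dispatch assembled from the matched lines; the strict if/else chaining, the errno values, and the labels are assumptions, since the listing omits the non-matching lines that decide when the GTT fast path gives up and falls through to shmem:

    obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
    if (&obj->base == NULL)
        return -ENOENT;                             /* lookup failed */

    /* Reject writes that run past the end of the object. */
    if (args->offset > obj->base.size ||
        args->size > obj->base.size - args->offset) {
        ret = -EINVAL;
        goto out;                                   /* label assumed */
    }

    TRACE_GEM_OBJ_HISTORY(obj, "pwrite");

    if (obj->phys_obj) {
        /* Objects bound to a phys object use the dedicated copy path. */
        ret = i915_gem_phys_pwrite(dev, obj, args, file);
    } else if (obj->cache_level == I915_CACHE_NONE &&
        obj->tiling_mode == I915_TILING_NONE &&
        obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
        /* Uncached, untiled, not CPU-dirty: write through the GTT mapping. */
        ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
    } else {
        if (obj->is_cursor)
            i915_gem_clflush_object(obj);
        ret = i915_gem_shmem_pwrite(dev, obj, args, file);
    }

    out:
    drm_gem_object_unreference(&obj->base);
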
822 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
834 obj->last_write_seqno = 0;
835 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
845 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
848 struct intel_ring_buffer *ring = obj->ring;
852 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
860 return i915_gem_object_wait_rendering__tail(obj, ring);
867 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
870 struct drm_device *dev = obj->base.dev;
872 struct intel_ring_buffer *ring = obj->ring;
880 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
899 return i915_gem_object_wait_rendering__tail(obj, ring);
911 struct drm_i915_gem_object *obj;
933 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
934 if (&obj->base == NULL) {
943 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
948 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
957 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
961 drm_gem_object_unreference(&obj->base);
975 struct drm_i915_gem_object *obj;
982 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
983 if (&obj->base == NULL) {
989 if (obj->pin_count)
990 i915_gem_object_flush_cpu_write_domain(obj);
992 drm_gem_object_unreference(&obj->base);
1011 struct drm_gem_object *obj;
1018 obj = drm_gem_object_lookup(dev, file, args->handle);
1019 if (obj == NULL)
1026 if (obj->size > dev_priv->gtt.mappable_end) {
1027 drm_gem_object_unreference_unlocked(obj);
1031 ret = ddi_devmap_segmap(dev_id, (off_t)obj->maplist.user_token,
1032 ttoproc(curthread)->p_as, &vvaddr, obj->maplist.map->size,
1038 drm_gem_object_unreference(obj);
1047 i915_gem_fault(struct drm_gem_object *obj)
1049 struct drm_device *dev = obj->dev;
1050 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1054 if (obj->maplist.map->gtt_mmap)
1085 drm_gem_mmap(obj, start);
1087 obj->maplist.map->gtt_mmap = 1;
1097 * @obj: obj in question
1104 * This routine allocates and attaches a fake offset for @obj.
1107 i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
1109 struct ddi_umem_cookie *umem_cookie = obj->base.maplist.map->umem_cookie;
1112 if (obj->base.gtt_map_kaddr == NULL) {
1113 ret = drm_gem_create_mmap_offset(&obj->base);
1120 umem_cookie->cvaddr = obj->base.gtt_map_kaddr;
1125 obj->mmap_offset = obj->base.maplist.user_token;
1126 obj->base.maplist.map->callback = 1;
1133 * @obj: obj in question
1146 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1148 struct drm_device *dev = obj->base.dev;
1151 if (obj->base.maplist.map->gtt_mmap) {
1153 if (!list_empty(&obj->base.seg_list)) {
1154 list_for_each_entry_safe(entry, temp, struct gem_map_list, &obj->base.seg_list, head) {
1161 drm_gem_release_mmap(&obj->base);
1162 obj->base.maplist.map->gtt_mmap = 0;
1167 i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
1169 drm_gem_free_mmap_offset(&obj->base);
1170 obj->mmap_offset = 0;
1196 * @obj: object to check
1228 struct drm_i915_gem_object *obj;
1235 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1236 if (&obj->base == NULL) {
1241 if (obj->base.size > dev_priv->gtt.mappable_end) {
1246 if (!obj->mmap_offset) {
1247 ret = i915_gem_create_mmap_offset(obj);
1252 *offset = obj->mmap_offset;
1255 drm_gem_object_unreference(&obj->base);
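
Source lines 1097-1170 and 1228-1255 cover the fake mmap-offset machinery: i915_gem_create_mmap_offset() wires obj->base.gtt_map_kaddr into the umem cookie and records maplist.user_token as obj->mmap_offset, and the mmap_gtt ioctl hands that offset back to userspace, allocating it lazily on first use. A sketch of the ioctl side built from the matched lines (errno values and the unwind label are assumptions):

    obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
    if (&obj->base == NULL)
        return -ENOENT;

    if (obj->base.size > dev_priv->gtt.mappable_end) {
        /* Object can never fit in the mappable aperture. */
        ret = -E2BIG;
        goto out;
    }

    if (!obj->mmap_offset) {
        /* First map of this object: allocate the fake offset now. */
        ret = i915_gem_create_mmap_offset(obj);
        if (ret)
            goto out;
    }

    *offset = obj->mmap_offset;
    out:
    drm_gem_object_unreference(&obj->base);
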
1286 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1289 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1295 i915_gem_clflush_object(obj);
1296 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1298 if (i915_gem_object_needs_bit17_swizzle(obj))
1299 i915_gem_object_save_bit_17_swizzle(obj);
1301 obj->dirty = 0;
1303 kmem_free(obj->page_list,
1304 btop(obj->base.size) * sizeof(caddr_t));
1305 obj->page_list = NULL;
1309 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1311 const struct drm_i915_gem_object_ops *ops = obj->ops;
1313 if (obj->page_list == NULL)
1316 BUG_ON(obj->gtt_space);
1318 if (obj->pages_pin_count)
1321 ops->put_pages(obj);
1322 obj->page_list = NULL;
1324 list_del(&obj->global_list);
1329 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1331 pgcnt_t np = btop(obj->base.size);
1335 obj->page_list = kmem_zalloc(np * sizeof(caddr_t), KM_SLEEP);
1336 if (obj->page_list == NULL) {
1341 for (i = 0, va = obj->base.kaddr; i < np; i++, va += PAGESIZE) {
1342 obj->page_list[i] = va;
1345 if (i915_gem_object_needs_bit17_swizzle(obj))
1346 i915_gem_object_do_bit_17_swizzle(obj);
1359 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1361 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1362 const struct drm_i915_gem_object_ops *ops = obj->ops;
1365 if (obj->page_list)
1368 BUG_ON(obj->pages_pin_count);
1370 ret = ops->get_pages(obj);
1374 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list, (caddr_t)obj);
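
Source lines 1286-1374 are the page-backing code in its Solaris form: instead of an sg_table, the object keeps a flat page_list of kernel virtual addresses carved out of the contiguous obj->base.kaddr mapping, and get/put_pages bracket it with bit-17 swizzle fixups and the bound/unbound list moves. A sketch of the get side as the matched lines describe it (the local declarations and the errno are assumptions):

    static int
    i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
    {
        pgcnt_t np = btop(obj->base.size);          /* object size in pages */
        caddr_t va;
        pgcnt_t i;

        obj->page_list = kmem_zalloc(np * sizeof (caddr_t), KM_SLEEP);
        if (obj->page_list == NULL)
            return -ENOMEM;

        /* The backing store is already mapped at obj->base.kaddr:
         * record one kernel virtual address per page. */
        for (i = 0, va = obj->base.kaddr; i < np; i++, va += PAGESIZE)
            obj->page_list[i] = va;

        if (i915_gem_object_needs_bit17_swizzle(obj))
            i915_gem_object_do_bit_17_swizzle(obj);

        return 0;
    }
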
1379 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1382 struct drm_device *dev = obj->base.dev;
1387 if (obj->ring != ring && obj->last_write_seqno) {
1389 obj->last_write_seqno = seqno;
1391 obj->ring = ring;
1394 if (!obj->active) {
1395 drm_gem_object_reference(&obj->base);
1396 obj->active = 1;
1400 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list, (caddr_t)obj);
1401 list_move_tail(&obj->ring_list, &ring->active_list, (caddr_t)obj);
1402 obj->last_read_seqno = seqno;
1404 if (obj->fenced_gpu_access) {
1405 obj->last_fenced_seqno = seqno;
1408 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1411 reg = &dev_priv->fence_regs[obj->fence_reg];
1415 TRACE_GEM_OBJ_HISTORY(obj, "to active");
1419 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1421 struct drm_device *dev = obj->base.dev;
1424 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1425 BUG_ON(!obj->active);
1427 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
1429 list_del_init(&obj->ring_list);
1430 obj->ring = NULL;
1432 obj->last_read_seqno = 0;
1433 obj->last_write_seqno = 0;
1434 obj->base.write_domain = 0;
1436 obj->last_fenced_seqno = 0;
1437 obj->fenced_gpu_access = false;
1439 obj->active = 0;
1440 TRACE_GEM_OBJ_HISTORY(obj, "to inactive");
1441 drm_gem_object_unreference(&obj->base);
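
Source lines 1379-1441 are the active/inactive bookkeeping, and the matches expose its key invariant: the first move onto the active list takes an extra reference on the object, and the move back to inactive drops it, so an object cannot be freed while the GPU may still be reading it. Schematically, with the non-matching lines between the two halves omitted:

    /* move_to_active: retirement will drop this reference later */
    if (!obj->active) {
        drm_gem_object_reference(&obj->base);
        obj->active = 1;
    }
    list_move_tail(&obj->mm_list, &dev_priv->mm.active_list, (caddr_t)obj);
    list_move_tail(&obj->ring_list, &ring->active_list, (caddr_t)obj);
    obj->last_read_seqno = seqno;

    /* move_to_inactive: the GPU is done with the object */
    list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
    list_del_init(&obj->ring_list);
    obj->ring = NULL;
    obj->last_read_seqno = 0;
    obj->last_write_seqno = 0;
    obj->active = 0;
    drm_gem_object_unreference(&obj->base);     /* pairs with the reference above */
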
1518 struct drm_i915_gem_object *obj,
1562 request->batch_obj = obj;
1627 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
1629 if (acthd >= obj->gtt_offset &&
1630 acthd < obj->gtt_offset + obj->base.size)
1749 struct drm_i915_gem_object *obj;
1751 obj = list_first_entry(&ring->active_list,
1755 i915_gem_object_move_to_inactive(obj);
1771 if (reg->obj) {
1772 i915_gem_object_update_fence(reg->obj, reg,
1773 reg->obj->tiling_mode);
1783 struct drm_i915_gem_object *obj;
1793 list_for_each_entry(obj, struct drm_i915_gem_object,
1797 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1842 struct drm_i915_gem_object *obj;
1844 obj = list_first_entry(&ring->active_list,
1848 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
1851 i915_gem_object_move_to_inactive(obj);
1927 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
1931 if (obj->active) {
1932 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
1936 i915_gem_retire_requests_ring(obj->ring);
1969 struct drm_i915_gem_object *obj;
1984 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
1985 if (&obj->base == NULL) {
1991 ret = i915_gem_object_flush_active(obj);
1995 if (obj->active) {
1996 seqno = obj->last_read_seqno;
1997 ring = obj->ring;
2011 drm_gem_object_unreference(&obj->base);
2022 drm_gem_object_unreference(&obj->base);
2030 * @obj: object which may be in use on another ring.
2040 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2043 struct intel_ring_buffer *from = obj->ring;
2050 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2051 return i915_gem_object_wait_rendering(obj, false);
2055 seqno = obj->last_read_seqno;
2059 ret = i915_gem_check_olr(obj->ring, seqno);
2069 from->sync_seqno[idx] = obj->last_read_seqno;
2074 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2078 i915_gem_release_mmap(obj);
2080 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2084 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2085 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2092 i915_gem_object_unbind(struct drm_i915_gem_object *obj, uint32_t type)
2094 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2097 if (obj->gtt_space == NULL)
2100 if (obj->pin_count)
2103 BUG_ON(obj->page_list == NULL);
2105 ret = i915_gem_object_finish_gpu(obj);
2113 i915_gem_object_finish_gtt(obj);
2116 ret = i915_gem_object_put_fence(obj);
2120 if (obj->has_global_gtt_mapping)
2121 i915_gem_gtt_unbind_object(obj, type);
2122 if (obj->has_aliasing_ppgtt_mapping) {
2123 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2124 obj->has_aliasing_ppgtt_mapping = 0;
2126 i915_gem_gtt_finish_object(obj);
2127 i915_gem_object_unpin_pages(obj);
2129 list_del(&obj->mm_list);
2130 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list, (caddr_t)obj);
2132 obj->map_and_fenceable = true;
2134 drm_mm_put_block(obj->gtt_space);
2135 obj->gtt_space = NULL;
2136 obj->gtt_offset = 0;
2137 TRACE_GEM_OBJ_HISTORY(obj, "unbind");
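
The unbind matches (source lines 2092-2137) spell out the teardown order: a pinned object is refused outright, and an unpinned one must be idle on the GPU, flushed out of the GTT domain, and stripped of its fence before its global and per-process GTT mappings are torn down and the drm_mm block is returned. A sketch keeping only the calls that appear in the matches (the -EBUSY return and the error handling between the calls are assumptions):

    if (obj->gtt_space == NULL)
        return 0;                               /* nothing bound */
    if (obj->pin_count)
        return -EBUSY;

    ret = i915_gem_object_finish_gpu(obj);      /* wait for outstanding rendering */
    i915_gem_object_finish_gtt(obj);            /* drop GTT domains, revoke mmaps */
    ret = i915_gem_object_put_fence(obj);       /* release the fence register */

    if (obj->has_global_gtt_mapping)
        i915_gem_gtt_unbind_object(obj, type);
    if (obj->has_aliasing_ppgtt_mapping) {
        i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
        obj->has_aliasing_ppgtt_mapping = 0;
    }
    i915_gem_gtt_finish_object(obj);
    i915_gem_object_unpin_pages(obj);

    list_del(&obj->mm_list);
    list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list, (caddr_t)obj);
    obj->map_and_fenceable = true;              /* unbound objects are trivially mappable */

    drm_mm_put_block(obj->gtt_space);
    obj->gtt_space = NULL;
    obj->gtt_offset = 0;
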
2163 struct drm_i915_gem_object *obj)
2191 if (obj) {
2192 u32 size = obj->gtt_space->size;
2195 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2197 val |= obj->gtt_offset & 0xfffff000;
2198 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2199 if (obj->tiling_mode == I915_TILING_Y)
2215 struct drm_i915_gem_object *obj)
2220 if (obj) {
2221 u32 size = obj->gtt_space->size;
2225 if ((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2227 (obj->gtt_offset & (size - 1)))
2229 obj->gtt_offset, obj->map_and_fenceable, size);
2231 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2237 pitch_val = obj->stride / tile_width;
2240 val = obj->gtt_offset;
2241 if (obj->tiling_mode == I915_TILING_Y)
2259 struct drm_i915_gem_object *obj)
2264 if (obj) {
2265 u32 size = obj->gtt_space->size;
2268 if ((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2270 (obj->gtt_offset & (size - 1)))
2272 obj->gtt_offset, size);
2274 pitch_val = obj->stride / 128;
2277 val = obj->gtt_offset;
2278 if (obj->tiling_mode == I915_TILING_Y)
2290 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2292 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2296 struct drm_i915_gem_object *obj)
2303 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2306 if (obj && (!obj->stride || !obj->tiling_mode))
2308 obj->stride, obj->tiling_mode);
2314 case 4: i965_write_fence_reg(dev, reg, obj); break;
2315 case 3: i915_write_fence_reg(dev, reg, obj); break;
2316 case 2: i830_write_fence_reg(dev, reg, obj); break;
2323 if (i915_gem_object_needs_mb(obj))
2333 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2337 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2340 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2343 obj->fence_reg = reg;
2344 fence->obj = obj;
2347 obj->fence_reg = I915_FENCE_REG_NONE;
2348 fence->obj = NULL;
2351 obj->fence_dirty = false;
2355 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2357 if (obj->last_fenced_seqno) {
2358 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2362 obj->last_fenced_seqno = 0;
2365 obj->fenced_gpu_access = false;
2370 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2372 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2376 ret = i915_gem_object_wait_fence(obj);
2380 if (obj->fence_reg == I915_FENCE_REG_NONE)
2383 fence = &dev_priv->fence_regs[obj->fence_reg];
2385 i915_gem_object_fence_lost(obj);
2386 i915_gem_object_update_fence(obj, fence, false);
2402 if (!reg->obj)
2425 * @obj: object to map through a fence reg
2429 * This function walks the fence regs looking for a free one for @obj,
2438 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2440 struct drm_device *dev = obj->base.dev;
2442 bool enable = obj->tiling_mode != I915_TILING_NONE;
2449 if (obj->fence_dirty) {
2450 ret = i915_gem_object_wait_fence(obj);
2456 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2457 reg = &dev_priv->fence_regs[obj->fence_reg];
2458 if (!obj->fence_dirty) {
2468 if (reg->obj) {
2469 struct drm_i915_gem_object *old = reg->obj;
2480 i915_gem_object_update_fence(obj, reg, enable);
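
Source lines 2290-2480 are the fence management: get_fence() reuses or steals a register, put_fence() waits for the last fenced access and clears it, and both funnel through update_fence(), which programs the hardware and keeps obj->fence_reg and fence->obj consistent. A reconstruction of update_fence() from the matched lines 2333-2351 (the parameter types and the register-index computation are assumptions):

    static void
    i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
        struct drm_i915_fence_reg *fence, bool enable)
    {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int reg = fence - dev_priv->fence_regs;     /* slot index; computation assumed */

        /* Program the register for the object, or clear it when disabling. */
        i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);

        if (enable) {
            obj->fence_reg = reg;
            fence->obj = obj;
        } else {
            obj->fence_reg = I915_FENCE_REG_NONE;
            fence->obj = NULL;
        }
        obj->fence_dirty = false;
    }
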
2519 struct drm_i915_gem_object *obj;
2522 list_for_each_entry(obj, struct drm_i915_gem_object, &dev_priv->mm.gtt_list, global_list) {
2523 if (obj->gtt_space == NULL) {
2529 if (obj->cache_level != obj->gtt_space->color) {
2531 obj->gtt_space->start,
2532 obj->gtt_space->start + obj->gtt_space->size,
2533 obj->cache_level,
2534 obj->gtt_space->color);
2540 obj->gtt_space,
2541 obj->cache_level)) {
2543 obj->gtt_space->start,
2544 obj->gtt_space->start + obj->gtt_space->size,
2545 obj->cache_level);
2559 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2564 struct drm_device *dev = obj->base.dev;
2574 obj->base.size,
2575 obj->tiling_mode);
2577 obj->base.size,
2578 obj->tiling_mode, true);
2581 obj->base.size,
2582 obj->tiling_mode, false);
2592 size = map_and_fenceable ? fence_size : obj->base.size;
2597 if (obj->base.size > gtt_max) {
2599 obj->base.size,
2605 ret = i915_gem_object_get_pages(obj);
2609 i915_gem_object_pin_pages(obj);
2613 i915_gem_object_unpin_pages(obj);
2620 obj->cache_level, 0, gtt_max);
2623 obj->cache_level,
2629 i915_gem_object_unpin_pages(obj);
2635 obj->cache_level))) {
2636 i915_gem_object_unpin_pages(obj);
2641 ret = i915_gem_gtt_prepare_object(obj);
2643 i915_gem_object_unpin_pages(obj);
2648 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list, (caddr_t)obj);
2649 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
2651 obj->gtt_space = node;
2652 obj->gtt_offset = node->start;
2659 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
2661 obj->map_and_fenceable = mappable && fenceable;
2663 TRACE_GEM_OBJ_HISTORY(obj, "bind gtt");
2669 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2675 if (obj->page_list == NULL)
2682 if (obj->stolen)
2693 if (obj->cache_level != I915_CACHE_NONE)
2696 drm_clflush_pages(obj->page_list, obj->base.size / PAGE_SIZE);
2697 TRACE_GEM_OBJ_HISTORY(obj, "clflush");
2702 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2704 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2717 obj->base.write_domain = 0;
2722 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2724 struct drm_device *dev = obj->base.dev;
2726 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2729 i915_gem_clflush_object(obj);
2731 obj->base.write_domain = 0;
2741 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write)
2743 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2747 if (obj->gtt_space == NULL)
2750 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2753 ret = i915_gem_object_wait_rendering(obj, !write);
2757 i915_gem_object_flush_cpu_write_domain(obj);
2763 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2770 // BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2771 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2773 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2774 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2775 obj->dirty = 1;
2779 if (i915_gem_object_is_inactive(obj))
2780 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
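
Source lines 2741-2780 are the move into the GTT domain: bail if the object is not bound, short-circuit if it is already the GTT write domain, wait for rendering, flush any CPU write-domain dirt, and then widen or replace the domain masks; a write access additionally marks the pages dirty so they are written back on unbind. A sketch of that sequence from the matched lines (the errno and the intermediate error checks are assumptions):

    if (obj->gtt_space == NULL)
        return -EINVAL;                         /* not bound, nothing to move */
    if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
        return 0;                               /* already the write domain */

    ret = i915_gem_object_wait_rendering(obj, !write);
    if (ret)
        return ret;

    i915_gem_object_flush_cpu_write_domain(obj);

    obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
    if (write) {
        obj->base.read_domains = I915_GEM_DOMAIN_GTT;
        obj->base.write_domain = I915_GEM_DOMAIN_GTT;
        obj->dirty = 1;
    }

    if (i915_gem_object_is_inactive(obj))
        list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
    return 0;
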
2785 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2788 struct drm_device *dev = obj->base.dev;
2792 if (obj->cache_level == cache_level)
2795 if (obj->pin_count) {
2800 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
2801 ret = i915_gem_object_unbind(obj, true);
2806 if (obj->gtt_space) {
2807 ret = i915_gem_object_finish_gpu(obj);
2811 i915_gem_object_finish_gtt(obj);
2818 ret = i915_gem_object_put_fence(obj);
2823 if (obj->has_global_gtt_mapping)
2824 i915_gem_gtt_bind_object(obj, cache_level);
2825 if (obj->has_aliasing_ppgtt_mapping)
2827 obj, cache_level);
2829 obj->gtt_space->color = cache_level;
2836 * in obj->write_domain and have been skipping the clflushes.
2839 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2840 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
2842 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2843 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2846 obj->cache_level = cache_level;
2854 struct drm_i915_gem_object *obj;
2861 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2862 if (&obj->base == NULL) {
2867 args->caching = obj->cache_level != I915_CACHE_NONE;
2869 drm_gem_object_unreference(&obj->base);
2878 struct drm_i915_gem_object *obj;
2897 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2898 if (&obj->base == NULL) {
2903 ret = i915_gem_object_set_cache_level(obj, level);
2905 drm_gem_object_unreference(&obj->base);
2917 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2925 if (pipelined != obj->ring) {
2926 ret = i915_gem_object_sync(obj, pipelined);
2940 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
2948 ret = i915_gem_object_pin(obj, alignment, true, false);
2952 i915_gem_object_flush_cpu_write_domain(obj);
2954 old_write_domain = obj->base.write_domain;
2955 old_read_domains = obj->base.read_domains;
2960 obj->base.write_domain = 0;
2961 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2967 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
2971 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
2974 ret = i915_gem_object_wait_rendering(obj, false);
2979 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2990 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
2996 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
2999 ret = i915_gem_object_wait_rendering(obj, !write);
3003 i915_gem_object_flush_gtt_write_domain(obj);
3005 old_write_domain = obj->base.write_domain;
3006 old_read_domains = obj->base.read_domains;
3009 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3010 i915_gem_clflush_object(obj);
3012 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3018 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3024 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3025 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3082 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3089 if ((obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3092 if (obj->gtt_space != NULL) {
3093 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3094 (map_and_fenceable && !obj->map_and_fenceable)) {
3097 " obj->map_and_fenceable=%d\n",
3098 obj->gtt_offset, alignment,
3100 obj->map_and_fenceable);
3101 ret = i915_gem_object_unbind(obj, 1);
3107 if (obj->gtt_space == NULL) {
3108 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3110 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3117 i915_gem_gtt_bind_object(obj, obj->cache_level);
3120 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3121 i915_gem_gtt_bind_object(obj, obj->cache_level);
3123 obj->pin_count++;
3124 obj->pin_mappable |= map_and_fenceable;
3130 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3132 BUG_ON(obj->pin_count == 0);
3133 BUG_ON(obj->gtt_space == NULL);
3135 if (--obj->pin_count == 0)
3136 obj->pin_mappable = false;
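
Source lines 3082-3136 are pin/unpin. Pinning first checks that an existing binding still satisfies the caller's alignment and mappability constraints and unbinds it if not, binds the object if it is unbound, and only then bumps pin_count; unpinning is pure reference counting. A sketch from the matched lines (the trailing bind_to_gtt arguments and the surrounding checks are assumptions, since the listing truncates that call):

    /* pin: make sure the object is bound somewhere acceptable */
    if (obj->gtt_space != NULL) {
        if ((alignment && obj->gtt_offset & (alignment - 1)) ||
            (map_and_fenceable && !obj->map_and_fenceable)) {
            /* Bound, but in the wrong place: evict and rebind below. */
            ret = i915_gem_object_unbind(obj, 1);
            if (ret)
                return ret;
        }
    }

    if (obj->gtt_space == NULL) {
        ret = i915_gem_object_bind_to_gtt(obj, alignment,
            map_and_fenceable, nonblocking);    /* trailing args assumed */
        if (ret)
            return ret;
        i915_gem_gtt_bind_object(obj, obj->cache_level);
    }

    if (!obj->has_global_gtt_mapping && map_and_fenceable)
        i915_gem_gtt_bind_object(obj, obj->cache_level);

    obj->pin_count++;
    obj->pin_mappable |= map_and_fenceable;

    /* unpin: the object stays bound, only the count drops */
    if (--obj->pin_count == 0)
        obj->pin_mappable = false;
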
3144 struct drm_i915_gem_object *obj;
3151 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3152 if (&obj->base == NULL) {
3157 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3164 obj->user_pin_count++;
3165 obj->pin_filp = file;
3166 if (obj->user_pin_count == 1) {
3167 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3175 i915_gem_object_flush_cpu_write_domain(obj);
3176 args->offset = obj->gtt_offset;
3178 drm_gem_object_unreference(&obj->base);
3189 struct drm_i915_gem_object *obj;
3196 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3197 if (&obj->base == NULL) {
3202 if (obj->pin_filp != file) {
3208 obj->user_pin_count--;
3209 if (obj->user_pin_count == 0) {
3210 obj->pin_filp = NULL;
3211 i915_gem_object_unpin(obj);
3215 drm_gem_object_unreference(&obj->base);
3226 struct drm_i915_gem_object *obj;
3233 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3234 if (&obj->base == NULL) {
3244 ret = i915_gem_object_flush_active(obj);
3246 args->busy = obj->active;
3247 if (obj->ring) {
3248 args->busy |= intel_ring_flag(obj->ring) << 16;
3251 drm_gem_object_unreference(&obj->base);
3275 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3278 INIT_LIST_HEAD(&obj->mm_list);
3279 INIT_LIST_HEAD(&obj->global_list);
3280 INIT_LIST_HEAD(&obj->ring_list);
3281 INIT_LIST_HEAD(&obj->exec_list);
3283 obj->ops = ops;
3285 obj->fence_reg = I915_FENCE_REG_NONE;
3286 obj->madv = I915_MADV_WILLNEED;
3288 obj->map_and_fenceable = true;
3290 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3301 struct drm_i915_gem_object *obj;
3304 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3305 if (obj == NULL)
3313 if (drm_gem_object_init(dev, &obj->base, size, gen) != 0) {
3314 kfree(obj, sizeof(*obj));
3320 i915_gem_object_init(obj, &i915_gem_object_ops);
3322 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3323 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3338 obj->cache_level = I915_CACHE_LLC;
3340 obj->cache_level = I915_CACHE_NONE;
3342 return obj;
3345 int i915_gem_init_object(struct drm_gem_object *obj)
3353 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3354 struct drm_device *dev = obj->base.dev;
3358 if (obj->phys_obj)
3359 i915_gem_detach_phys_object(dev, obj);
3361 obj->pin_count = 0;
3362 ret = i915_gem_object_unbind(obj, 1);
3368 WARN_ON(i915_gem_object_unbind(obj, 1));
3375 if (obj->stolen)
3376 i915_gem_object_unpin_pages(obj);
3378 if (obj->pages_pin_count)
3379 obj->pages_pin_count = 0;
3380 i915_gem_object_put_pages(obj);
3381 if (obj->mmap_offset)
3382 i915_gem_free_mmap_offset(obj);
3384 // if (obj->base.import_attach)
3385 // drm_prime_gem_destroy(&obj->base, NULL);
3387 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3389 if (obj->bit_17 != NULL)
3390 kfree(obj->bit_17, sizeof(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * sizeof(long)));
3391 drm_gem_object_release(&obj->base);
3392 kfree(obj, sizeof(*obj));
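
Source lines 3345-3392 are the object destructor. The matches show the reverse of everything above: detach any phys object, force pin_count to zero so the unbind goes through, release the backing pages and the fake mmap offset, and finally free the bit-17 swizzle state and the object itself. A sketch keeping only the matched calls (the local declarations and the handling of the unbind return value are assumptions):

    struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
    struct drm_device *dev = obj->base.dev;

    if (obj->phys_obj)
        i915_gem_detach_phys_object(dev, obj);

    obj->pin_count = 0;                         /* the object is dying: drop pins */
    ret = i915_gem_object_unbind(obj, 1);

    if (obj->stolen)
        i915_gem_object_unpin_pages(obj);

    obj->pages_pin_count = 0;
    i915_gem_object_put_pages(obj);
    if (obj->mmap_offset)
        i915_gem_free_mmap_offset(obj);

    i915_gem_info_remove_obj(dev_priv, obj->base.size);

    if (obj->bit_17 != NULL)
        kfree(obj->bit_17,
            sizeof(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * sizeof(long)));
    drm_gem_object_release(&obj->base);
    kfree(obj, sizeof(*obj));
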
3852 struct drm_i915_gem_object *obj)
3857 if (!obj->phys_obj)
3860 if (!obj->page_list) {
3861 ret = i915_gem_object_get_pages_gtt(obj);
3866 page_count = obj->base.size / PAGE_SIZE;
3869 char *dst = obj->page_list[i];
3870 char *src = (caddr_t)(obj->phys_obj->handle->vaddr + (i * PAGE_SIZE));
3874 drm_clflush_pages(obj->page_list, page_count);
3877 i915_gem_object_put_pages_gtt(obj);
3879 obj->phys_obj->cur_obj = NULL;
3880 obj->phys_obj = NULL;
3885 struct drm_i915_gem_object *obj,
3897 if (obj->phys_obj) {
3898 if (obj->phys_obj->id == id)
3900 i915_gem_detach_phys_object(dev, obj);
3906 obj->base.size, align);
3908 DRM_ERROR("failed to init phys object %d size: %lu\n", id, obj->base.size);
3914 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3915 obj->phys_obj->cur_obj = obj;
3917 if (!obj->page_list) {
3918 ret = i915_gem_object_get_pages_gtt(obj);
3925 page_count = obj->base.size / PAGE_SIZE;
3928 char *dst = obj->page_list[i];
3929 char *src = (caddr_t)(obj->phys_obj->handle->vaddr + (i * PAGE_SIZE));
3934 i915_gem_object_put_pages_gtt(obj);
3943 struct drm_i915_gem_object *obj,
3953 obj_addr = (void *)(uintptr_t)(obj->phys_obj->handle->vaddr + args->offset);
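
The final group (source lines 3852-3953) is the phys-object plumbing used for legacy cursor and overlay buffers. Detaching copies the contents back out of the contiguous phys allocation into the regular page_list backing, and i915_gem_phys_pwrite() writes user data straight into the phys object's kernel mapping at handle->vaddr + args->offset. A sketch of the detach copy loop from the matched lines (the memcpy call and its direction sit on non-matching lines and are assumed, as is the error handling):

    if (!obj->page_list) {
        ret = i915_gem_object_get_pages_gtt(obj);
        if (ret)
            return;
    }

    page_count = obj->base.size / PAGE_SIZE;
    for (i = 0; i < page_count; i++) {
        char *dst = obj->page_list[i];
        char *src = (caddr_t)(obj->phys_obj->handle->vaddr + (i * PAGE_SIZE));
        (void) memcpy(dst, src, PAGE_SIZE);     /* assumed: copy phys -> pages */
    }
    drm_clflush_pages(obj->page_list, page_count);

    i915_gem_object_put_pages_gtt(obj);
    obj->phys_obj->cur_obj = NULL;
    obj->phys_obj = NULL;
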