/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/imports.h"
#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/macros.h"
#include "main/bufferobj.h"

#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                          intel_obj->Base.Size, 64);
}

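/**
 * Drops the reference to the BO backing this buffer object and clears the
 * upload-source bookkeeping, so that fresh storage (a new BO or the
 * system-memory copy) becomes the canonical backing again.
 */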
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}

/**
 * There is some duplication between Mesa's buffer objects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structures were somehow shared.
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(struct gl_context * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(ctx, &obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}

/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleted, according
    * to the spec, but Mesa doesn't call UnmapBuffer for us at context
    * destroy time (though it does when you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, obj);

   free(intel_obj->sys_buffer);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}



/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it.
    */
#ifndef I915
   (void) target;
#endif

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
#ifdef I915
      /* On pre-965, stick VBOs in system memory, as we're always doing
       * swtnl with their contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
         intel_obj->sys_buffer = malloc(size);
         if (intel_obj->sys_buffer != NULL) {
            if (data != NULL)
               memcpy(intel_obj->sys_buffer, data, size);
            return true;
         }
      }
#endif
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}


/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
         release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
         memcpy((char *)intel_obj->sys_buffer + offset, data, size);
         return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
         /* Replace the current busy bo with fresh data. */
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
         drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else {
         perf_debug("Using a blit copy to avoid stalling on glBufferSubData() "
                    "to a busy buffer object.\n");
         drm_intel_bo *temp_bo =
            drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

         drm_intel_bo_subdata(temp_bo, 0, size, data);

         intel_emit_linear_blit(intel,
                                intel_obj->buffer, offset,
                                temp_bo, 0,
                                size);

         drm_intel_bo_unreference(temp_bo);
      }
   } else {
      if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
         if (drm_intel_bo_busy(intel_obj->buffer)) {
            perf_debug("Stalling on the GPU in glBufferSubData().\n");
         }
      }
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}


/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct intel_context *intel = intel_context(ctx);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
   }
}



/**
 * Called via glMapBufferRange and glMapBuffer
 *
 * The goal of the ARB_map_buffer_range extension is to allow apps to
 * accumulate their rendering at the same time as they accumulate new data
 * in their buffer object.  Without it, you'd end up blocking on execution
 * of rendering every time you mapped the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      const bool read_only =
         (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;

      if (!read_only && intel_obj->source)
         release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
         obj->Pointer = intel_obj->sys_buffer + offset;
         return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
         if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
            drm_intel_bo_unreference(intel_obj->buffer);
            intel_bufferobj_alloc_buffer(intel, intel_obj);
         } else {
            intel_flush(ctx);
         }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
                 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
         drm_intel_bo_unreference(intel_obj->buffer);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         intel_obj->range_map_buffer = malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT)) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}

/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   if (length == 0)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
                          intel_obj->buffer, obj->Offset + offset,
                          temp_bo, 0,
                          length);

   drm_intel_bo_unreference(temp_bo);
}


/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      drm_intel_bo_unmap(intel_obj->range_map_bo);

      intel_emit_linear_blit(intel,
                             intel_obj->buffer, obj->Offset,
                             intel_obj->range_map_bo, 0,
                             obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      drm_intel_bo_unmap(intel_obj->buffer);
   }
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return true;
}

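/**
 * Returns the BO backing this buffer object, migrating any system-memory
 * copy into a freshly allocated BO first so callers always get real
 * GPU-visible storage.
 */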
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint flag)
{
   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
                           0, intel_obj->Base.Size,
                           intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}

#define INTEL_UPLOAD_SIZE (64*1024)

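/**
 * Flushes any data still pending in the CPU-side staging buffer into the
 * current upload BO, then drops our reference to that BO so a new one can
 * be allocated by wrap_buffers() on the next upload.
 */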
void
intel_upload_finish(struct intel_context *intel)
{
   if (!intel->upload.bo)
      return;

   if (intel->upload.buffer_len) {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   drm_intel_bo_unreference(intel->upload.bo);
   intel->upload.bo = NULL;
}

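/**
 * Starts a fresh upload BO (at least INTEL_UPLOAD_SIZE bytes), flushing
 * whatever was still pending in the previous one first.
 */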
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}

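/**
 * Copies \c size bytes at \c ptr into the shared upload BO at the next
 * \c align -aligned offset.  Small copies are coalesced in the CPU-side
 * staging buffer and written back with drm_intel_bo_subdata() in larger
 * chunks; the caller receives a referenced BO and the offset of its data.
 */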
void intel_upload_data(struct intel_context *intel,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
   GLuint base, delta;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size < sizeof(intel->upload.buffer))
   {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      memcpy(intel->upload.buffer + intel->upload.buffer_len, ptr, size);
      intel->upload.buffer_len += size;
   }
   else
   {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
   }

   intel->upload.offset = base + size;
}

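/**
 * Returns a CPU pointer the caller can fill with \c size bytes destined for
 * the upload BO.  Small requests point into the staging buffer; larger ones
 * get a temporary malloc'd block.  The data is committed by the matching
 * intel_upload_unmap() call.
 */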
void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
{
   GLuint base, delta;
   char *ptr;

   base = (intel->upload.offset + align - 1) / align * align;
   if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
      wrap_buffers(intel, size);
      base = 0;
   }

   delta = base - intel->upload.offset;
   if (intel->upload.buffer_len &&
       intel->upload.buffer_len + delta + size > sizeof(intel->upload.buffer))
   {
      drm_intel_bo_subdata(intel->upload.bo,
                           intel->upload.buffer_offset,
                           intel->upload.buffer_len,
                           intel->upload.buffer);
      intel->upload.buffer_len = 0;
   }

   if (size <= sizeof(intel->upload.buffer)) {
      if (intel->upload.buffer_len == 0)
         intel->upload.buffer_offset = base;
      else
         intel->upload.buffer_len += delta;

      ptr = intel->upload.buffer + intel->upload.buffer_len;
      intel->upload.buffer_len += size;
   } else
      ptr = malloc(size);

   return ptr;
}

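/**
 * Commits data previously written through intel_upload_map(): large blocks
 * are copied into the upload BO and freed, small ones were already staged.
 * Returns a referenced BO and offset, mirroring intel_upload_data().
 */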
void intel_upload_unmap(struct intel_context *intel,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset)
{
   GLuint base;

   base = (intel->upload.offset + align - 1) / align * align;
   if (size > sizeof(intel->upload.buffer)) {
      drm_intel_bo_subdata(intel->upload.bo, base, size, ptr);
      free((void*)ptr);
   }

   drm_intel_bo_reference(intel->upload.bo);
   *return_bo = intel->upload.bo;
   *return_offset = base;

   intel->upload.offset = base + size;
}

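/**
 * Returns a BO suitable for use as a blit/read source.  A system-memory
 * buffer object is streamed into the shared upload BO (and marked as
 * \c source) rather than getting a dedicated BO of its own.
 */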
drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{
   if (intel_obj->buffer == NULL) {
      intel_upload_data(intel,
                        intel_obj->sys_buffer, intel_obj->Base.Size, align,
                        &intel_obj->buffer, &intel_obj->offset);
      intel_obj->source = 1;
   }

   *offset = intel_obj->offset;
   return intel_obj->buffer;
}

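/**
 * Called via glCopyBufferSubData().
 *
 * System-memory buffers are handled with a map plus memcpy/memmove; real
 * BOs are copied with a linear blit on the GPU.
 */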
static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             struct gl_buffer_object *src,
                             struct gl_buffer_object *dst,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If we're in system memory, just map and memcpy. */
   if (intel_src->sys_buffer || intel_dst->sys_buffer) {
      /* The same buffer may be used, but note that regions copied may
       * not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                               GL_MAP_READ_BIT |
                                               GL_MAP_WRITE_BIT,
                                               dst);
         memmove(ptr + write_offset, ptr + read_offset, size);
         intel_bufferobj_unmap(ctx, dst);
      } else {
         const char *src_ptr;
         char *dst_ptr;

         src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
                                             GL_MAP_READ_BIT, src);
         dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                             GL_MAP_WRITE_BIT, dst);

         memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

         intel_bufferobj_unmap(ctx, src);
         intel_bufferobj_unmap(ctx, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
                          dst_bo, write_offset,
                          src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}

#if FEATURE_APPLE_object_purgeable
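/* GL_APPLE_object_purgeable support: purgeable/unpurgeable state is mapped
 * onto the kernel's madvise interface (I915_MADV_DONTNEED / WILLNEED), and
 * the madvise result tells us whether the pages were retained.
 */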
static GLenum
intel_buffer_purgeable(drm_intel_bo *buffer)
{
   int retained = 0;

   if (buffer != NULL)
      retained = drm_intel_bo_madvise(buffer, I915_MADV_DONTNEED);

   return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}

static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   if (intel_obj->buffer != NULL)
      return intel_buffer_purgeable(intel_obj->buffer);

   if (option == GL_RELEASED_APPLE) {
      if (intel_obj->sys_buffer != NULL) {
         free(intel_obj->sys_buffer);
         intel_obj->sys_buffer = NULL;
      }

      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      struct intel_context *intel = intel_context(ctx);
      drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj, INTEL_READ);

      return intel_buffer_purgeable(bo);
   }
}

static GLenum
intel_texture_object_purgeable(struct gl_context * ctx,
                               struct gl_texture_object *obj,
                               GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_purgeable(struct gl_context * ctx,
                              struct gl_renderbuffer *obj,
                              GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_RELEASED_APPLE;

   return intel_buffer_purgeable(intel->mt->region->bo);
}

static GLenum
intel_buffer_unpurgeable(drm_intel_bo *buffer)
{
   int retained;

   retained = 0;
   if (buffer != NULL)
      retained = drm_intel_bo_madvise(buffer, I915_MADV_WILLNEED);

   return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
}

static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   (void) ctx;
   (void) option;

   return intel_buffer_unpurgeable(intel_buffer_object(obj)->buffer);
}

static GLenum
intel_texture_object_unpurgeable(struct gl_context * ctx,
                                 struct gl_texture_object *obj,
                                 GLenum option)
{
   struct intel_texture_object *intel;

   (void) ctx;
   (void) option;

   intel = intel_texture_object(obj);
   if (intel->mt == NULL || intel->mt->region == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}

static GLenum
intel_render_object_unpurgeable(struct gl_context * ctx,
                                struct gl_renderbuffer *obj,
                                GLenum option)
{
   struct intel_renderbuffer *intel;

   (void) ctx;
   (void) option;

   intel = intel_renderbuffer(obj);
   if (intel->mt == NULL)
      return GL_UNDEFINED_APPLE;

   return intel_buffer_unpurgeable(intel->mt->region->bo);
}
#endif

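/**
 * Plugs our buffer-object functions into the device driver function table.
 */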
void
intelInitBufferObjectFuncs(struct dd_function_table *functions)
{
   functions->NewBufferObject = intel_bufferobj_alloc;
   functions->DeleteBuffer = intel_bufferobj_free;
   functions->BufferData = intel_bufferobj_data;
   functions->BufferSubData = intel_bufferobj_subdata;
   functions->GetBufferSubData = intel_bufferobj_get_subdata;
   functions->MapBufferRange = intel_bufferobj_map_range;
   functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = intel_bufferobj_unmap;
   functions->CopyBufferSubData = intel_bufferobj_copy_subdata;

#if FEATURE_APPLE_object_purgeable
   functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
   functions->TextureObjectPurgeable = intel_texture_object_purgeable;
   functions->RenderObjectPurgeable = intel_render_object_purgeable;

   functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
   functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
   functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
#endif
}