radeon_texture.c revision 10e418f3815d690b2526e835bc7eb421b6be7050
/*
 * Copyright (C) 2009 Maciej Cencora.
 * Copyright (C) 2008 Nicolai Haehnle.
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "main/glheader.h"
#include "main/imports.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/mfeatures.h"
#include "main/mipmap.h"
#include "main/pbo.h"
#include "main/texcompress.h"
#include "main/texstore.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "drivers/common/meta.h"

#include "xmlpool.h"		/* for symbolic values of enum-type options */

#include "radeon_common.h"

#include "radeon_mipmap_tree.h"

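/**
 * Copy 'numrows' rows of 'rowsize' bytes each from 'src' to 'dst'.
 * If both strides equal the row size the data is contiguous and a single
 * memcpy suffices; otherwise the rows are copied one at a time.
 */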
void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
	GLuint numrows, GLuint rowsize)
{
	assert(rowsize <= dststride);
	assert(rowsize <= srcstride);

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s dst %p, stride %u, src %p, stride %u, "
		"numrows %u, rowsize %u.\n",
		__func__, dst, dststride,
		src, srcstride,
		numrows, rowsize);

	if (rowsize == srcstride && rowsize == dststride) {
		memcpy(dst, src, numrows*rowsize);
	} else {
		GLuint i;
		for(i = 0; i < numrows; ++i) {
			memcpy(dst, src, rowsize);
			dst += dststride;
			src += srcstride;
		}
	}
}

/* textures */
/**
 * Allocate an empty texture image object.
 */
struct gl_texture_image *radeonNewTextureImage(struct gl_context *ctx)
{
	return CALLOC(sizeof(radeon_texture_image));
}

/**
 * Free memory associated with this texture image.
 */
void radeonFreeTexImageData(struct gl_context *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		radeon_miptree_unreference(&image->mt);
		assert(!image->base.Data);
	} else {
		_mesa_free_texture_image_data(ctx, timage);
	}
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	if (timage->Data) {
		_mesa_free_texmemory(timage->Data);
		timage->Data = NULL;
	}
}

/* Set Data pointer and additional data for mapped texture image */
static void teximage_set_map_data(radeon_texture_image *image)
{
	radeon_mipmap_level *lvl;

	if (!image->mt) {
		radeon_warning("%s(%p) Trying to set map data without miptree.\n",
				__func__, image);

		return;
	}

	lvl = &image->mt->levels[image->mtlevel];

	image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
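	/* gl_texture_image::RowStride is counted in texels, while the miptree
	 * rowstride is in bytes.
	 */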
	image->base.RowStride = lvl->rowstride / _mesa_get_format_bytes(image->base.TexFormat);
}


/**
 * Map a single texture image for glTexImage and friends.
 */
void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
{
	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(img %p), write_enable %s.\n",
			__func__, image,
			write_enable ? "true": "false");
	if (image->mt) {
		assert(!image->base.Data);

		radeon_bo_map(image->mt->bo, write_enable);
		teximage_set_map_data(image);
	}
}


void radeon_teximage_unmap(radeon_texture_image *image)
{
	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(img %p)\n",
			__func__, image);
	if (image->mt) {
		assert(image->base.Data);

		image->base.Data = 0;
		radeon_bo_unmap(image->mt->bo);
	}
}

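/*
 * Map/unmap helpers for textures whose storage has been overridden
 * (e.g. texture-from-pixmap): such textures carry a buffer object but no
 * miptree, so the base image's Data pointer is aimed directly at the
 * mapped bo.
 */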
static void map_override(struct gl_context *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_map(t->bo, GL_FALSE);

	img->base.Data = t->bo->ptr;
}

static void unmap_override(struct gl_context *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_unmap(t->bo);

	img->base.Data = NULL;
}

/**
 * Map a validated texture for reading during software rendering.
 */
void radeonMapTexture(struct gl_context *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p)\n",
			__func__, ctx, texObj);

	if (!radeon_validate_texture_miptree(ctx, texObj)) {
		radeon_error("%s(%p, tex %p) Failed to validate miptree for "
			"sw fallback.\n",
			__func__, ctx, texObj);
		return;
	}

	if (t->image_override && t->bo) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Workaround for missing miptree in r100.\n",
			__func__, ctx, texObj);

		map_override(ctx, t);
	}

	/* r100 3D sw fallbacks don't have a miptree */
	if (!t->mt) {
		radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
			__func__, ctx, texObj);
		return;
	}

	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}

void radeonUnmapTexture(struct gl_context *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p)\n",
			__func__, ctx, texObj);

	if (t->image_override && t->bo)
		unmap_override(ctx, t);
	/* r100 3D sw fallbacks don't have a miptree */
	if (!t->mt)
		return;

	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			texObj->Image[face][level]->Data = 0;
	}
	radeon_bo_unmap(t->mt->bo);
}

/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(struct gl_context *ctx, GLenum target,
				   struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;
	struct gl_texture_image *first_image;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Target type %s.\n",
			__func__, ctx, texObj,
			_mesa_lookup_enum_by_nr(target));

	_mesa_generate_mipmap(ctx, target, texObj);

	/* For the compressed case, we don't need to do the
	 * non-TexImage recovery path below.
	 */
	first_image = texObj->Image[0][texObj->BaseLevel];
	if (_mesa_is_format_compressed(first_image->TexFormat))
		return;

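	/* The uncompressed levels recreated by _mesa_generate_mipmap are
	 * expected to live in plain texture memory rather than in the miptree,
	 * so record face/level and drop the stale miptree references; the
	 * images are picked up again on the next texture validation.
	 */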
	for (face = 0; face < nr_faces; face++) {
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(&image->mt);
		}
	}
}

void radeonGenerateMipmap(struct gl_context* ctx, GLenum target, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct radeon_bo *bo;
	GLuint face = _mesa_tex_target_to_face(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, target %s, tex %p)\n",
		__func__, ctx, _mesa_lookup_enum_by_nr(target),
		texObj);

	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s(%p, tex %p) Trying to generate mipmap for texture "
			"that is being processed by the GPU.\n",
			__func__, ctx, texObj);
		radeon_firevertices(rmesa);
	}

	if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
		radeon_teximage_map(baseimage, GL_FALSE);
		radeon_generate_mipmap(ctx, target, texObj);
		radeon_teximage_unmap(baseimage);
	} else {
		_mesa_meta_GenerateMipmap(ctx, target, texObj);
	}
}


/* try to find a format which will only need a memcpy */
static gl_format radeonChoose8888TexFormat(radeonContextPtr rmesa,
					   GLenum srcFormat,
					   GLenum srcType, GLboolean fbo)
{
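	/* Runtime endianness check: the first byte of a GLuint holding 1 is
	 * non-zero only on a little-endian host.
	 */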
	const GLuint ui = 1;
	const GLubyte littleEndian = *((const GLubyte *)&ui);

	/* r100 can only use argb8888 here; same for the fbo path */
	if (IS_R100_CLASS(rmesa->radeonScreen) || fbo)
		return _dri_texformat_argb8888;

	if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
	    (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
	    (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
		return MESA_FORMAT_RGBA8888;
	} else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
		   (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
		   (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
		return MESA_FORMAT_RGBA8888_REV;
	} else if (IS_R200_CLASS(rmesa->radeonScreen)) {
		return _dri_texformat_argb8888;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8)) {
		return MESA_FORMAT_ARGB8888_REV;
	} else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
					    srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
		return MESA_FORMAT_ARGB8888;
	} else
		return _dri_texformat_argb8888;
}

gl_format radeonChooseTextureFormat_mesa(struct gl_context * ctx,
					 GLint internalFormat,
					 GLenum format,
					 GLenum type)
{
	return radeonChooseTextureFormat(ctx, internalFormat, format,
					 type, 0);
}

gl_format radeonChooseTextureFormat(struct gl_context * ctx,
				    GLint internalFormat,
				    GLenum format,
				    GLenum type, GLboolean fbo)
{
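	/* texture_depth comes from the driconf texture_depth option: prefer
	 * 32 bpp texel formats, or force everything down to 16 bpp.
	 */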
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	const GLboolean do32bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s InternalFormat=%s(%d) type=%s format=%s\n",
		__func__,
		_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s do32bpt=%d force16bpt=%d\n",
			__func__, do32bpt, force16bpt);

	switch (internalFormat) {
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
			    _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			    _dri_texformat_argb4444;
		}

	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_argb8888 :
			    _dri_texformat_rgb565;
		}

	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		return !force16bpt ?
			radeonChoose8888TexFormat(rmesa, format, type, fbo) :
			_dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_argb8888 :
		    _dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
		/* r200: can't use a8 format since interpreting hw I8 as a8 would result
		   in wrong rgb values (same as alpha value instead of 0). */
		if (IS_R200_CLASS(rmesa->radeonScreen))
			return _dri_texformat_al88;
		else
			return _dri_texformat_a8;
	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return MESA_FORMAT_YCBCR;
		else
			return MESA_FORMAT_YCBCR_REV;

	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGB_DXT1;

	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
		return MESA_FORMAT_RGBA_DXT1;

	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
		return MESA_FORMAT_RGBA_DXT3;

	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
		return MESA_FORMAT_RGBA_DXT5;

	case GL_ALPHA16F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT16;
	case GL_ALPHA32F_ARB:
		return MESA_FORMAT_ALPHA_FLOAT32;
	case GL_LUMINANCE16F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT16;
	case GL_LUMINANCE32F_ARB:
		return MESA_FORMAT_LUMINANCE_FLOAT32;
	case GL_LUMINANCE_ALPHA16F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT16;
	case GL_LUMINANCE_ALPHA32F_ARB:
		return MESA_FORMAT_LUMINANCE_ALPHA_FLOAT32;
	case GL_INTENSITY16F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT16;
	case GL_INTENSITY32F_ARB:
		return MESA_FORMAT_INTENSITY_FLOAT32;
	case GL_RGB16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGB32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;
	case GL_RGBA16F_ARB:
		return MESA_FORMAT_RGBA_FLOAT16;
	case GL_RGBA32F_ARB:
		return MESA_FORMAT_RGBA_FLOAT32;

#ifdef RADEON_R300
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
		return MESA_FORMAT_Z16;
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		if (rmesa->radeonScreen->chip_family >= CHIP_FAMILY_RV515)
			return MESA_FORMAT_S8_Z24;
		else
			return MESA_FORMAT_Z16;
#else
	case GL_DEPTH_COMPONENT:
	case GL_DEPTH_COMPONENT16:
	case GL_DEPTH_COMPONENT24:
	case GL_DEPTH_COMPONENT32:
	case GL_DEPTH_STENCIL_EXT:
	case GL_DEPTH24_STENCIL8_EXT:
		return MESA_FORMAT_S8_Z24;
#endif

	/* EXT_texture_sRGB */
	case GL_SRGB:
	case GL_SRGB8:
	case GL_SRGB_ALPHA:
	case GL_SRGB8_ALPHA8:
	case GL_COMPRESSED_SRGB:
	case GL_COMPRESSED_SRGB_ALPHA:
		return MESA_FORMAT_SARGB8;

	case GL_SLUMINANCE:
	case GL_SLUMINANCE8:
	case GL_COMPRESSED_SLUMINANCE:
		return MESA_FORMAT_SL8;

	case GL_SLUMINANCE_ALPHA:
	case GL_SLUMINANCE8_ALPHA8:
	case GL_COMPRESSED_SLUMINANCE_ALPHA:
		return MESA_FORMAT_SLA8;

	case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGB_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
		return MESA_FORMAT_SRGBA_DXT1;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
		return MESA_FORMAT_SRGBA_DXT3;
	case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
		return MESA_FORMAT_SRGBA_DXT5;

	default:
		_mesa_problem(ctx,
			      "unexpected internalFormat 0x%x in %s",
			      (int)internalFormat, __func__);
		return MESA_FORMAT_NONE;
	}

	return MESA_FORMAT_NONE;		/* never get here */
}

/** Check whether the given image is valid within the current texture object.
 */
static int image_matches_texture_obj(struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	unsigned level)
{
	const struct gl_texture_image *baseImage = texObj->Image[0][texObj->BaseLevel];

	if (!baseImage)
		return 0;

	if (level < texObj->BaseLevel || level > texObj->MaxLevel)
		return 0;

	const unsigned levelDiff = level - texObj->BaseLevel;
	const unsigned refWidth = MAX2(baseImage->Width >> levelDiff, 1);
	const unsigned refHeight = MAX2(baseImage->Height >> levelDiff, 1);
	const unsigned refDepth = MAX2(baseImage->Depth >> levelDiff, 1);

	return (texImage->Width == refWidth &&
			texImage->Height == refHeight &&
			texImage->Depth == refDepth);
}

static void teximage_assign_miptree(radeonContextPtr rmesa,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	unsigned face,
	unsigned level)
{
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	/* Since the miptree only holds images for levels BaseLevel..MaxLevel,
	 * don't allocate a miptree if the teximage won't fit.
	 */
	if (!image_matches_texture_obj(texObj, texImage, level))
		return;

	/* Try using the current miptree, or create a new one if there isn't any */
	if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		radeon_miptree_unreference(&t->mt);
		radeon_try_alloc_miptree(rmesa, t);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
				"%s: texObj %p, texImage %p, face %d, level %d, "
				"texObj miptree doesn't match, allocated new miptree %p\n",
				__func__, texObj, texImage, face, level, t->mt);
	}

	/* Miptree allocation may have failed if no image was specified
	 * for the base level */
	if (t->mt) {
		image->mtface = face;
		image->mtlevel = level;
		radeon_miptree_reference(t->mt, &image->mt);
	} else
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Failed to allocate miptree.\n", __func__);
}

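/**
 * Build the per-slice offset table (in texels) that _mesa_texstore expects
 * for 3D images: slice i starts at alignedWidth * height * i.
 */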
static GLuint * allocate_image_offsets(struct gl_context *ctx,
	unsigned alignedWidth,
	unsigned height,
	unsigned depth)
{
	int i;
	GLuint *offsets;

	offsets = malloc(depth * sizeof(GLuint));
	if (!offsets) {
		_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTex[Sub]Image");
		return NULL;
	}

	for (i = 0; i < depth; ++i) {
		offsets[i] = alignedWidth * height * i;
	}

	return offsets;
}

/**
 * Update a subregion of the given texture image.
 */
static void radeon_store_teximage(struct gl_context* ctx, int dims,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	GLuint dstRowStride;
	GLuint *dstImageOffsets;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p, tex %p, image %p) compressed %d\n",
			__func__, ctx, texObj, texImage, compressed);

	if (image->mt) {
		dstRowStride = image->mt->levels[image->mtlevel].rowstride;
	} else if (t->bo) {
		/* TFP case */
		dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
	} else {
		dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
	}

	assert(dstRowStride);

	if (dims == 3) {
		unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
		dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
		if (!dstImageOffsets) {
			radeon_warning("%s Failed to allocate dstImageOffsets.\n", __func__);
			return;
		}
	} else {
		dstImageOffsets = texImage->ImageOffsets;
	}

	radeon_teximage_map(image, GL_TRUE);

	if (compressed) {
		uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
		GLubyte *img_start;

		_mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);

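		/* Work out where the sub-rectangle starts in the destination:
		 * without a miptree Mesa's compressed-address helper is used on
		 * the malloc'ed image, otherwise the block-aligned byte offset
		 * into the miptree level is computed by hand.
		 */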
		if (!image->mt) {
			dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
			img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
									texImage->TexFormat,
									texImage->Width, texImage->Data);
		}
		else {
			uint32_t offset;
			offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
			offset *= _mesa_get_format_bytes(texImage->TexFormat);
			img_start = texImage->Data + offset;
		}
		srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
		bytesPerRow = srcRowStride;
		rows = (height + block_height - 1) / block_height;

		copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
	}
	else {
		if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
					texImage->TexFormat, texImage->Data,
					xoffset, yoffset, zoffset,
					dstRowStride,
					dstImageOffsets,
					width, height, depth,
					format, type, pixels, packing)) {
			_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
		}
	}

	if (dims == 3) {
		free(dstImageOffsets);
	}

	radeon_teximage_unmap(image);
}

/**
 * All glTexImage calls go through this function.
 */
static void radeon_teximage(
	struct gl_context *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage, face, level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling teximage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			int size = _mesa_format_image_size(texImage->TexFormat,
								texImage->Width,
								texImage->Height,
								texImage->Depth);
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
					"%s %dd: texObj %p, texImage %p, "
					" no miptree assigned, using local memory %p\n",
					__func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			0, 0, 0,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}

void radeonTexImage1D(struct gl_context * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 1, target, level, internalFormat, width, 1, 1,
		0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexImage2D(struct gl_context * ctx, GLenum target, GLint level,
			   GLint internalFormat,
			   GLint width, GLint height, GLint border,
			   GLenum format, GLenum type, const GLvoid * pixels,
			   const struct gl_pixelstore_attrib *packing,
			   struct gl_texture_object *texObj,
			   struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
		0, format, type, pixels, packing, texObj, texImage, 0);
}

void radeonCompressedTexImage2D(struct gl_context * ctx, GLenum target,
				     GLint level, GLint internalFormat,
				     GLint width, GLint height, GLint border,
				     GLsizei imageSize, const GLvoid * data,
				     struct gl_texture_object *texObj,
				     struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 2, target, level, internalFormat, width, height, 1,
		imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
}

void radeonTexImage3D(struct gl_context * ctx, GLenum target, GLint level,
		      GLint internalFormat,
		      GLint width, GLint height, GLint depth,
		      GLint border,
		      GLenum format, GLenum type, const GLvoid * pixels,
		      const struct gl_pixelstore_attrib *packing,
		      struct gl_texture_object *texObj,
		      struct gl_texture_image *texImage)
{
	radeon_teximage(ctx, 3, target, level, internalFormat, width, height, depth,
		0, format, type, pixels, packing, texObj, texImage, 0);
}

/**
 * All glTexSubImage calls go through this function.
 */
static void radeon_texsubimage(struct gl_context* ctx, int dims, GLenum target, int level,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage,
			_mesa_tex_target_to_face(target), level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling texsubimage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			xoffset, yoffset, zoffset,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}

void radeonTexSubImage1D(struct gl_context * ctx, GLenum target, GLint level,
			 GLint xoffset,
			 GLsizei width,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 1, target, level, xoffset, 0, 0, width, 1, 1, 0,
		format, type, pixels, packing, texObj, texImage, 0);
}

void radeonTexSubImage2D(struct gl_context * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset,
			 GLsizei width, GLsizei height,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
			   0, format, type, pixels, packing, texObj, texImage,
			   0);
}

void radeonCompressedTexSubImage2D(struct gl_context * ctx, GLenum target,
				   GLint level, GLint xoffset,
				   GLint yoffset, GLsizei width,
				   GLsizei height, GLenum format,
				   GLsizei imageSize, const GLvoid * data,
				   struct gl_texture_object *texObj,
				   struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0, width, height, 1,
		imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
}


void radeonTexSubImage3D(struct gl_context * ctx, GLenum target, GLint level,
			 GLint xoffset, GLint yoffset, GLint zoffset,
			 GLsizei width, GLsizei height, GLsizei depth,
			 GLenum format, GLenum type,
			 const GLvoid * pixels,
			 const struct gl_pixelstore_attrib *packing,
			 struct gl_texture_object *texObj,
			 struct gl_texture_image *texImage)
{
	radeon_texsubimage(ctx, 3, target, level, xoffset, yoffset, zoffset, width, height, depth, 0,
		format, type, pixels, packing, texObj, texImage, 0);
}

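/**
 * Return non-zero if surfaces of the given format can be used as render
 * targets (color or depth/stencil) by this driver.
 */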
unsigned radeonIsFormatRenderable(gl_format mesa_format)
{
	if (mesa_format == _dri_texformat_argb8888 || mesa_format == _dri_texformat_rgb565 ||
		mesa_format == _dri_texformat_argb1555 || mesa_format == _dri_texformat_argb4444)
		return 1;

	switch (mesa_format)
	{
		case MESA_FORMAT_Z16:
		case MESA_FORMAT_S8_Z24:
			return 1;
		default:
			return 0;
	}
}

#if FEATURE_OES_EGL_image
void radeon_image_target_texture_2d(struct gl_context *ctx, GLenum target,
				    struct gl_texture_object *texObj,
				    struct gl_texture_image *texImage,
				    GLeglImageOES image_handle)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image *radeonImage = get_radeon_texture_image(texImage);
	__DRIscreen *screen;
	__DRIimage *image;

	screen = radeon->dri.screen;
	image = screen->dri2.image->lookupEGLImage(screen, image_handle,
						   screen->loaderPrivate);
	if (image == NULL)
		return;

	radeonFreeTexImageData(ctx, texImage);

	texImage->Width = image->width;
	texImage->Height = image->height;
	texImage->Depth = 1;
	texImage->_BaseFormat = GL_RGBA;
	texImage->TexFormat = image->format;
	texImage->RowStride = image->pitch;
	texImage->InternalFormat = image->internal_format;

	if(t->mt)
	{
		radeon_miptree_unreference(&t->mt);
		t->mt = NULL;
	}

	/* NOTE: The following is *very* ugly and will probably break. But
	   I don't know how to deal with it without creating a whole new
	   function like radeon_miptree_from_bo(), so I'm going with the
	   easy but error-prone way. */

	radeon_try_alloc_miptree(radeon, t);

	radeonImage->mtface = _mesa_tex_target_to_face(target);
	radeonImage->mtlevel = 0;
	radeon_miptree_reference(t->mt, &radeonImage->mt);

	if (t->mt == NULL)
	{
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			     "%s Failed to allocate miptree.\n", __func__);
		return;
	}

	/* Particularly ugly: this is guaranteed to break if image->bo is
	   not of the required size for a miptree. */
	radeon_bo_unref(t->mt->bo);
	radeon_bo_ref(image->bo);
	t->mt->bo = image->bo;

	if (!radeon_miptree_matches_image(t->mt, &radeonImage->base,
					  radeonImage->mtface, 0))
		fprintf(stderr, "miptree doesn't match image\n");
}
#endif