utils.c revision 4237d5a79f48789aacc67dc43378d2d7813a39f4
/*
* Utility functions for the WineD3D Library
*
* Copyright 2002-2004 Jason Edmeades
* Copyright 2003-2004 Raphael Junqueira
* Copyright 2004 Christian Costa
* Copyright 2005 Oliver Stieber
* Copyright 2006-2008 Henri Verbeet
* Copyright 2007-2008 Stefan Dösinger for CodeWeavers
* Copyright 2009-2010 Henri Verbeet for CodeWeavers
*
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
/*
* Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Oracle elects to use only
* the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
* a choice of LGPL license versions is made available with the language indicating
* that LGPLv2 or any later version may be used, or where a choice of which version
* of the LGPL is applied is otherwise unspecified.
*/
#include "config.h"
#include "wined3d_private.h"
/* Per-format channel layout entry, keyed by format id.
 * NOTE(review): the formats[] table below initializes 12 values per row,
 * but only 'id' is declared here — the channel bit counts, offsets, bpp
 * and depth/stencil members appear to be missing from this revision;
 * confirm against upstream Wine before relying on this declaration. */
struct wined3d_format_channels
{
enum wined3d_format_id id;
};
/* Channel description table consumed by the format-init code.
 * NOTE(review): each row carries 12 initializers while the struct above
 * declares a single member — rows will not compile as-is; the struct
 * definition looks truncated in this revision. */
static const struct wined3d_format_channels formats[] =
{
/* size offset
* format id r g b a r g b a bpp depth stencil */
{WINED3DFMT_UNKNOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
/* FourCC formats */
{WINED3DFMT_UYVY, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0},
{WINED3DFMT_YUY2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0},
{WINED3DFMT_YV12, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{WINED3DFMT_DXT1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{WINED3DFMT_DXT2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{WINED3DFMT_DXT3, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{WINED3DFMT_DXT4, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{WINED3DFMT_DXT5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{WINED3DFMT_MULTI2_ARGB8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{WINED3DFMT_G8R8_G8B8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{WINED3DFMT_R8G8_B8G8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
/* IEEE formats */
/* Hmm? */
{WINED3DFMT_R8G8_SNORM_Cx, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0},
/* Float */
/* Palettized formats */
{WINED3DFMT_P8_UINT, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
/* Standard ARGB formats. */
/* Luminance */
{WINED3DFMT_L8_UNORM, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{WINED3DFMT_L16_UNORM, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0},
/* Bump mapping stuff */
/* Depth stencil formats */
{WINED3DFMT_VERTEXDATA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
/* Vendor-specific formats */
{WINED3DFMT_ATI2N, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{WINED3DFMT_NVDB, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{WINED3DFMT_INST, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{WINED3DFMT_RESZ, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{WINED3DFMT_NVHU, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0},
{WINED3DFMT_NVHS, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0},
/* Unsure about them, could not find a Windows driver that supports them */
/* Typeless */
};
/* Associates a format id with its baseline capability flags.
 * NOTE(review): only 'id' survives in this revision — the flags member
 * this table's name implies is missing; confirm against upstream. */
struct wined3d_format_base_flags
{
enum wined3d_format_id id;
};
/* The ATI2N format behaves like an uncompressed format in LockRect(), but
* still needs to use the correct block based calculation for e.g. the
* resource size. */
/* Base-flag table iterated by init_format_base_info().
 * NOTE(review): empty in this revision — the entries appear to have been
 * stripped; the init loop below still iterates over it harmlessly. */
static const struct wined3d_format_base_flags format_base_flags[] =
{
};
/* Block-compression metadata per format id (e.g. DXTn 4x4 blocks).
 * NOTE(review): width/height/byte-count members are absent in this
 * revision — only the key remains; confirm against upstream. */
struct wined3d_format_block_info
{
enum wined3d_format_id id;
};
/* Compressed-format block table; empty in this revision (entries stripped). */
static const struct wined3d_format_block_info format_block_info[] =
{
};
/* Vertex attribute description for a format: which fixed-function emit
 * routine handles it and how large one component is.
 * NOTE(review): the table below initializes 7 values per row (including
 * GL type/size/normalized fields) but only 3 members are declared here —
 * the GL-side members look missing in this revision. */
struct wined3d_format_vertex_info
{
enum wined3d_format_id id;
enum wined3d_ffp_emit_idx emit_idx;
unsigned int component_size;
};
/* Vertex declaration format table: maps d3d vertex formats to GL component
 * types for the vertex pipeline. Rows carry format id, FFP emit index,
 * component count, GL type, GL component count, normalized flag and
 * per-component byte size (see struct mismatch note above the struct). */
static const struct wined3d_format_vertex_info format_vertex_info[] =
{
{WINED3DFMT_B8G8R8A8_UNORM, WINED3D_FFP_EMIT_D3DCOLOR, 4, GL_UNSIGNED_BYTE, 4, GL_TRUE, sizeof(BYTE)},
{WINED3DFMT_R8G8B8A8_UINT, WINED3D_FFP_EMIT_UBYTE4, 4, GL_UNSIGNED_BYTE, 4, GL_FALSE, sizeof(BYTE)},
{WINED3DFMT_R16G16B16A16_SINT, WINED3D_FFP_EMIT_SHORT4, 4, GL_SHORT, 4, GL_FALSE, sizeof(short int)},
{WINED3DFMT_R8G8B8A8_UNORM, WINED3D_FFP_EMIT_UBYTE4N, 4, GL_UNSIGNED_BYTE, 4, GL_TRUE, sizeof(BYTE)},
{WINED3DFMT_R16G16B16A16_SNORM, WINED3D_FFP_EMIT_SHORT4N, 4, GL_SHORT, 4, GL_TRUE, sizeof(short int)},
{WINED3DFMT_R16G16_UNORM, WINED3D_FFP_EMIT_USHORT2N, 2, GL_UNSIGNED_SHORT, 2, GL_TRUE, sizeof(short int)},
{WINED3DFMT_R16G16B16A16_UNORM, WINED3D_FFP_EMIT_USHORT4N, 4, GL_UNSIGNED_SHORT, 4, GL_TRUE, sizeof(short int)},
{WINED3DFMT_R10G10B10A2_UINT, WINED3D_FFP_EMIT_UDEC3, 3, GL_UNSIGNED_SHORT, 3, GL_FALSE, sizeof(short int)},
{WINED3DFMT_R16G16B16A16_FLOAT, WINED3D_FFP_EMIT_FLOAT16_4, 4, GL_FLOAT, 4, GL_FALSE, sizeof(GLhalfNV)},
{WINED3DFMT_R32G32B32_UINT, WINED3D_FFP_EMIT_INVALID, 3, GL_UNSIGNED_INT, 3, GL_FALSE, sizeof(UINT)},
{WINED3DFMT_R32G32B32A32_UINT, WINED3D_FFP_EMIT_INVALID, 4, GL_UNSIGNED_INT, 4, GL_FALSE, sizeof(UINT)},
};
/* Texture upload description for a format: how many bytes a converted
 * texel occupies and the capability flags advertised for the format.
 * NOTE(review): the format_texture_info[] table further down references
 * GL internal/format/type fields and a conversion callback that are not
 * declared here — this struct appears truncated in this revision. */
struct wined3d_format_texture_info
{
enum wined3d_format_id id;
unsigned int conv_byte_count;
unsigned int flags;
};
{
/* WINED3DFMT_L4A4_UNORM exists as an internal gl format, but for some reason there is not
* format+type combination to load it. Thus convert it to A8L8, then load it
* with A4L4 internal, but A8L8 format+type
*/
unsigned int x, y;
const unsigned char *Source;
unsigned char *Dest;
for(y = 0; y < height; y++) {
for (x = 0; x < width; x++ ) {
Dest += 2;
}
}
}
/* Converts an R5G5_SNORM_L6_UNORM surface row-by-row for GL upload.
 * NOTE(review): the body references 'color', 'v' and 'Dest_s' which are
 * never declared in the visible code — the source/destination pointer
 * setup and the per-pixel unpack logic are missing from this revision;
 * reconstruct from upstream Wine before use. */
static void convert_r5g5_snorm_l6_unorm(const BYTE *src, BYTE *dst, UINT pitch, UINT width, UINT height)
{
unsigned int x, y;
for(y = 0; y < height; y++)
{
for (x = 0; x < width; x++ )
{
/* Extract the 5-bit U channel from the packed texel. */
short u = ((color ) & 0x1f);
/* Bias the signed 5-bit values into unsigned range. */
short v_conv = v + 16;
short u_conv = u + 16;
Dest_s += 1;
}
}
}
/* NV_texture_shader variant of the R5G5_SNORM_L6_UNORM conversion: expands
 * to a 24-bit destination so the result works in both fixed function and
 * shaders without further fixup.
 * NOTE(review): 'color' is read and 'Dest' is advanced without ever being
 * initialized in the visible code — the pointer setup and per-pixel writes
 * are missing from this revision. */
static void convert_r5g5_snorm_l6_unorm_nv(const BYTE *src, BYTE *dst, UINT pitch, UINT width, UINT height)
{
unsigned int x, y;
unsigned char *Dest;
/* This makes the gl surface bigger(24 bit instead of 16), but it works with
 * fixed function and shaders without further conversion once the surface is
 * loaded
 */
for(y = 0; y < height; y++) {
for (x = 0; x < width; x++ ) {
char u = ((color ) & 0x1f);
/* 8 bits destination, 6 bits source, 8th bit is the sign. gl ignores the sign
 * and doubles the positive range. Thus shift left only once, gl does the 2nd
 * shift. GL reads a signed value and converts it into an unsigned value.
 */
/* Those are read as signed, but kept signed. Just left-shift 3 times to scale
 * from 5 bit values to 8 bit values.
 */
Dest += 3;
}
}
}
{
unsigned int x, y;
const short *Source;
unsigned char *Dest;
for(y = 0; y < height; y++)
{
for (x = 0; x < width; x++ )
{
Dest += 3;
}
}
}
/* Converts R8G8_SNORM_L8X8_UNORM for upload; 4 destination bytes per texel.
 * NOTE(review): 'Dest' is advanced but never initialized or written in the
 * visible code — the per-pixel conversion statements are missing from this
 * revision. */
static void convert_r8g8_snorm_l8x8_unorm(const BYTE *src, BYTE *dst, UINT pitch, UINT width, UINT height)
{
unsigned int x, y;
unsigned char *Dest;
/* Doesn't work correctly with the fixed function pipeline, but can work in
 * shaders if the shader is adjusted. (There's no use for this format in gl's
 * standard fixed function pipeline anyway).
 */
for(y = 0; y < height; y++)
{
for (x = 0; x < width; x++ )
{
Dest += 4;
}
}
}
/* NV_texture_shader variant of the R8G8_SNORM_L8X8_UNORM conversion; works
 * with both fixed function and shaders after conversion.
 * NOTE(review): per-pixel write statements are missing from this revision
 * ('Dest' is advanced without being initialized or written). */
static void convert_r8g8_snorm_l8x8_unorm_nv(const BYTE *src, BYTE *dst, UINT pitch, UINT width, UINT height)
{
unsigned int x, y;
unsigned char *Dest;
/* This implementation works with the fixed function pipeline and shaders
 * without further modification after converting the surface.
 */
for(y = 0; y < height; y++)
{
for (x = 0; x < width; x++ )
{
Dest += 4;
}
}
}
{
unsigned int x, y;
unsigned char *Dest;
for(y = 0; y < height; y++)
{
for (x = 0; x < width; x++ )
{
Dest += 4;
}
}
}
{
unsigned int x, y;
unsigned short *Dest;
for(y = 0; y < height; y++)
{
for (x = 0; x < width; x++ )
{
Dest += 3;
}
}
}
{
unsigned int x, y;
for(y = 0; y < height; y++)
{
for (x = 0; x < width; x++ )
{
/* Strictly speaking not correct for R16G16F, but it doesn't matter because the
* shader overwrites it anyway
*/
Dest += 3;
}
}
}
{
unsigned int x, y;
const float *Source;
float *Dest;
for(y = 0; y < height; y++)
{
for (x = 0; x < width; x++ )
{
Dest += 3;
}
}
}
/* Converts S1_UINT_D15_UNORM depth/stencil data for GL upload: the 15-bit
 * normalized depth must be rescaled to 24 bits while the stencil bit is
 * copied unscaled.
 * NOTE(review): the loop body contains only the explanatory comment — the
 * actual read/scale/write statements are missing from this revision, so as
 * written this function is a no-op. */
static void convert_s1_uint_d15_unorm(const BYTE *src, BYTE *dst, UINT pitch, UINT width, UINT height)
{
unsigned int x, y;
for (y = 0; y < height; ++y)
{
for (x = 0; x < width; ++x)
{
/* The depth data is normalized, so needs to be scaled,
 * the stencil data isn't. Scale depth data by
 * (2^24-1)/(2^15-1) ~~ (2^9 + 2^-6). */
}
}
}
/* Converts S4X4_UINT_D24_UNORM by masking out the unused X4 nibble.
 * NOTE(review): the loop body is empty apart from the comment — the
 * masking statements are missing from this revision (no-op as written). */
static void convert_s4x4_uint_d24_unorm(const BYTE *src, BYTE *dst, UINT pitch, UINT width, UINT height)
{
unsigned int x, y;
for (y = 0; y < height; ++y)
{
for (x = 0; x < width; ++x)
{
/* Just need to clear out the X4 part. */
}
}
}
/* Converts S8_UINT_D24_FLOAT depth/stencil data for GL upload.
 * NOTE(review): the loop body is completely empty in this revision — the
 * conversion statements are missing (no-op as written). */
static void convert_s8_uint_d24_float(const BYTE *src, BYTE *dst, UINT pitch, UINT width, UINT height)
{
unsigned int x, y;
for (y = 0; y < height; ++y)
{
for (x = 0; x < width; ++x)
{
}
}
}
/* The following formats explicitly don't have WINED3DFMT_FLAG_TEXTURE set:
*
* These are never supported on native.
* WINED3DFMT_B8G8R8_UNORM
* WINED3DFMT_B2G3R3_UNORM
* WINED3DFMT_L4A4_UNORM
* WINED3DFMT_S1_UINT_D15_UNORM
* WINED3DFMT_S4X4_UINT_D24_UNORM
*
* Since it is not widely available, don't offer it. Further no Windows driver
* offers WINED3DFMT_P8_UINT_A8_NORM, so don't offer it either.
* WINED3DFMT_P8_UINT
* WINED3DFMT_P8_UINT_A8_UNORM
*
* These formats seem to be similar to the HILO formats in
* GL_NV_texture_shader. NVHU is said to be GL_UNSIGNED_HILO16,
* NVHS GL_SIGNED_HILO16. Rumours say that D3D computes a 3rd channel
* similarly to D3DFMT_CxV8U8 (So NVHS could be called D3DFMT_CxV16U16). ATI
* refused to support formats which can easily be emulated with pixel shaders,
* so applications have to deal with not having NVHS and NVHU.
* WINED3DFMT_NVHU
* WINED3DFMT_NVHS */
static const struct wined3d_format_texture_info format_texture_info[] =
{
/* format id gl_internal gl_srgb_internal gl_rt_internal
gl_format gl_type conv_byte_count
flags
extension convert */
/* FourCC formats */
/* GL_APPLE_ycbcr_422 claims that its '2YUV' format, which is supported via the UNSIGNED_SHORT_8_8_REV_APPLE type
* is equivalent to 'UYVY' format on Windows, and the 'YUVS' via UNSIGNED_SHORT_8_8_APPLE equates to 'YUY2'. The
* d3d9 test however shows that the opposite is true. Since the extension is from 2002, it predates the x86 based
* Macs, so probably the endianness differs. This could be tested as soon as we have a Windows and MacOS on a big
* endian machine
*/
GL_ALPHA, GL_UNSIGNED_BYTE, 0,
GL_RGBA, GL_UNSIGNED_BYTE, 0,
GL_RGBA, GL_UNSIGNED_BYTE, 0,
GL_RGBA, GL_UNSIGNED_BYTE, 0,
GL_RGBA, GL_UNSIGNED_BYTE, 0,
GL_RGBA, GL_UNSIGNED_BYTE, 0,
/* IEEE formats */
/* Float */
GL_RED, GL_HALF_FLOAT_ARB, 0,
GL_RED, GL_HALF_FLOAT_ARB, 0,
GL_RG, GL_HALF_FLOAT_ARB, 0,
GL_RGBA, GL_HALF_FLOAT_ARB, 0,
/* Palettized formats */
GL_ALPHA, GL_UNSIGNED_BYTE, 0,
0,
0,
/* Standard ARGB formats */
GL_BGR, GL_UNSIGNED_BYTE, 0,
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET,
GL_ALPHA, GL_UNSIGNED_BYTE, 0,
GL_RG, GL_UNSIGNED_SHORT, 0,
GL_RGBA, GL_UNSIGNED_SHORT, 0,
/* Luminance */
/* Bump mapping stuff */
GL_DSDT_NV, GL_BYTE, 0,
GL_HILO_NV, GL_SHORT, 0,
/* Depth stencil formats */
/* Vendor-specific formats */
{WINED3DFMT_NULL, 0, 0, 0,
};
{
/* First check if the format is at the position of its value.
* This will catch the argb formats before the loop is entered. */
{
return format_id;
}
else
{
unsigned int i;
{
}
}
return -1;
}
{
UINT i;
gl_info->formats = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, format_count * sizeof(*gl_info->formats));
{
ERR("Failed to allocate memory.\n");
return FALSE;
}
for (i = 0; i < format_count; ++i)
{
}
for (i = 0; i < (sizeof(format_base_flags) / sizeof(*format_base_flags)); ++i)
{
if (fmt_idx == -1)
{
ERR("Format %s (%#x) not found.\n",
return FALSE;
}
}
return TRUE;
}
{
unsigned int i;
for (i = 0; i < (sizeof(format_block_info) / sizeof(*format_block_info)); ++i)
{
struct wined3d_format *format;
if (fmt_idx == -1)
{
ERR("Format %s (%#x) not found.\n",
return FALSE;
}
}
return TRUE;
}
/* Context activation is done by the caller. */
{
/* Check if the default internal format is supported as a frame buffer
* target, otherwise fall back to the render target internal.
*
* Try to stick to the standard format if possible, this limits precision differences. */
gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
checkGLcall("Framebuffer format check");
if (status == GL_FRAMEBUFFER_COMPLETE)
{
}
else
{
if (!format->rtInternal)
{
{
FIXME("Format %s with rendertarget flag is not supported as FBO color attachment,"
}
else
{
}
}
else
{
TRACE("Format %s is not supported as FBO color attachment, trying rtInternal format as fallback.\n",
gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
checkGLcall("Framebuffer format check");
if (status == GL_FRAMEBUFFER_COMPLETE)
{
TRACE("Format %s rtInternal format is supported as FBO color attachment.\n",
}
else
{
FIXME("Format %s rtInternal format is not supported as FBO color attachment.\n",
}
}
}
if (status == GL_FRAMEBUFFER_COMPLETE && ((format->flags & WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING)
{
BYTE r, a;
{
gl_info->fbo_ops.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rb);
gl_info->fbo_ops.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rb);
checkGLcall("RB attachment");
}
{
TRACE("Format doesn't support post-pixelshader blending.\n");
}
else
{
/* Draw a full-black quad */
/* Draw a half-transparent red quad */
/* Rebinding texture to workaround a fglrx bug. */
gl_info->gl_ops.gl.p_glGetTexImage(GL_TEXTURE_2D, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, readback);
checkGLcall("Post-pixelshader blending check");
a = color >> 24;
if (!match)
{
TRACE("Format doesn't support post-pixelshader blending.\n");
}
else
{
TRACE("Format supports post-pixelshader blending.\n");
}
}
{
gl_info->fbo_ops.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, 0);
gl_info->fbo_ops.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, 0);
checkGLcall("RB cleanup");
}
}
{
gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
checkGLcall("Framebuffer format check");
if (status == GL_FRAMEBUFFER_COMPLETE)
{
}
else
{
}
}
else if (status == GL_FRAMEBUFFER_COMPLETE)
}
{
if (value == GL_FULL_SUPPORT)
{
}
else
{
}
}
/* Context activation is done by the caller. */
{
unsigned int i;
{
{
if (!format->glInternal)
continue;
continue;
if (value == GL_FULL_SUPPORT)
{
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING, "post-pixelshader blending");
}
else
{
if (!format->rtInternal)
{
{
WARN("Format %s with rendertarget flag is not supported as FBO color attachment"
}
else
}
else
{
if (value == GL_FULL_SUPPORT)
{
TRACE("Format %s rtInternal format is supported as FBO color attachment.\n",
}
else
{
WARN("Format %s rtInternal format is not supported as FBO color attachment.\n",
}
}
}
{
if (value == GL_FULL_SUPPORT)
{
}
else
{
}
}
}
return;
}
{
}
{
if (!format->glInternal) continue;
{
continue;
}
{
TRACE("Skipping format %s because it's a compressed format.\n",
continue;
}
{
TRACE("Checking if format %s is supported as FBO color attachment...\n", debug_d3dformat(format->id));
}
else
{
}
}
}
/* Fills in texture-related fields and capability flags for every entry of
 * format_texture_info[], returning FALSE if a table format id cannot be
 * located in the adapter's format array.
 * NOTE(review): large parts of the body are missing in this revision —
 * 'fmt_idx', 'srgb_write' and the flag-setting statements around the
 * orphaned WINED3DFMT_FLAG_* lines are never declared/completed; the ERR()
 * call is also missing its arguments. Reconstruct from upstream before use. */
static BOOL init_format_texture_info(struct wined3d_adapter *adapter, struct wined3d_gl_info *gl_info)
{
struct fragment_caps fragment_caps;
struct shader_caps shader_caps;
unsigned int i;
for (i = 0; i < sizeof(format_texture_info) / sizeof(*format_texture_info); ++i)
{
struct wined3d_format *format;
if (fmt_idx == -1)
{
ERR("Format %s (%#x) not found.\n",
return FALSE;
}
/* ARB_texture_rg defines floating point formats, but only if
 * ARB_texture_float is also supported. */
continue;
{
WINED3DFMT_FLAG_VTF, "vertex texture usage");
WINED3DFMT_FLAG_FILTERING, "filtering");
{
WINED3DFMT_FLAG_SRGB_READ, "sRGB read");
if (srgb_write)
WINED3DFMT_FLAG_SRGB_WRITE, "sRGB write");
else
}
}
else
{
{
/* Filter sRGB capabilities if EXT_texture_sRGB is not supported. */
{
}
{
}
}
}
/* Texture conversion stuff */
}
return TRUE;
}
{
return TRUE;
}
/* A context is provided by the caller */
{
/* Render a filtered texture and see what happens. This is intended to detect the lack of
* float16 filtering on ATI X1000 class cards. The drivers disable filtering instead of
* falling back to software. If this changes in the future this code will get fooled and
* apps might hit the software path due to incorrectly advertised caps.
*
* Its unlikely that this changes however. GL Games like Mass Effect depend on the filter
* disable fallback, if Apple or ATI ever change the driver behavior they will break more
* than Wine. The Linux binary <= r500 driver is not maintained any more anyway
*/
gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, buffer, 0);
gl_info->gl_ops.gl.p_glGetTexImage(GL_TEXTURE_2D, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, readback);
{
TRACE("Read back colors 0x%08x and 0x%08x close to unfiltered color, assuming no filtering\n",
}
else
{
TRACE("Read back colors are 0x%08x and 0x%08x, assuming texture is filtered\n",
}
{
}
return ret;
}
/* Determines float16 filtering/blending support for the formats listed in
 * fmts16[], either by querying FBOs or (without FBO support) by guessing
 * from vendor and GL caps.
 * NOTE(review): this revision is heavily truncated — fmts16[] is empty,
 * 'filtered' is used but never declared, and the conditions guarding the
 * TRACE/WARN branches are missing; the early 'return' also makes the rest
 * of the body unreachable as written. */
static void init_format_filter_info(struct wined3d_gl_info *gl_info, enum wined3d_pci_vendor vendor)
{
struct wined3d_format *format;
unsigned int fmt_idx, i;
static const enum wined3d_format_id fmts16[] =
{
};
/* This was already handled by init_format_texture_info(). */
return;
{
WARN("No FBO support, or no FBO ORM, guessing filter info from GL caps\n");
{
TRACE("Nvidia card with texture_float support: Assuming float16 blending\n");
}
{
TRACE("More than 44 GLSL varyings - assuming d3d10 card with float16 blending\n");
}
else
{
TRACE("Assuming no float16 blending\n");
}
if(filtered)
{
{
}
}
return;
}
{
if(filtered)
{
}
else
{
}
}
}
{
unsigned int i;
int idx;
/* V8U8 is supported natively by GL_ATI_envmap_bumpmap and GL_NV_texture_shader.
* V16U16 is only supported by GL_NV_texture_shader. The formats need fixup if
* their extensions are not available. GL_ATI_envmap_bumpmap is not used because
* the only driver that implements it(fglrx) has a buggy implementation.
*
* V8U8 and V16U16 need a fixup of the undefined blue channel. OpenGL
* returns 0.0 when sampling from it, DirectX 1.0. So we always have in-shader
* conversion for this format.
*/
{
}
else
{
}
{
/* If GL_NV_texture_shader is not supported, those formats are converted, incompatibly
* with each other
*/
}
else
{
/* If GL_NV_texture_shader is supported, WINED3DFMT_L6V5U5 and WINED3DFMT_X8L8V8U8
* are converted at surface loading time, but they do not need any modification in
* the shader, thus they are compatible with all WINED3DFMT_UNKNOWN group formats.
* WINED3DFMT_Q8W8V8U8 doesn't even need load-time conversion
*/
}
{
}
{
}
{
}
{
}
{
}
{
/* Do not change the size of the type, it is CPU side. We have to change the GPU-side information though.
* It is the job of the vertex buffer code to make sure that the vbos have the right format */
}
{
}
{
}
/* ATI instancing hack: Although ATI cards do not support Shader Model
* 3.0, they support instancing. To query if the card supports instancing
* CheckDeviceFormat() with the special format MAKEFOURCC('I','N','S','T')
* is used. Should an application check for this, provide a proper return
* value. We can do instancing with all shader versions, but we need
* vertex shaders.
*
* Additionally applications have to set the D3DRS_POINTSIZE render state
* to MAKEFOURCC('I','N','S','T') once to enable instancing. Wined3d
* doesn't need that and just ignores it.
*
* With Shader Model 3.0 capable cards Instancing 'just works' in Windows. */
/* FIXME: This should just check the shader backend caps. */
{
}
/* Depth bound test. To query if the card supports it CheckDeviceFormat()
* with the special format MAKEFOURCC('N','V','D','B') is used. It is
* enabled by setting D3DRS_ADAPTIVETESS_X render state to
* MAKEFOURCC('N','V','D','B') and then controlled by setting
* D3DRS_ADAPTIVETESS_Z (zMin) and D3DRS_ADAPTIVETESS_W (zMax) to test
* value. */
{
}
/* RESZ aka AMD DX9-level hack for multisampled depth buffer resolve. You query for RESZ
* support by checking for availability of MAKEFOURCC('R','E','S','Z') surfaces with
* RENDERTARGET usage. */
{
}
{
continue;
}
}
{
unsigned int i;
for (i = 0; i < (sizeof(format_vertex_info) / sizeof(*format_vertex_info)); ++i)
{
struct wined3d_format *format;
if (fmt_idx == -1)
{
ERR("Format %s (%#x) not found.\n",
return FALSE;
}
}
return TRUE;
}
{
if (!init_format_block_info(gl_info))
{
return FALSE;
}
return TRUE;
}
/* Context activation is done by the caller. */
{
return TRUE;
fail:
return FALSE;
}
enum wined3d_format_id format_id)
{
if (idx == -1)
{
FIXME("Can't find format %s (%#x) in the format lookup table\n",
/* Get the caller a valid pointer */
}
}
/* Computes the byte size of a width x height surface in the given format,
 * rounding each row up to 'alignment' bytes; block-compressed formats are
 * sized in whole blocks via block_byte_count.
 * NOTE(review): 'size', 'row_count' and 'row_block_count' are used but
 * never declared/assigned in the visible code, and the branch conditions
 * are missing — this revision is truncated; reconstruct from upstream. */
UINT wined3d_format_calculate_size(const struct wined3d_format *format, UINT alignment, UINT width, UINT height)
{
{
size = 0;
}
{
/* Round each row of blocks up to the requested alignment (power of two). */
size = row_count * (((row_block_count * format->block_byte_count) + alignment - 1) & ~(alignment - 1));
}
else
{
}
{
/* The D3D format requirements make sure that the resulting format is an integer again */
}
return size;
}
/*****************************************************************************
* Trace formatting of useful values
*/
{
switch (format_id)
{
default:
{
char fourcc[5];
fourcc[4] = 0;
else
}
return "unrecognized";
}
}
{
switch (device_type)
{
default:
return "unrecognized";
}
}
{
char buf[333];
buf[0] = '\0';
}
{
char buf[238];
buf[0] = '\0';
}
{
switch (method)
{
#define WINED3DDECLMETHOD_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
{
switch (usage)
{
#define WINED3DDECLUSAGE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
{
switch (resource_type)
{
default:
return "unrecognized";
}
}
{
switch (primitive_type)
{
default:
return "unrecognized";
}
}
{
switch (state)
{
#define D3DSTATE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
{
switch (state)
{
#define D3DSTATE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
{
switch (filter_type)
{
#define D3DTEXTUREFILTERTYPE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
{
switch (state)
{
#define D3DSTATE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
{
switch (d3dtop)
{
#define D3DTOP_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
{
switch (tstype)
{
default:
{
return ("WINED3D_TS_WORLD_MATRIX > 0");
}
return "unrecognized";
}
}
{
if (STATE_IS_RENDER(state))
if (STATE_IS_TEXTURESTAGE(state))
{
return wine_dbg_sprintf("STATE_TEXTURESTAGE(%#x, %s)",
}
if (STATE_IS_SAMPLER(state))
if (STATE_IS_PIXELSHADER(state))
return "STATE_PIXELSHADER";
if (STATE_IS_TRANSFORM(state))
if (STATE_IS_STREAMSRC(state))
return "STATE_STREAMSRC";
if (STATE_IS_INDEXBUFFER(state))
return "STATE_INDEXBUFFER";
if (STATE_IS_VDECL(state))
return "STATE_VDECL";
if (STATE_IS_VSHADER(state))
return "STATE_VSHADER";
return "STATE_GEOMETRY_SHADER";
if (STATE_IS_VIEWPORT(state))
return "STATE_VIEWPORT";
return "STATE_VERTEXSHADERCONSTANT";
return "STATE_PIXELSHADERCONSTANT";
if (STATE_IS_LIGHT_TYPE(state))
return "STATE_LIGHT_TYPE";
if (STATE_IS_ACTIVELIGHT(state))
if (STATE_IS_SCISSORRECT(state))
return "STATE_SCISSORRECT";
if (STATE_IS_CLIPPLANE(state))
if (STATE_IS_MATERIAL(state))
return "STATE_MATERIAL";
if (STATE_IS_FRONTFACE(state))
return "STATE_FRONTFACE";
return "STATE_POINTSPRITECOORDORIGIN";
return "STATE_BASEVERTEXINDEX";
if (STATE_IS_FRAMEBUFFER(state))
return "STATE_FRAMEBUFFER";
return "STATE_POINT_SIZE_ENABLE";
}
{
switch (pool)
{
#define POOL_TO_STR(p) case p: return #p
default:
return "unrecognized";
}
}
switch(status) {
#define FBOSTATUS_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch(error) {
#define GLERROR_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
{
switch(source)
{
#define WINED3D_TO_STR(x) case x: return #x
default:
return "unrecognized";
}
}
{
switch(fixup)
{
#define WINED3D_TO_STR(x) case x: return #x
default:
return "unrecognized";
}
}
{
if (is_complex_fixup(fixup))
{
return;
}
TRACE("\tX: %s%s\n", debug_fixup_channel_source(fixup.x_source), fixup.x_sign_fixup ? ", SIGN_FIXUP" : "");
TRACE("\tY: %s%s\n", debug_fixup_channel_source(fixup.y_source), fixup.y_sign_fixup ? ", SIGN_FIXUP" : "");
TRACE("\tZ: %s%s\n", debug_fixup_channel_source(fixup.z_source), fixup.z_sign_fixup ? ", SIGN_FIXUP" : "");
TRACE("\tW: %s%s\n", debug_fixup_channel_source(fixup.w_source), fixup.w_sign_fixup ? ", SIGN_FIXUP" : "");
}
char buf[128];
buf[0] = 0;
}
{
if (op == WINED3D_TOP_DISABLE)
return FALSE;
return FALSE;
&& op != WINED3D_TOP_SELECT_ARG2)
return TRUE;
&& op != WINED3D_TOP_SELECT_ARG1)
return TRUE;
return TRUE;
return FALSE;
}
/* Setup this textures matrix according to the texture flags. */
/* Context activation is done by the caller (state handler). */
{
float mat[16];
checkGLcall("glMatrixMode(GL_TEXTURE)");
{
checkGLcall("glLoadIdentity()");
return;
}
{
ERR("Invalid texture transform flags: WINED3D_TTFF_COUNT1 | WINED3D_TTFF_PROJECTED.\n");
return;
}
if (flags & WINED3D_TTFF_PROJECTED)
{
if (!ffp_proj_control)
{
switch (flags & ~WINED3D_TTFF_PROJECTED)
{
case WINED3D_TTFF_COUNT2:
break;
case WINED3D_TTFF_COUNT3:
break;
}
}
} else { /* under directx the R/Z coord can be used for translation, under opengl we use the Q coord instead */
if(!calculatedCoords) {
switch(vtx_fmt)
{
case WINED3DFMT_R32_FLOAT:
/* Direct3D passes the default 1.0 in the 2nd coord, while gl passes it in the 4th.
* swap 2nd and 4th coord. No need to store the value of mat[12] in mat[4] because
* the input value to the transformation will be 0, so the matrix value is irrelevant
*/
break;
case WINED3DFMT_R32G32_FLOAT:
/* See above, just 3rd and 4th coord
*/
break;
case WINED3DFMT_R32G32B32_FLOAT: /* Opengl defaults match dx defaults */
case WINED3DFMT_R32G32B32A32_FLOAT: /* No defaults apply, all app defined */
/* This is to prevent swapping the matrix lines and put the default 4th coord = 1.0
* into a bad place. The division elimination below will apply to make sure the
* 1.0 doesn't do anything bad. The caller will set this value if the stride is 0
*/
case WINED3DFMT_UNKNOWN: /* No texture coords, 0/0/0/1 defaults are passed */
break;
default:
FIXME("Unexpected fixed function texture coord input\n");
}
}
if (!ffp_proj_control)
{
switch (flags & ~WINED3D_TTFF_PROJECTED)
{
/* case WINED3D_TTFF_COUNT1: Won't ever get here. */
case WINED3D_TTFF_COUNT2:
/* OpenGL divides the first 3 vertex coord by the 4th by default,
* which is essentially the same as D3DTTFF_PROJECTED. Make sure that
* the 4th coord evaluates to 1.0 to eliminate that.
*
* If the fixed function pipeline is used, the 4th value remains unused,
* so there is no danger in doing this. With vertex shaders we have a
* problem. Should an app hit that problem, the code here would have to
* check for pixel shaders, and the shader has to undo the default gl divide.
*
* A more serious problem occurs if the app passes 4 coordinates in, and the
* 4th is != 1.0(opengl default). This would have to be fixed in drawStridedSlow
* or a replacement shader. */
default:
}
}
}
checkGLcall("glLoadMatrixf(mat)");
}
/* This small helper function is used to convert a bitmask into the number of masked bits */
/* Returns the number of set bits in 'mask'.
 *
 * Fix: the previous revision declared 'count' but never assigned it before
 * returning it — reading an uninitialized automatic variable is undefined
 * behavior. Restored the counting loop implied by the comment above
 * ("convert a bitmask into the number of masked bits"). */
unsigned int count_bits(unsigned int mask)
{
    unsigned int count;

    /* Kernighan's technique: mask &= mask - 1 clears the lowest set bit,
     * so the loop iterates exactly once per set bit. */
    for (count = 0; mask; ++count)
    {
        mask &= mask - 1;
    }
    return count;
}
/* Helper function for retrieving color info for ChoosePixelFormat and wglChoosePixelFormatARB.
* The later function requires individual color components. */
{
{
case WINED3DFMT_B8G8R8_UNORM:
case WINED3DFMT_B5G6R5_UNORM:
case WINED3DFMT_B2G3R3_UNORM:
case WINED3DFMT_P8_UINT:
break;
default:
return FALSE;
}
TRACE("Returning red: %d, green: %d, blue: %d, alpha: %d, total: %d for format %s.\n",
return TRUE;
}
/* Helper function for retrieving depth/stencil info for ChoosePixelFormat and wglChoosePixelFormatARB */
{
{
case WINED3DFMT_D16_LOCKABLE:
case WINED3DFMT_D16_UNORM:
case WINED3DFMT_X8D24_UNORM:
case WINED3DFMT_D32_UNORM:
case WINED3DFMT_D32_FLOAT:
case WINED3DFMT_INTZ:
break;
default:
return FALSE;
}
TRACE("Returning depthSize: %d and stencilSize: %d for format %s.\n",
return TRUE;
}
/* Note: It's the caller's responsibility to ensure values can be expressed
* in the requested format. UNORM formats for example can only express values
* in the range 0.0f -> 1.0f. */
/* Converts a floating-point wined3d_color to the packed DWORD
 * representation of the surface's format; for P8 surfaces the color is
 * matched against the palette and the palette index is returned.
 * NOTE(review): this revision is truncated — the conv[] table is empty,
 * 'ret'/'e' and the r/g/b/a assignments are never computed, and the
 * conditions guarding each branch are missing; the early 'return a;'
 * and 'return i;' paths cannot work as written. */
DWORD wined3d_format_convert_from_float(const struct wined3d_surface *surface, const struct wined3d_color *color)
{
static const struct
{
enum wined3d_format_id format_id;
float r_mul;
float g_mul;
float b_mul;
float a_mul;
}
conv[] =
{
};
unsigned int i;
TRACE("Converting color {%.8e %.8e %.8e %.8e} to format %s.\n",
{
return ret;
}
{
PALETTEENTRY *e;
BYTE r, g, b, a;
{
WARN("Surface doesn't have a palette, returning 0.\n");
return 0;
}
/* Fast path: palette entry at the alpha index already matches. */
return a;
WARN("Alpha didn't match index, searching full palette.\n");
for (i = 0; i < 256; ++i)
{
return i;
}
FIXME("Unable to convert color to palette index.\n");
return 0;
}
return 0;
}
/* DirectDraw stuff */
{
switch (depth)
{
case 8: return WINED3DFMT_P8_UINT;
case 15: return WINED3DFMT_B5G5R5X1_UNORM;
case 16: return WINED3DFMT_B5G6R5_UNORM;
case 32: return WINED3DFMT_B8G8R8X8_UNORM; /* EVE online and the Fur demo need 32bit AdapterDisplayMode to return WINED3DFMT_B8G8R8X8_UNORM */
default: return WINED3DFMT_UNKNOWN;
}
}
const struct wined3d_matrix *src2)
{
struct wined3d_matrix temp;
/* Now do the multiplication 'by hand'.
I know that all this could be optimised, but this will be done later :-) */
temp.u.s._11 = (src1->u.s._11 * src2->u.s._11) + (src1->u.s._21 * src2->u.s._12) + (src1->u.s._31 * src2->u.s._13) + (src1->u.s._41 * src2->u.s._14);
temp.u.s._21 = (src1->u.s._11 * src2->u.s._21) + (src1->u.s._21 * src2->u.s._22) + (src1->u.s._31 * src2->u.s._23) + (src1->u.s._41 * src2->u.s._24);
temp.u.s._31 = (src1->u.s._11 * src2->u.s._31) + (src1->u.s._21 * src2->u.s._32) + (src1->u.s._31 * src2->u.s._33) + (src1->u.s._41 * src2->u.s._34);
temp.u.s._41 = (src1->u.s._11 * src2->u.s._41) + (src1->u.s._21 * src2->u.s._42) + (src1->u.s._31 * src2->u.s._43) + (src1->u.s._41 * src2->u.s._44);
temp.u.s._12 = (src1->u.s._12 * src2->u.s._11) + (src1->u.s._22 * src2->u.s._12) + (src1->u.s._32 * src2->u.s._13) + (src1->u.s._42 * src2->u.s._14);
temp.u.s._22 = (src1->u.s._12 * src2->u.s._21) + (src1->u.s._22 * src2->u.s._22) + (src1->u.s._32 * src2->u.s._23) + (src1->u.s._42 * src2->u.s._24);
temp.u.s._32 = (src1->u.s._12 * src2->u.s._31) + (src1->u.s._22 * src2->u.s._32) + (src1->u.s._32 * src2->u.s._33) + (src1->u.s._42 * src2->u.s._34);
temp.u.s._42 = (src1->u.s._12 * src2->u.s._41) + (src1->u.s._22 * src2->u.s._42) + (src1->u.s._32 * src2->u.s._43) + (src1->u.s._42 * src2->u.s._44);
temp.u.s._13 = (src1->u.s._13 * src2->u.s._11) + (src1->u.s._23 * src2->u.s._12) + (src1->u.s._33 * src2->u.s._13) + (src1->u.s._43 * src2->u.s._14);
temp.u.s._23 = (src1->u.s._13 * src2->u.s._21) + (src1->u.s._23 * src2->u.s._22) + (src1->u.s._33 * src2->u.s._23) + (src1->u.s._43 * src2->u.s._24);
temp.u.s._33 = (src1->u.s._13 * src2->u.s._31) + (src1->u.s._23 * src2->u.s._32) + (src1->u.s._33 * src2->u.s._33) + (src1->u.s._43 * src2->u.s._34);
temp.u.s._43 = (src1->u.s._13 * src2->u.s._41) + (src1->u.s._23 * src2->u.s._42) + (src1->u.s._33 * src2->u.s._43) + (src1->u.s._43 * src2->u.s._44);
temp.u.s._14 = (src1->u.s._14 * src2->u.s._11) + (src1->u.s._24 * src2->u.s._12) + (src1->u.s._34 * src2->u.s._13) + (src1->u.s._44 * src2->u.s._14);
temp.u.s._24 = (src1->u.s._14 * src2->u.s._21) + (src1->u.s._24 * src2->u.s._22) + (src1->u.s._34 * src2->u.s._23) + (src1->u.s._44 * src2->u.s._24);
temp.u.s._34 = (src1->u.s._14 * src2->u.s._31) + (src1->u.s._24 * src2->u.s._32) + (src1->u.s._34 * src2->u.s._33) + (src1->u.s._44 * src2->u.s._34);
temp.u.s._44 = (src1->u.s._14 * src2->u.s._41) + (src1->u.s._24 * src2->u.s._42) + (src1->u.s._34 * src2->u.s._43) + (src1->u.s._44 * src2->u.s._44);
/* And copy the new matrix in the good storage.. */
}
int i;
switch (d3dvtVertexType & WINED3DFVF_POSITION_MASK) {
default: ERR("Unexpected position mask\n");
}
for (i = 0; i < numTextures; i++) {
}
return size;
}
{
#define ARG1 0x01
#define ARG2 0x02
#define ARG0 0x04
{
/* undefined */ 0,
/* D3DTOP_DISABLE */ 0,
/* D3DTOP_SELECTARG1 */ ARG1,
/* D3DTOP_SELECTARG2 */ ARG2,
};
unsigned int i;
#ifdef VBOX_WITH_WINE_FIX_INITCLEAR
#endif
{
const struct wined3d_texture *texture;
{
i++;
break;
}
{
if (ignore_textype)
{
}
else
{
{
case GL_TEXTURE_1D:
break;
case GL_TEXTURE_2D:
break;
case GL_TEXTURE_3D:
break;
case GL_TEXTURE_CUBE_MAP_ARB:
break;
case GL_TEXTURE_RECTANGLE_ARB:
break;
}
}
} else {
}
{
carg0 = ARG_UNUSED;
carg2 = ARG_UNUSED;
}
if (cop == WINED3D_TOP_DOTPRODUCT3)
{
/* A dotproduct3 on the colorop overwrites the alphaop operation and replicates
* the color result to the alpha component of the destination
*/
}
else
{
}
{
{
{
if (aop == WINED3D_TOP_DISABLE)
{
}
{
{
}
else aarg1 = WINED3DTA_TEXTURE;
}
{
{
}
else aarg2 = WINED3DTA_TEXTURE;
}
}
}
}
{
aarg0 = ARG_UNUSED;
aarg2 = ARG_UNUSED;
}
{
else if (ttff & WINED3D_TTFF_PROJECTED)
else
}
else
{
}
else
}
/* Clear unsupported stages */
for(; i < MAX_TEXTURES; i++) {
}
{
}
{
{
}
else
{
{
case WINED3D_FOG_NONE:
case WINED3D_FOG_LINEAR:
break;
case WINED3D_FOG_EXP:
break;
case WINED3D_FOG_EXP2:
break;
}
}
}
else
{
{
case WINED3D_FOG_LINEAR:
break;
case WINED3D_FOG_EXP:
break;
case WINED3D_FOG_EXP2:
break;
}
}
{
} else {
settings->sRGB_write = 0;
}
{
/* No need to emulate clipplanes if GL supports native vertex shader clipping or if
* the fixed function vertex pipeline is used(which always supports clipplanes), or
* if no clipplane is enabled
*/
settings->emul_clipplanes = 0;
} else {
}
}
const struct ffp_frag_settings *settings)
{
}
{
/* Note that the key is the implementation independent part of the ffp_frag_desc structure,
* whereas desc points to an extended structure with implementation specific parts. */
{
ERR("Failed to insert ffp frag shader.\n");
}
}
/* Activates the texture dimension according to the bound D3D texture. Does
* not care for the colorop or correct gl texture unit (when using nvrc).
* Requires the caller to activate the correct unit. */
/* Context activation is done by the caller (state handler). */
/* NOTE(review): this chunk appears to be a line-filtered residue of the
 * original file. In this function the switch selector (presumably the
 * bound texture's GL target -- TODO confirm against the unfiltered source)
 * and the glEnable()/glDisable() calls that each checkGLcall() refers to
 * have been stripped, leaving bare case labels and orphaned brace pairs.
 * Only comments are added here; every remaining token is untouched. */
void texture_activate_dimensions(const struct wined3d_texture *texture, const struct wined3d_gl_info *gl_info)
{
if (texture)
{
/* NOTE(review): missing "switch (...)" header before this brace. */
{
case GL_TEXTURE_2D:
checkGLcall("glDisable(GL_TEXTURE_3D)");
/* NOTE(review): orphan braces below likely guarded by stripped
 * capability checks (e.g. cube-map / rectangle support in gl_info). */
{
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
{
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
checkGLcall("glEnable(GL_TEXTURE_2D)");
break;
case GL_TEXTURE_RECTANGLE_ARB:
checkGLcall("glDisable(GL_TEXTURE_2D)");
checkGLcall("glDisable(GL_TEXTURE_3D)");
{
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
checkGLcall("glEnable(GL_TEXTURE_RECTANGLE_ARB)");
break;
case GL_TEXTURE_3D:
{
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
{
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
checkGLcall("glDisable(GL_TEXTURE_2D)");
checkGLcall("glEnable(GL_TEXTURE_3D)");
break;
case GL_TEXTURE_CUBE_MAP_ARB:
checkGLcall("glDisable(GL_TEXTURE_2D)");
checkGLcall("glDisable(GL_TEXTURE_3D)");
{
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
checkGLcall("glEnable(GL_TEXTURE_CUBE_MAP_ARB)");
break;
}
}
else
{
/* No texture bound: the visible checkGLcall() strings suggest 2D is
 * enabled and all other targets disabled, but the actual gl calls
 * were stripped -- verify against the unfiltered source. */
checkGLcall("glEnable(GL_TEXTURE_2D)");
checkGLcall("glDisable(GL_TEXTURE_3D)");
{
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
{
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
/* Binding textures is done by samplers. A dummy texture will be bound */
}
}
/* Context activation is done by the caller (state handler). */
/* State-table handler: (re)applies the texture dimension for the sampler
 * encoded in state_id.
 * NOTE(review): the three consecutive bare "return;" statements below were
 * each almost certainly guarded by a stripped "if (...)" early-out
 * condition; as shown, the second and third returns are unreachable and
 * the function does nothing else. Confirm against the unfiltered source. */
void sampler_texdim(struct wined3d_context *context, const struct wined3d_state *state, DWORD state_id)
{
/* No need to enable / disable anything here for unused samplers. The
 * tex_colorop handler takes care. Also no action is needed with pixel
 * shaders, or if tex_colorop will take care of this business. */
return;
return;
return;
}
{
}
{
}
/* Red-black tree deallocation callback (matches the wine_rb_functions
 * free signature).
 * NOTE(review): body is empty as shown -- the actual free call (presumably
 * HeapFree or similar) appears to have been stripped by the line filter;
 * confirm against the unfiltered source before relying on this. */
void wined3d_rb_free(void *ptr)
{
}
{
const struct ffp_frag_settings *kb = &WINE_RB_ENTRY_VALUE(entry, const struct ffp_frag_desc, entry)->settings;
}
/* Callback table wiring the ffp fragment-shader cache into the generic
 * red-black tree implementation.
 * NOTE(review): the initializer is empty as shown -- the function-pointer
 * members (alloc/realloc/free/compare) were evidently stripped by the
 * line filter; an empty brace initializer is also not valid standard C
 * before C23. Restore from the unfiltered source. */
const struct wine_rb_functions wined3d_ffp_frag_program_rb_functions =
{
};
/* Derives the fixed-function vertex pipeline settings key from the current
 * state and stream info, writing the result into *settings.
 * NOTE(review): this body is a line-filtered residue -- most conditions,
 * assignments, and the statements completing the "settings->texgen[i] ="
 * expressions are missing, so several "else" keywords and brace pairs are
 * orphaned. Only comments are added here; restore the stripped lines from
 * the unfiltered source before attempting to compile. */
void wined3d_ffp_get_vs_settings(const struct wined3d_state *state, const struct wined3d_stream_info *si,
struct wined3d_ffp_vs_settings *settings)
{
unsigned int coord_idx, i;
/* Pre-transformed vertices: fill a reduced settings key and return early. */
if (si->position_transformed)
{
else
for (i = 0; i < MAX_TEXTURES; ++i)
{
/* NOTE(review): expression truncated -- no terminating ';' and the
 * masking/or-ing of the shifted TCI value is missing. */
settings->texgen[i] = (state->texture_states[i][WINED3D_TSS_TEXCOORD_INDEX] >> WINED3D_FFP_TCI_SHIFT)
}
return;
}
settings->transformed = 0;
{
}
else
{
}
for (i = 0; i < MAX_TEXTURES; ++i)
{
/* NOTE(review): same truncated texgen expression as above. */
settings->texgen[i] = (state->texture_states[i][WINED3D_TSS_TEXCOORD_INDEX] >> WINED3D_FFP_TCI_SHIFT)
}
settings->light_type = 0;
for (i = 0; i < MAX_ACTIVE_LIGHTS; ++i)
{
}
else
}
/* Red-black tree comparison callback for the ffp vertex program cache
 * (matches the wine_rb_functions compare signature).
 * NOTE(review): body is empty as shown -- the key extraction and memcmp
 * style comparison, and the required "return", were evidently stripped by
 * the line filter. As written this non-void function has no return value
 * (undefined behavior if the result is used). Restore from the unfiltered
 * source. */
static int wined3d_ffp_vertex_program_key_compare(const void *key, const struct wine_rb_entry *entry)
{
}
/* Callback table wiring the ffp vertex-program cache into the generic
 * red-black tree implementation.
 * NOTE(review): initializer is empty as shown -- the function-pointer
 * members were evidently stripped by the line filter, and an empty brace
 * initializer is not valid standard C before C23. Restore from the
 * unfiltered source. */
const struct wine_rb_functions wined3d_ffp_vertex_program_rb_functions =
{
};
{
static const UINT l[] =
{
~0U, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
};
UINT32 i;
return (i = x >> 16) ? (x = i >> 8) ? l[x] + 24 : l[i] + 16 : (i = x >> 8) ? l[i] + 8 : l[x];
}
/* Selects a blitter implementation for the given blit operation,
 * returning the first entry of the static table that can handle it, or
 * NULL if none can. The table order (ffp first, cpu last) makes cpu_blit
 * the fallback.
 * NOTE(review): the "for (i = 0; ...)" loop header and the per-blitter
 * capability check (presumably a blit_supported-style query using the
 * src/dst rect/usage/pool/format parameters -- TODO confirm) were
 * stripped by the line filter, leaving an orphaned brace around the
 * "return blitters[i];". Only comments are added here. */
const struct blit_shader *wined3d_select_blitter(const struct wined3d_gl_info *gl_info, enum wined3d_blit_op blit_op,
const RECT *src_rect, DWORD src_usage, enum wined3d_pool src_pool, const struct wined3d_format *src_format,
const RECT *dst_rect, DWORD dst_usage, enum wined3d_pool dst_pool, const struct wined3d_format *dst_format)
{
static const struct blit_shader * const blitters[] =
{
&ffp_blit,
&cpu_blit,
};
unsigned int i;
{
return blitters[i];
}
return NULL;
}
{
}