utils.c revision 589fd26cedb2b4ebbed14f2964cad03cc8ebbca2
/*
* Utility functions for the WineD3D Library
*
* Copyright 2002-2004 Jason Edmeades
* Copyright 2003-2004 Raphael Junqueira
* Copyright 2004 Christian Costa
* Copyright 2005 Oliver Stieber
* Copyright 2006-2008 Henri Verbeet
* Copyright 2007-2008 Stefan Dösinger for CodeWeavers
*
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
/*
* Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Sun elects to use only
* the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
* a choice of LGPL license versions is made available with the language indicating
* that LGPLv2 or any later version may be used, or where a choice of which version
* of the LGPL is applied is otherwise unspecified.
*/
#include "config.h"
#include "wined3d_private.h"
/* Static (driver-independent) description of a pixel format.
 * NOTE(review): the column-header comment above formats[] mentions alpha/red/
 * green/blue masks, bpp and a fourcc flag, but only the depth/stencil sizes
 * are declared here -- the remaining members appear to have been lost in this
 * revision; confirm against the full file. */
struct StaticPixelFormatDesc
{
short depthSize, stencilSize;   /* Bits of depth / stencil, per format */
};
/*****************************************************************************
* Pixel format array
*
 * The formats WINED3DFMT_A32B32G32R32F, WINED3DFMT_A16B16G16R16F,
 * and WINED3DFMT_A16B16G16R16 do not have correct alpha masks, because the
* high masks do not fit into the 32 bit values needed for ddraw. It is only
* used for ddraw mostly, and to figure out if the format has alpha at all, so
* setting a mask like 0x1 for those surfaces is correct. The 64 and 128 bit
* formats are not usable in 2D rendering because ddraw doesn't support them.
*/
/* Table of static per-format properties, indexed in WINED3DFORMAT order
 * (see the position comment on the FourCC group below).
 * NOTE(review): every actual row of this table appears to be missing -- only
 * the category comments remain, and an empty initializer list is not valid C
 * before C23.  The rows must be restored from the full revision. */
static const struct StaticPixelFormatDesc formats[] =
{
/* WINED3DFORMAT alphamask redmask greenmask bluemask bpp depth stencil isFourcc */
/* FourCC formats, kept here to have WINED3DFMT_R8G8B8(=20) at position 20 */
/* IEEE formats */
/* Hmm? */
/* Float */
/* Palettized formats */
/* Standard ARGB formats. */
/* Luminance */
/* Bump mapping stuff */
/* Depth stencil formats */
/* Is this a vertex buffer? */
/* Vendor-specific formats */
};
/* Per-format information used when emitting fixed-function vertex
 * attributes.
 * NOTE(review): format_vertex_info[] below initializes seven values per row,
 * so this struct presumably also carried the vertex format id, GL type,
 * component count and a normalized flag in the full revision -- confirm. */
struct wined3d_format_vertex_info
{
enum wined3d_ffp_emit_idx emit_idx;   /* FFP attribute-emission dispatch index */
unsigned int component_size;          /* Size of one component, in bytes */
};
/* Vertex declaration formats and how to feed them to the GL vertex pipeline.
 * Row layout (from the initializers): format id, FFP emit index, component
 * count, GL type, GL component count, GL "normalized" flag, component size.
 * NOTE(review): the rows carry more values than the struct visible above
 * declares -- the struct definition looks truncated; verify before building. */
static const struct wined3d_format_vertex_info format_vertex_info[] =
{
{WINED3DFMT_R8G8B8A8_UINT, WINED3D_FFP_EMIT_UBYTE4, 4, GL_UNSIGNED_BYTE, 4, GL_FALSE, sizeof(BYTE)},
{WINED3DFMT_R16G16B16A16_SINT, WINED3D_FFP_EMIT_SHORT4, 4, GL_SHORT, 4, GL_FALSE, sizeof(short int)},
{WINED3DFMT_R8G8B8A8_UNORM, WINED3D_FFP_EMIT_UBYTE4N, 4, GL_UNSIGNED_BYTE, 4, GL_TRUE, sizeof(BYTE)},
{WINED3DFMT_R16G16B16A16_SNORM, WINED3D_FFP_EMIT_SHORT4N, 4, GL_SHORT, 4, GL_TRUE, sizeof(short int)},
{WINED3DFMT_R16G16_UNORM, WINED3D_FFP_EMIT_USHORT2N, 2, GL_UNSIGNED_SHORT, 2, GL_TRUE, sizeof(short int)},
{WINED3DFMT_R16G16B16A16_UNORM, WINED3D_FFP_EMIT_USHORT4N, 4, GL_UNSIGNED_SHORT, 4, GL_TRUE, sizeof(short int)},
{WINED3DFMT_R10G10B10A2_UINT, WINED3D_FFP_EMIT_UDEC3, 3, GL_UNSIGNED_SHORT, 3, GL_FALSE, sizeof(short int)},
{WINED3DFMT_R16G16B16A16_FLOAT, WINED3D_FFP_EMIT_FLOAT16_4, 4, GL_FLOAT, 4, GL_FALSE, sizeof(GLhalfNV)}
};
typedef struct {
unsigned int Flags;
/*****************************************************************************
* OpenGL format template. Contains unexciting formats which do not need
* extension checks. The order in this table is independent of the order in
* the table StaticPixelFormatDesc above. Not all formats have to be in this
* table.
*/
/* NOTE(review): most rows of this table appear to have been lost -- several
 * entries consist only of a trailing flags line or an orphaned "0},", and the
 * GlPixelFormatDescTemplate typedef above is itself cut off.  The table must
 * be restored from the full revision; the structure kept below is what
 * survives. */
static const GlPixelFormatDescTemplate gl_formats_template[] = {
/* WINED3DFORMAT internal srgbInternal rtInternal
format type
flags */
{WINED3DFMT_UNKNOWN, 0, 0, 0,
0, 0,
0},
/* FourCC formats */
/* GL_APPLE_ycbcr_422 claims that its '2YUV' format, which is supported via the UNSIGNED_SHORT_8_8_REV_APPLE type
* is equivalent to 'UYVY' format on Windows, and the 'YUVS' via UNSIGNED_SHORT_8_8_APPLE equates to 'YUY2'. The
* d3d9 test however shows that the opposite is true. Since the extension is from 2002, it predates the x86 based
* Macs, so probably the endianness differs. This could be tested as soon as we have a Windows and MacOS on a big
* endian machine
*/
{WINED3DFMT_MULTI2_ARGB8, 0, 0, 0,
0, 0,
0},
{WINED3DFMT_G8R8_G8B8, 0, 0, 0,
0, 0,
0},
{WINED3DFMT_R8G8_B8G8, 0, 0, 0,
0, 0,
0},
/* IEEE formats */
/* Hmm? */
{WINED3DFMT_CxV8U8, 0, 0, 0,
0, 0,
0},
/* Float */
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET},
/* Palettized formats */
{WINED3DFMT_A8P8, 0, 0, 0,
0, 0,
0},
0},
/* Standard ARGB formats */
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET},
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET},
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET},
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET},
{WINED3DFMT_A8R3G3B2, 0, 0, 0,
0, 0,
0},
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET},
/* Luminance */
0},
/* Bump mapping stuff */
{WINED3DFMT_W11V11U10, 0, 0, 0,
0, 0,
0},
{WINED3DFMT_A2W10V10U10, 0, 0, 0,
0, 0,
0},
/* Depth stencil formats */
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_DEPTH | WINED3DFMT_FLAG_STENCIL},
/* Is this a vertex buffer? */
{WINED3DFMT_VERTEXDATA, 0, 0, 0,
0, 0,
0},
{WINED3DFMT_R16_UINT, 0, 0, 0,
0, 0,
0},
{WINED3DFMT_R32_UINT, 0, 0, 0,
0, 0,
0},
0},
/* Vendor-specific formats */
{WINED3DFMT_ATI2N, 0, 0, 0,
0},
{WINED3DFMT_NVHU, 0, 0, 0,
0},
{WINED3DFMT_NVHS, 0, 0, 0,
0}
};
/* First check if the format is at the position of its value.
* This will catch the argb formats before the loop is entered
*/
return fmt;
} else {
unsigned int i;
return i;
}
}
}
return -1;
}
{
UINT i;
gl_info->gl_formats = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, format_count * sizeof(*gl_info->gl_formats));
if (!gl_info->gl_formats)
{
ERR("Failed to allocate memory.\n");
return FALSE;
}
for (i = 0; i < format_count; ++i)
{
}
return TRUE;
}
#define GLINFO_LOCATION (*gl_info)
{
while(glGetError());
GL_EXTCALL(glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, tex, 0));
checkGLcall("Framebuffer format check");
return status == GL_FRAMEBUFFER_COMPLETE_EXT;
}
{
unsigned int i;
for (i = 0; i < sizeof(gl_formats_template) / sizeof(gl_formats_template[0]); ++i)
{
struct GlPixelFormatDesc *desc;
if (fmt_idx == -1)
{
ERR("Format %s (%#x) not found.\n",
return FALSE;
}
{
/* Check if the default internal format is supported as a frame buffer target, otherwise
* fall back to the render target internal.
*
* Try to stick to the standard format if possible, this limits precision differences */
{
TRACE("Internal format of %s not supported as FBO target, using render target internal instead\n",
}
else
{
}
}
else
{
}
}
return TRUE;
}
{
int idx;
/* When ARB_texture_rg is supported we only require 16-bit for R16F instead of 64-bit RGBA16F */
if (GL_SUPPORT(ARB_TEXTURE_RG))
{
}
/* When ARB_texture_rg is supported we only require 32-bit for R32F instead of 128-bit RGBA32F */
if (GL_SUPPORT(ARB_TEXTURE_RG))
{
}
/* V8U8 is supported natively by GL_ATI_envmap_bumpmap and GL_NV_texture_shader.
* V16U16 is only supported by GL_NV_texture_shader. The formats need fixup if
* their extensions are not available. GL_ATI_envmap_bumpmap is not used because
* the only driver that implements it(fglrx) has a buggy implementation.
*
* V8U8 and V16U16 need a fixup of the undefined blue channel. OpenGL
 * returns 0.0 when sampling from it, while DirectX returns 1.0. So we always have in-shader
* conversion for this format.
*/
if (!GL_SUPPORT(NV_TEXTURE_SHADER))
{
}
else
{
}
if (!GL_SUPPORT(NV_TEXTURE_SHADER))
{
/* If GL_NV_texture_shader is not supported, those formats are converted, incompatibly
* with each other
*/
}
else
{
/* If GL_NV_texture_shader is supported, WINED3DFMT_L6V5U5 and WINED3DFMT_X8L8V8U8
* are converted at surface loading time, but they do not need any modification in
* the shader, thus they are compatible with all WINED3DFMT_UNKNOWN group formats.
* WINED3DFMT_Q8W8V8U8 doesn't even need load-time conversion
*/
}
{
}
else if (GL_SUPPORT(ATI_TEXTURE_COMPRESSION_3DC))
{
}
if (!GL_SUPPORT(APPLE_YCBCR_422))
{
}
{
}
{
/* Do not change the size of the type, it is CPU side. We have to change the GPU-side information though.
* It is the job of the vertex buffer code to make sure that the vbos have the right format */
}
}
{
unsigned int i;
for (i = 0; i < (sizeof(format_vertex_info) / sizeof(*format_vertex_info)); ++i)
{
struct GlPixelFormatDesc *format_desc;
if (fmt_idx == -1)
{
ERR("Format %s (%#x) not found.\n",
return FALSE;
}
}
return TRUE;
}
{
return init_format_base_info(gl_info);
}
{
if (!init_format_texture_info(gl_info))
{
return FALSE;
}
if (!init_format_vertex_info(gl_info))
{
return FALSE;
}
return TRUE;
}
/* Looks up the GL pixel format description for the given WINED3DFORMAT.
 * NOTE(review): the body is truncated -- `idx` is never declared or computed
 * here and no value is returned.  From the surviving comment, the intent is
 * to return a valid fallback entry (presumably the WINED3DFMT_UNKNOWN slot)
 * when the format is not found; restore from the full revision. */
const struct GlPixelFormatDesc *getFormatDescEntry(WINED3DFORMAT fmt, const WineD3D_GL_Info *gl_info)
{
if(idx == -1) {
/* Get the caller a valid pointer */
}
}
/*****************************************************************************
* Trace formatting of useful values
*/
switch (fmt) {
default:
{
char fourcc[5];
fourcc[4] = 0;
else
}
return "unrecognized";
}
}
switch (devtype) {
default:
return "unrecognized";
}
}
switch (usage & WINED3DUSAGE_MASK) {
#define WINED3DUSAGE_TO_STR(u) case u: return #u
case 0: return "none";
default:
return "unrecognized";
}
}
switch (usagequery & WINED3DUSAGE_QUERY_MASK) {
#define WINED3DUSAGEQUERY_TO_STR(u) case u: return #u
case 0: return "none";
default:
return "unrecognized";
}
}
switch (method) {
#define WINED3DDECLMETHOD_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (usage) {
#define WINED3DDECLUSAGE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (res) {
default:
return "unrecognized";
}
}
switch (PrimitiveType) {
default:
return "unrecognized";
}
}
switch (state) {
#define D3DSTATE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (state) {
#define D3DSTATE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (filter_type) {
#define D3DTEXTUREFILTERTYPE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (state) {
#define D3DSTATE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (d3dtop) {
#define D3DTOP_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (tstype) {
default:
return ("WINED3DTS_WORLDMATRIX > 0");
}
return "unrecognized";
}
}
switch (Pool) {
#define POOL_TO_STR(p) case p: return #p
default:
return "unrecognized";
}
}
switch(status) {
#define FBOSTATUS_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch(error) {
#define GLERROR_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch(basis) {
case WINED3DBASIS_BEZIER: return "WINED3DBASIS_BEZIER";
case WINED3DBASIS_BSPLINE: return "WINED3DBASIS_BSPLINE";
case WINED3DBASIS_INTERPOLATE: return "WINED3DBASIS_INTERPOLATE";
default: return "unrecognized";
}
}
switch(degree) {
case WINED3DDEGREE_LINEAR: return "WINED3DDEGREE_LINEAR";
case WINED3DDEGREE_QUADRATIC: return "WINED3DDEGREE_QUADRATIC";
case WINED3DDEGREE_CUBIC: return "WINED3DDEGREE_CUBIC";
case WINED3DDEGREE_QUINTIC: return "WINED3DDEGREE_QUINTIC";
default: return "unrecognized";
}
}
{
switch(source)
{
#define WINED3D_TO_STR(x) case x: return #x
default:
return "unrecognized";
}
}
{
switch(yuv_fixup)
{
#define WINED3D_TO_STR(x) case x: return #x
default:
return "unrecognized";
}
}
{
if (is_yuv_fixup(fixup))
{
return;
}
TRACE("\tX: %s%s\n", debug_fixup_channel_source(fixup.x_source), fixup.x_sign_fixup ? ", SIGN_FIXUP" : "");
TRACE("\tY: %s%s\n", debug_fixup_channel_source(fixup.y_source), fixup.y_sign_fixup ? ", SIGN_FIXUP" : "");
TRACE("\tZ: %s%s\n", debug_fixup_channel_source(fixup.z_source), fixup.z_sign_fixup ? ", SIGN_FIXUP" : "");
TRACE("\tW: %s%s\n", debug_fixup_channel_source(fixup.w_source), fixup.w_sign_fixup ? ", SIGN_FIXUP" : "");
}
char buf[128];
buf[0] = 0;
}
/*****************************************************************************
* Useful functions mapping GL <-> D3D values
*/
switch(op) {
case WINED3DSTENCILOP_KEEP : return GL_KEEP;
case WINED3DSTENCILOP_ZERO : return GL_ZERO;
case WINED3DSTENCILOP_REPLACE : return GL_REPLACE;
case WINED3DSTENCILOP_INCRSAT : return GL_INCR;
case WINED3DSTENCILOP_DECRSAT : return GL_DECR;
case WINED3DSTENCILOP_INVERT : return GL_INVERT;
case WINED3DSTENCILOP_INCR : return GL_INCR_WRAP_EXT;
case WINED3DSTENCILOP_DECR : return GL_DECR_WRAP_EXT;
default:
return GL_KEEP;
}
}
switch ((WINED3DCMPFUNC)func) {
case WINED3DCMP_NEVER : return GL_NEVER;
case WINED3DCMP_LESS : return GL_LESS;
case WINED3DCMP_EQUAL : return GL_EQUAL;
case WINED3DCMP_LESSEQUAL : return GL_LEQUAL;
case WINED3DCMP_GREATER : return GL_GREATER;
case WINED3DCMP_NOTEQUAL : return GL_NOTEQUAL;
case WINED3DCMP_GREATEREQUAL : return GL_GEQUAL;
case WINED3DCMP_ALWAYS : return GL_ALWAYS;
default:
return 0;
}
}
/* Reports whether a texture-stage operation/argument combination is invalid
 * for the given stage.  As visible here it unconditionally answers FALSE
 * ("valid").
 * NOTE(review): the actual validation logic appears to have been stripped
 * from this revision -- confirm against the full file before relying on it. */
BOOL is_invalid_op(IWineD3DDeviceImpl *This, int stage, WINED3DTEXTUREOP op, DWORD arg1, DWORD arg2, DWORD arg3) {
return FALSE;
}
/* Setup this textures matrix according to the texture flags*/
{
float mat[16];
checkGLcall("glMatrixMode(GL_TEXTURE)");
checkGLcall("glLoadIdentity()");
return;
}
ERR("Invalid texture transform flags: WINED3DTTFF_COUNT1|WINED3DTTFF_PROJECTED\n");
return;
}
if (flags & WINED3DTTFF_PROJECTED) {
if(!ffp_proj_control) {
switch (flags & ~WINED3DTTFF_PROJECTED) {
case WINED3DTTFF_COUNT2:
break;
case WINED3DTTFF_COUNT3:
break;
}
}
} else { /* under directx the R/Z coord can be used for translation, under opengl we use the Q coord instead */
if(!calculatedCoords) {
switch(vtx_fmt)
{
case WINED3DFMT_R32_FLOAT:
/* Direct3D passes the default 1.0 in the 2nd coord, while gl passes it in the 4th.
* swap 2nd and 4th coord. No need to store the value of mat[12] in mat[4] because
* the input value to the transformation will be 0, so the matrix value is irrelevant
*/
break;
case WINED3DFMT_R32G32_FLOAT:
/* See above, just 3rd and 4th coord
*/
break;
case WINED3DFMT_R32G32B32_FLOAT: /* Opengl defaults match dx defaults */
case WINED3DFMT_R32G32B32A32_FLOAT: /* No defaults apply, all app defined */
/* This is to prevent swapping the matrix lines and put the default 4th coord = 1.0
* into a bad place. The division elimination below will apply to make sure the
* 1.0 doesn't do anything bad. The caller will set this value if the stride is 0
*/
case WINED3DFMT_UNKNOWN: /* No texture coords, 0/0/0/1 defaults are passed */
break;
default:
FIXME("Unexpected fixed function texture coord input\n");
}
}
if(!ffp_proj_control) {
switch (flags & ~WINED3DTTFF_PROJECTED) {
/* case WINED3DTTFF_COUNT1: Won't ever get here */
/* OpenGL divides the first 3 vertex coord by the 4th by default,
* which is essentially the same as D3DTTFF_PROJECTED. Make sure that
* the 4th coord evaluates to 1.0 to eliminate that.
*
* If the fixed function pipeline is used, the 4th value remains unused,
* so there is no danger in doing this. With vertex shaders we have a
* problem. Should an app hit that problem, the code here would have to
* check for pixel shaders, and the shader has to undo the default gl divide.
*
* A more serious problem occurs if the app passes 4 coordinates in, and the
* 4th is != 1.0(opengl default). This would have to be fixed in drawStridedSlow
* or a replacement shader
*/
}
}
}
checkGLcall("glLoadMatrixf(mat)");
}
/* This small helper function is used to convert a bitmask into the number of masked bits */
unsigned int count_bits(unsigned int mask)
{
    unsigned int count;

    /* Kernighan's method: mask &= mask - 1 clears the lowest set bit, so the
     * loop iterates exactly once per set bit.  (The previous code left the
     * loop body empty and returned `count` uninitialized -- undefined
     * behavior.) */
    for (count = 0; mask; ++count)
    {
        mask &= mask - 1;
    }
    return count;
}
/* Helper function for retrieving color info for ChoosePixelFormat and wglChoosePixelFormatARB.
* The later function requires individual color components. */
{
switch(format_desc->format)
{
case WINED3DFMT_X8R8G8B8:
case WINED3DFMT_R8G8B8:
case WINED3DFMT_A8R8G8B8:
case WINED3DFMT_A2R10G10B10:
case WINED3DFMT_X1R5G5B5:
case WINED3DFMT_A1R5G5B5:
case WINED3DFMT_R5G6B5:
case WINED3DFMT_X4R4G4B4:
case WINED3DFMT_A4R4G4B4:
case WINED3DFMT_R3G3B2:
case WINED3DFMT_A8P8:
case WINED3DFMT_P8:
break;
default:
return FALSE;
}
TRACE("Returning red: %d, green: %d, blue: %d, alpha: %d, total: %d for fmt=%s\n",
return TRUE;
}
/* Helper function for retrieving depth/stencil info for ChoosePixelFormat and wglChoosePixelFormatARB */
BOOL getDepthStencilBits(const struct GlPixelFormatDesc *format_desc, short *depthSize, short *stencilSize)
{
switch(format_desc->format)
{
case WINED3DFMT_D16_LOCKABLE:
case WINED3DFMT_D16_UNORM:
case WINED3DFMT_D15S1:
case WINED3DFMT_D24X8:
case WINED3DFMT_D24X4S4:
case WINED3DFMT_D24S8:
case WINED3DFMT_D24FS8:
case WINED3DFMT_D32:
case WINED3DFMT_D32F_LOCKABLE:
break;
default:
return FALSE;
}
TRACE("Returning depthSize: %d and stencilSize: %d for fmt=%s\n",
return TRUE;
}
/* DirectDraw stuff */
/* Maps a DirectDraw display bit depth to the WINED3DFORMAT used for the
 * adapter display mode.
 * NOTE(review): the function signature for this switch is missing from this
 * revision (presumably something like pixelformat_for_depth(DWORD depth)),
 * and the 24-bit case appears to have been dropped -- confirm. */
switch(depth) {
case 8: return WINED3DFMT_P8;
case 15: return WINED3DFMT_X1R5G5B5;
case 16: return WINED3DFMT_R5G6B5;
case 32: return WINED3DFMT_X8R8G8B8; /* EVE online and the Fur demo need 32bit AdapterDisplayMode to return X8R8G8B8 */
default: return WINED3DFMT_UNKNOWN;
}
}
/* Now do the multiplication 'by hand'.
I know that all this could be optimised, but this will be done later :-) */
/* 4x4 matrix product computed into a local `temp` so the destination may
 * alias either source operand.
 * NOTE(review): the enclosing function signature (a matrix-multiply helper)
 * and the final statement copying `temp` into the destination are missing
 * from this revision -- as written the result is discarded.  Restore from
 * the full file. */
temp.u.s._11 = (src1->u.s._11 * src2->u.s._11) + (src1->u.s._21 * src2->u.s._12) + (src1->u.s._31 * src2->u.s._13) + (src1->u.s._41 * src2->u.s._14);
temp.u.s._21 = (src1->u.s._11 * src2->u.s._21) + (src1->u.s._21 * src2->u.s._22) + (src1->u.s._31 * src2->u.s._23) + (src1->u.s._41 * src2->u.s._24);
temp.u.s._31 = (src1->u.s._11 * src2->u.s._31) + (src1->u.s._21 * src2->u.s._32) + (src1->u.s._31 * src2->u.s._33) + (src1->u.s._41 * src2->u.s._34);
temp.u.s._41 = (src1->u.s._11 * src2->u.s._41) + (src1->u.s._21 * src2->u.s._42) + (src1->u.s._31 * src2->u.s._43) + (src1->u.s._41 * src2->u.s._44);
temp.u.s._12 = (src1->u.s._12 * src2->u.s._11) + (src1->u.s._22 * src2->u.s._12) + (src1->u.s._32 * src2->u.s._13) + (src1->u.s._42 * src2->u.s._14);
temp.u.s._22 = (src1->u.s._12 * src2->u.s._21) + (src1->u.s._22 * src2->u.s._22) + (src1->u.s._32 * src2->u.s._23) + (src1->u.s._42 * src2->u.s._24);
temp.u.s._32 = (src1->u.s._12 * src2->u.s._31) + (src1->u.s._22 * src2->u.s._32) + (src1->u.s._32 * src2->u.s._33) + (src1->u.s._42 * src2->u.s._34);
temp.u.s._42 = (src1->u.s._12 * src2->u.s._41) + (src1->u.s._22 * src2->u.s._42) + (src1->u.s._32 * src2->u.s._43) + (src1->u.s._42 * src2->u.s._44);
temp.u.s._13 = (src1->u.s._13 * src2->u.s._11) + (src1->u.s._23 * src2->u.s._12) + (src1->u.s._33 * src2->u.s._13) + (src1->u.s._43 * src2->u.s._14);
temp.u.s._23 = (src1->u.s._13 * src2->u.s._21) + (src1->u.s._23 * src2->u.s._22) + (src1->u.s._33 * src2->u.s._23) + (src1->u.s._43 * src2->u.s._24);
temp.u.s._33 = (src1->u.s._13 * src2->u.s._31) + (src1->u.s._23 * src2->u.s._32) + (src1->u.s._33 * src2->u.s._33) + (src1->u.s._43 * src2->u.s._34);
temp.u.s._43 = (src1->u.s._13 * src2->u.s._41) + (src1->u.s._23 * src2->u.s._42) + (src1->u.s._33 * src2->u.s._43) + (src1->u.s._43 * src2->u.s._44);
temp.u.s._14 = (src1->u.s._14 * src2->u.s._11) + (src1->u.s._24 * src2->u.s._12) + (src1->u.s._34 * src2->u.s._13) + (src1->u.s._44 * src2->u.s._14);
temp.u.s._24 = (src1->u.s._14 * src2->u.s._21) + (src1->u.s._24 * src2->u.s._22) + (src1->u.s._34 * src2->u.s._23) + (src1->u.s._44 * src2->u.s._24);
temp.u.s._34 = (src1->u.s._14 * src2->u.s._31) + (src1->u.s._24 * src2->u.s._32) + (src1->u.s._34 * src2->u.s._33) + (src1->u.s._44 * src2->u.s._34);
temp.u.s._44 = (src1->u.s._14 * src2->u.s._41) + (src1->u.s._24 * src2->u.s._42) + (src1->u.s._34 * src2->u.s._43) + (src1->u.s._44 * src2->u.s._44);
/* And copy the new matrix in the good storage.. */
}
int i;
switch (d3dvtVertexType & WINED3DFVF_POSITION_MASK) {
default: ERR("Unexpected position mask\n");
}
for (i = 0; i < numTextures; i++) {
}
return size;
}
/***********************************************************************
* CalculateTexRect
*
* Calculates the dimensions of the opengl texture used for blits.
* Handled oversized opengl textures and updates the source rectangle
* accordingly
*
* Params:
* This: Surface to operate on
* Rect: Requested rectangle
*
* Returns:
* TRUE if the texture part can be loaded,
* FALSE otherwise
*
*********************************************************************/
/* The sizes might be reversed */
}
}
/* No oversized texture? This is easy */
/* Which rect from the texture do I need? */
} else {
}
return TRUE;
} else {
/* Check if we can succeed at all */
TRACE("Requested rectangle is too large for gl\n");
return FALSE;
}
/* A part of the texture has to be picked. First, check if
* some texture part is loaded already, if yes try to re-use it.
* If the texture is dirty, or the part can't be used,
* re-position the part to load
*/
/* Ok, the rectangle is ok, re-use it */
TRACE("Using existing gl Texture\n");
} else {
/* Rectangle is not ok, dirtify the texture to reload it */
TRACE("Dirtifying texture to force reload\n");
}
}
/* Now if we are dirty(no else if!) */
/* Set the new rectangle. Use the following strategy:
* 1) Use as big textures as possible.
* 2) Place the texture part in the way that the requested
* part is in the middle of the texture(well, almost)
* 3) If the texture is moved over the edges of the
* surface, replace it nicely
* 4) If the coord is not limiting the texture size,
* use the whole size
*/
}
}
} else {
}
}
} else {
}
}
/* Re-calculate the rect to draw */
/* Get the gl coordinates. The gl rectangle is a power of 2, eigher the max size,
* or the pow2Width / pow2Height of the surface.
*
* Can never be GL_TEXTURE_RECTANGLE_ARB because oversized surfaces are always set up
* as regular GL_TEXTURE_2D.
*/
}
return TRUE;
}
/* Hash table functions */
/* Allocates and initializes a hash table; the bucket list array is carved
 * out of the same allocation as the table header.
 * NOTE(review): this body is truncated -- the bare blocks below have lost
 * their `if (...)` conditions (as written the first ERR/return NULL always
 * executes), most field initialization (hash_function, compare_function,
 * bucket_count, buckets, grow_size, shrink_threshold) is missing, and the
 * failure paths leak `table` (no HeapFree before return NULL).  Restore from
 * the full revision. */
struct hash_table_t *hash_table_create(hash_function_t *hash_function, compare_function_t *compare_function)
{
struct hash_table_t *table;
unsigned int initial_size = 8;
table = HeapAlloc(GetProcessHeap(), 0, sizeof(struct hash_table_t) + (initial_size * sizeof(struct list)));
if (!table)
{
ERR("Failed to allocate table, returning NULL.\n");
return NULL;
}
table->shrink_size = 0;
{
ERR("Failed to allocate table buckets, returning NULL.\n");
return NULL;
}
table->entries = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, table->grow_size * sizeof(struct hash_table_entry_t));
{
ERR("Failed to allocate table entries, returning NULL.\n");
return NULL;
}
table->entry_count = 0;
return table;
}
/* Destroys a hash table, invoking free_value(value, cb) on each stored value
 * when a callback is supplied.
 * NOTE(review): truncated -- the per-entry free_value call and the HeapFree
 * of the entries array and the table itself are missing, so as written this
 * frees nothing.  Restore from the full revision. */
void hash_table_destroy(struct hash_table_t *table, void (*free_value)(void *value, void *cb), void *cb)
{
unsigned int i = 0;
for (i = 0; i < table->entry_count; ++i)
{
if(free_value) {
}
}
}
/* Invokes callback(value, context) for every entry in the table.
 * NOTE(review): truncated -- the loop body that actually calls `callback`
 * is missing, so as written this iterates without effect. */
void hash_table_for_each_entry(struct hash_table_t *table, void (*callback)(void *value, void *context), void *context)
{
unsigned int i = 0;
for (i = 0; i < table->entry_count; ++i)
{
}
}
/* Finds the entry matching `key` in bucket `idx`, or NULL when absent.
 * NOTE(review): truncated -- the walk over the bucket's list (using the
 * table's compare_function) is missing; `entry` is unused and NULL is
 * returned unconditionally.  Restore from the full revision. */
static inline struct hash_table_entry_t *hash_table_get_by_idx(const struct hash_table_t *table, const void *key,
unsigned int idx)
{
struct hash_table_entry_t *entry;
return NULL;
}
{
unsigned int new_entry_count = 0;
struct hash_table_entry_t *new_entries;
struct list *new_buckets;
unsigned int i;
new_buckets = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, new_bucket_count * sizeof(struct list));
if (!new_buckets)
{
ERR("Failed to allocate new buckets, returning FALSE.\n");
return FALSE;
}
new_entries = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, grow_size * sizeof(struct hash_table_entry_t));
if (!new_entries)
{
ERR("Failed to allocate new entries, returning FALSE.\n");
return FALSE;
}
for (i = 0; i < table->bucket_count; ++i)
{
{
{
int j;
}
}
}
return TRUE;
}
{
unsigned int idx;
unsigned int hash;
struct hash_table_entry_t *entry;
if (entry)
{
if (!value)
{
/* Remove the entry */
/* Shrink if necessary */
{
ERR("Failed to shrink the table...\n");
}
}
}
return;
}
if (!value) return;
/* Grow if necessary */
{
{
ERR("Failed to grow the table, returning.\n");
return;
}
}
/* Find an entry to insert */
{
} else {
}
/* Insert the entry */
}
{
}
{
unsigned int idx;
struct hash_table_entry_t *entry;
}
/* Derives the fixed-function fragment-processing settings (per-stage color/
 * alpha ops and args, texture types, fog mode, sRGB write) from the given
 * state block, for use as a key into the FFP fragment shader cache.
 * NOTE(review): this body is severely truncated -- the local declarations
 * (texture, cop, aop, carg*, aarg*), the switch statements that the orphaned
 * `case` labels below belong to, and nearly all assignments into `settings`
 * are missing.  Do not attempt to reason about behavior from this residue;
 * restore from the full revision. */
void gen_ffp_frag_op(IWineD3DStateBlockImpl *stateblock, struct ffp_frag_settings *settings, BOOL ignore_textype) {
#define ARG1 0x01
#define ARG2 0x02
#define ARG0 0x04
/* undefined */ 0,
/* D3DTOP_DISABLE */ 0,
/* D3DTOP_SELECTARG1 */ ARG1,
/* D3DTOP_SELECTARG2 */ ARG2,
};
unsigned int i;
for(i = 0; i < GL_LIMITS(texture_stages); i++) {
i++;
break;
}
if(texture) {
if(ignore_textype) {
} else {
case GL_TEXTURE_1D:
break;
case GL_TEXTURE_2D:
break;
case GL_TEXTURE_3D:
break;
case GL_TEXTURE_CUBE_MAP_ARB:
break;
case GL_TEXTURE_RECTANGLE_ARB:
break;
}
}
} else {
}
carg0 = ARG_UNUSED;
carg2 = ARG_UNUSED;
}
if(cop == WINED3DTOP_DOTPRODUCT3) {
/* A dotproduct3 on the colorop overwrites the alphaop operation and replicates
* the color result to the alpha component of the destination
*/
} else {
}
{
{
{
if (aop == WINED3DTOP_DISABLE)
{
}
{
{
}
else aarg1 = WINED3DTA_TEXTURE;
}
{
{
}
else aarg2 = WINED3DTA_TEXTURE;
}
}
}
}
aarg0 = ARG_UNUSED;
aarg2 = ARG_UNUSED;
}
} else {
}
} else {
}
} else {
}
}
/* Clear unsupported stages */
for(; i < MAX_TEXTURES; i++) {
}
if(use_vs(stateblock) || ((IWineD3DVertexDeclarationImpl *) stateblock->vertexDecl)->position_transformed) {
} else {
case WINED3DFOG_NONE:
case WINED3DFOG_LINEAR:
break;
case WINED3DFOG_EXP:
break;
case WINED3DFOG_EXP2:
break;
}
}
} else {
case WINED3DFOG_LINEAR:
break;
case WINED3DFOG_EXP:
break;
case WINED3DFOG_EXP2:
break;
}
}
} else {
settings->sRGB_write = 0;
}
}
const struct ffp_frag_settings *settings)
{
}
/* Note that the key is the implementation independent part of the ffp_frag_desc structure,
* whereas desc points to an extended structure with implementation specific parts.
* Make a copy of the key because hash_table_put takes ownership of it
*/
}
/* Activates the texture dimension according to the bound D3D texture.
* Does not care for the colorop or correct gl texture unit(when using nvrc)
* Requires the caller to activate the correct unit before
*/
/* NOTE(review): truncated -- the `switch` statement (presumably on the bound
 * texture's GL target) that the `case` labels below belong to, along with the
 * glEnable/glDisable calls preceding each checkGLcall, are missing from this
 * revision.  Restore from the full file. */
void texture_activate_dimensions(DWORD stage, IWineD3DStateBlockImpl *stateblock, WineD3DContext *context) {
case GL_TEXTURE_2D:
checkGLcall("glDisable(GL_TEXTURE_3D)");
if(GL_SUPPORT(ARB_TEXTURE_CUBE_MAP)) {
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
if(GL_SUPPORT(ARB_TEXTURE_RECTANGLE)) {
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
checkGLcall("glEnable(GL_TEXTURE_2D)");
break;
case GL_TEXTURE_RECTANGLE_ARB:
checkGLcall("glDisable(GL_TEXTURE_2D)");
checkGLcall("glDisable(GL_TEXTURE_3D)");
if(GL_SUPPORT(ARB_TEXTURE_CUBE_MAP)) {
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
checkGLcall("glEnable(GL_TEXTURE_RECTANGLE_ARB)");
break;
case GL_TEXTURE_3D:
if(GL_SUPPORT(ARB_TEXTURE_CUBE_MAP)) {
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
if(GL_SUPPORT(ARB_TEXTURE_RECTANGLE)) {
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
checkGLcall("glDisable(GL_TEXTURE_2D)");
checkGLcall("glEnable(GL_TEXTURE_3D)");
break;
case GL_TEXTURE_CUBE_MAP_ARB:
checkGLcall("glDisable(GL_TEXTURE_2D)");
checkGLcall("glDisable(GL_TEXTURE_3D)");
if(GL_SUPPORT(ARB_TEXTURE_RECTANGLE)) {
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
checkGLcall("glEnable(GL_TEXTURE_CUBE_MAP_ARB)");
break;
}
} else {
checkGLcall("glEnable(GL_TEXTURE_2D)");
checkGLcall("glDisable(GL_TEXTURE_3D)");
if(GL_SUPPORT(ARB_TEXTURE_CUBE_MAP)) {
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
if(GL_SUPPORT(ARB_TEXTURE_RECTANGLE)) {
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
/* Binding textures is done by samplers. A dummy texture will be bound */
}
}
/* Hash function for ffp_frag_settings keys, used by the FFP fragment shader
 * cache's hash table.
 * NOTE(review): truncated -- the loop body that folds the stage-0/1 settings
 * of `k` into `hash` is missing; as written `k` is unused and 0 is always
 * returned.  Restore from the full revision. */
unsigned int ffp_frag_program_key_hash(const void *key)
{
const struct ffp_frag_settings *k = key;
unsigned int hash = 0, i;
/* This takes the texture op settings of stage 0 and 1 into account.
* how exactly depends on the memory layout of the compiler, but it
* should not matter too much. Stages > 1 are used rarely, so there's
* no need to process them. Even if they're used it is likely that
* the ffp setup has distinct stage 0 and 1 settings.
*/
for(i = 0; i < 2; i++) {
}
return hash;
}
{
}
{
static const BYTE l[] =
{
0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
};
UINT32 i;
return (i = x >> 16) ? (x = i >> 8) ? l[x] + 24 : l[i] + 16 : (i = x >> 8) ? l[i] + 8 : l[x];
}