utils.c revision 5112e32d7072e280613921c982a6672f2c859cf3
/*
* Utility functions for the WineD3D Library
*
* Copyright 2002-2004 Jason Edmeades
* Copyright 2003-2004 Raphael Junqueira
* Copyright 2004 Christian Costa
* Copyright 2005 Oliver Stieber
* Copyright 2006-2008 Henri Verbeet
* Copyright 2007-2008 Stefan Dösinger for CodeWeavers
* Copyright 2009 Henri Verbeet for CodeWeavers
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
/*
* Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Sun elects to use only
* the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
* a choice of LGPL license versions is made available with the language indicating
* that LGPLv2 or any later version may be used, or where a choice of which version
* of the LGPL is applied is otherwise unspecified.
*/
#include "config.h"
#include "wined3d_private.h"
/* GL-independent, static description of a pixel format.
 * NOTE(review): the column header of the formats[] table below mentions
 * alpha/red/green/blue masks and bpp, so this struct appears truncated
 * to only the depth/stencil members — confirm the remaining fields. */
struct StaticPixelFormatDesc
{
/* Bits of depth and stencil information; presumably 0 for pure color
 * formats — verify against the (stripped) table entries. */
short depthSize, stencilSize;
};
/*****************************************************************************
* Pixel format array
*
* For the formats WINED3DFMT_A32B32G32R32F, WINED3DFMT_A16B16G16R16F,
* and WINED3DFMT_A16B16G16R16 do not have correct alpha masks, because the
* high masks do not fit into the 32 bit values needed for ddraw. It is only
* used for ddraw mostly, and to figure out if the format has alpha at all, so
* setting a mask like 0x1 for those surfaces is correct. The 64 and 128 bit
* formats are not usable in 2D rendering because ddraw doesn't support them.
*/
static const struct StaticPixelFormatDesc formats[] =
{
/* WINED3DFORMAT alphamask redmask greenmask bluemask bpp depth stencil */
/* FourCC formats */
/* IEEE formats */
/* Hmm? */
/* Float */
/* Palettized formats */
/* Standard ARGB formats. */
/* Luminance */
/* Bump mapping stuff */
/* Depth stencil formats */
/* Vendor-specific formats */
};
struct wined3d_format_base_flags
{
};
static const struct wined3d_format_base_flags format_base_flags[] =
{
};
{
};
static const struct wined3d_format_compression_info format_compression_info[] =
{
};
/* Per-format information for vertex attribute data.
 * NOTE(review): the format_vertex_info[] table below initializes seven
 * values per entry, but only two members are declared here — this struct
 * appears truncated; confirm the missing members (GL type, count, etc.). */
struct wined3d_format_vertex_info
{
enum wined3d_ffp_emit_idx emit_idx;   /* fixed-function attribute emit index (WINED3D_FFP_EMIT_*) */
unsigned int component_size;          /* size of one component in bytes (sizeof(BYTE), sizeof(short int), ...) */
};
/* Vertex attribute format table.
 * NOTE(review): column meanings inferred from the values — format,
 * FFP emit index, component count, GL type, format count, GL-normalized
 * flag, component size. The struct declaration above only shows two
 * members, so the declaration appears truncated; verify before relying
 * on the column order. */
static const struct wined3d_format_vertex_info format_vertex_info[] =
{
{WINED3DFMT_B8G8R8A8_UNORM, WINED3D_FFP_EMIT_D3DCOLOR, 4, GL_UNSIGNED_BYTE, 4, GL_TRUE, sizeof(BYTE)},
{WINED3DFMT_R8G8B8A8_UINT, WINED3D_FFP_EMIT_UBYTE4, 4, GL_UNSIGNED_BYTE, 4, GL_FALSE, sizeof(BYTE)},
{WINED3DFMT_R16G16B16A16_SINT, WINED3D_FFP_EMIT_SHORT4, 4, GL_SHORT, 4, GL_FALSE, sizeof(short int)},
{WINED3DFMT_R8G8B8A8_UNORM, WINED3D_FFP_EMIT_UBYTE4N, 4, GL_UNSIGNED_BYTE, 4, GL_TRUE, sizeof(BYTE)},
{WINED3DFMT_R16G16B16A16_SNORM, WINED3D_FFP_EMIT_SHORT4N, 4, GL_SHORT, 4, GL_TRUE, sizeof(short int)},
{WINED3DFMT_R16G16_UNORM, WINED3D_FFP_EMIT_USHORT2N, 2, GL_UNSIGNED_SHORT, 2, GL_TRUE, sizeof(short int)},
{WINED3DFMT_R16G16B16A16_UNORM, WINED3D_FFP_EMIT_USHORT4N, 4, GL_UNSIGNED_SHORT, 4, GL_TRUE, sizeof(short int)},
{WINED3DFMT_R10G10B10A2_UINT, WINED3D_FFP_EMIT_UDEC3, 3, GL_UNSIGNED_SHORT, 3, GL_FALSE, sizeof(short int)},
{WINED3DFMT_R16G16B16A16_FLOAT, WINED3D_FFP_EMIT_FLOAT16_4, 4, GL_FLOAT, 4, GL_FALSE, sizeof(GLhalfNV)}
};
typedef struct {
unsigned int Flags;
/*****************************************************************************
* OpenGL format template. Contains unexciting formats which do not need
* extension checks. The order in this table is independent of the order in
* the table StaticPixelFormatDesc above. Not all formats have to be in this
* table.
*/
static const GlPixelFormatDescTemplate gl_formats_template[] = {
/* WINED3DFORMAT internal srgbInternal rtInternal
format type
flags
extension */
/* FourCC formats */
/* GL_APPLE_ycbcr_422 claims that its '2YUV' format, which is supported via the UNSIGNED_SHORT_8_8_REV_APPLE type
* is equivalent to 'UYVY' format on Windows, and the 'YUVS' via UNSIGNED_SHORT_8_8_APPLE equates to 'YUY2'. The
* d3d9 test however shows that the opposite is true. Since the extension is from 2002, it predates the x86 based
* Macs, so probably the endianess differs. This could be tested as soon as we have a Windows and MacOS on a big
* endian machine
*/
/* IEEE formats */
/* Float */
/* Palettized formats */
0,
0,
/* Standard ARGB formats */
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET,
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET,
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET,
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET,
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_RENDERTARGET,
/* Luminance */
0,
/* Bump mapping stuff */
/* Depth stencil formats */
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_DEPTH | WINED3DFMT_FLAG_STENCIL,
WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING | WINED3DFMT_FLAG_FILTERING | WINED3DFMT_FLAG_DEPTH | WINED3DFMT_FLAG_STENCIL,
/* Vendor-specific formats */
0,
0,
};
/* First check if the format is at the position of its value.
* This will catch the argb formats before the loop is entered
*/
return fmt;
} else {
unsigned int i;
return i;
}
}
}
return -1;
}
{
UINT i;
gl_info->gl_formats = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, format_count * sizeof(*gl_info->gl_formats));
if (!gl_info->gl_formats)
{
ERR("Failed to allocate memory.\n");
return FALSE;
}
for (i = 0; i < format_count; ++i)
{
}
for (i = 0; i < (sizeof(format_base_flags) / sizeof(*format_base_flags)); ++i)
{
if (fmt_idx == -1)
{
ERR("Format %s (%#x) not found.\n",
return FALSE;
}
}
return TRUE;
}
{
unsigned int i;
for (i = 0; i < (sizeof(format_compression_info) / sizeof(*format_compression_info)); ++i)
{
struct GlPixelFormatDesc *format_desc;
if (fmt_idx == -1)
{
ERR("Format %s (%#x) not found.\n",
return FALSE;
}
}
return TRUE;
}
/* Context activation is done by the caller. */
static void check_fbo_compat(const struct wined3d_gl_info *gl_info, struct GlPixelFormatDesc *format_desc)
{
/* Check if the default internal format is supported as a frame buffer
* target, otherwise fall back to the render target internal.
*
* Try to stick to the standard format if possible, this limits precision differences. */
ENTER_GL();
while(glGetError());
gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
checkGLcall("Framebuffer format check");
if (status == GL_FRAMEBUFFER_COMPLETE)
{
}
else
{
if (!format_desc->rtInternal)
{
{
FIXME("Format %s with rendertarget flag is not supported as FBO color attachment,"
}
else
{
TRACE("Format %s is not supported as FBO color attachment.\n", debug_d3dformat(format_desc->format));
}
}
else
{
TRACE("Format %s is not supported as FBO color attachment, trying rtInternal format as fallback.\n",
while(glGetError());
gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
checkGLcall("Framebuffer format check");
if (status == GL_FRAMEBUFFER_COMPLETE)
{
TRACE("Format %s rtInternal format is supported as FBO color attachment\n",
}
else
{
FIXME("Format %s rtInternal format is not supported as FBO color attachment.\n",
}
}
}
if (status == GL_FRAMEBUFFER_COMPLETE && format_desc->Flags & WINED3DFMT_FLAG_POSTPIXELSHADER_BLENDING)
{
{
gl_info->fbo_ops.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rb);
gl_info->fbo_ops.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rb);
checkGLcall("RB attachment");
}
if (glGetError() == GL_INVALID_FRAMEBUFFER_OPERATION)
{
while(glGetError());
TRACE("Format doesn't support post-pixelshader blending.\n");
}
{
gl_info->fbo_ops.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, 0);
gl_info->fbo_ops.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, 0);
checkGLcall("RB cleanup");
}
}
LEAVE_GL();
}
/* Context activation is done by the caller. */
{
unsigned int i;
{
ENTER_GL();
LEAVE_GL();
}
{
if (!desc->glInternal) continue;
{
continue;
}
{
TRACE("Skipping format %s because it's a compressed format.\n",
continue;
}
{
TRACE("Checking if format %s is supported as FBO color attachment...\n", debug_d3dformat(desc->format));
}
else
{
}
}
{
ENTER_GL();
LEAVE_GL();
}
}
{
unsigned int i;
for (i = 0; i < sizeof(gl_formats_template) / sizeof(gl_formats_template[0]); ++i)
{
struct GlPixelFormatDesc *desc;
if (fmt_idx == -1)
{
ERR("Format %s (%#x) not found.\n",
return FALSE;
}
}
return TRUE;
}
{
return TRUE;
}
/* A context is provided by the caller */
{
/* Render a filtered texture and see what happens. This is intended to detect the lack of
* float16 filtering on ATI X1000 class cards. The drivers disable filtering instead of
* falling back to software. If this changes in the future this code will get fooled and
* apps might hit the software path due to incorrectly advertised caps.
*
* Its unlikely that this changes however. GL Games like Mass Effect depend on the filter
* disable fallback, if Apple or ATI ever change the driver behavior they will break more
* than Wine. The Linux binary <= r500 driver is not maintained any more anyway
*/
ENTER_GL();
while(glGetError());
gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, buffer, 0);
glClearColor(0, 1, 0, 0);
glEnd();
{
TRACE("Read back colors 0x%08x and 0x%08x close to unfiltered color, asuming no filtering\n",
}
else
{
TRACE("Read back colors are 0x%08x and 0x%08x, assuming texture is filtered\n",
}
if(glGetError())
{
}
LEAVE_GL();
return ret;
}
static void init_format_filter_info(struct wined3d_gl_info *gl_info, enum wined3d_pci_vendor vendor)
{
unsigned int fmt_idx, i;
WINED3DFORMAT fmts16[] = {
};
struct GlPixelFormatDesc *desc;
{
WARN("No FBO support, or no FBO ORM, guessing filter info from GL caps\n");
{
TRACE("Nvidia card with texture_float support: Assuming float16 blending\n");
}
{
TRACE("More than 44 GLSL varyings - assuming d3d10 card with float16 blending\n");
}
else
{
TRACE("Assuming no float16 blending\n");
}
if(filtered)
{
{
}
}
return;
}
{
if(filtered)
{
}
else
{
}
}
}
{
int idx;
/* V8U8 is supported natively by GL_ATI_envmap_bumpmap and GL_NV_texture_shader.
* V16U16 is only supported by GL_NV_texture_shader. The formats need fixup if
* their extensions are not available. GL_ATI_envmap_bumpmap is not used because
* the only driver that implements it(fglrx) has a buggy implementation.
*
* V8U8 and V16U16 need a fixup of the undefined blue channel. OpenGL
* returns 0.0 when sampling from it, DirectX 1.0. So we always have in-shader
* conversion for this format.
*/
{
}
else
{
}
{
/* If GL_NV_texture_shader is not supported, those formats are converted, incompatibly
* with each other
*/
}
else
{
/* If GL_NV_texture_shader is supported, WINED3DFMT_L6V5U5 and WINED3DFMT_X8L8V8U8
* are converted at surface loading time, but they do not need any modification in
* the shader, thus they are compatible with all WINED3DFMT_UNKNOWN group formats.
* WINED3DFMT_Q8W8V8U8 doesn't even need load-time conversion
*/
}
{
}
{
}
{
}
{
}
{
/* Do not change the size of the type, it is CPU side. We have to change the GPU-side information though.
* It is the job of the vertex buffer code to make sure that the vbos have the right format */
}
}
{
unsigned int i;
for (i = 0; i < (sizeof(format_vertex_info) / sizeof(*format_vertex_info)); ++i)
{
struct GlPixelFormatDesc *format_desc;
if (fmt_idx == -1)
{
ERR("Format %s (%#x) not found.\n",
return FALSE;
}
}
return TRUE;
}
{
{
return FALSE;
}
return TRUE;
}
/* Context activation is done by the caller. */
{
return TRUE;
fail:
return FALSE;
}
const struct GlPixelFormatDesc *getFormatDescEntry(WINED3DFORMAT fmt, const struct wined3d_gl_info *gl_info)
{
if(idx == -1) {
/* Get the caller a valid pointer */
}
}
/*****************************************************************************
* Trace formatting of useful values
*/
switch (fmt) {
default:
{
char fourcc[5];
fourcc[4] = 0;
else
}
return "unrecognized";
}
}
switch (devtype) {
default:
return "unrecognized";
}
}
{
char buf[284];
buf[0] = '\0';
}
{
char buf[238];
buf[0] = '\0';
}
switch (method) {
#define WINED3DDECLMETHOD_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (usage) {
#define WINED3DDECLUSAGE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (res) {
default:
return "unrecognized";
}
}
switch (PrimitiveType) {
default:
return "unrecognized";
}
}
switch (state) {
#define D3DSTATE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (state) {
#define D3DSTATE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (filter_type) {
#define D3DTEXTUREFILTERTYPE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (state) {
#define D3DSTATE_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (d3dtop) {
#define D3DTOP_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch (tstype) {
default:
return ("WINED3DTS_WORLDMATRIX > 0");
}
return "unrecognized";
}
}
switch (Pool) {
#define POOL_TO_STR(p) case p: return #p
default:
return "unrecognized";
}
}
switch(status) {
#define FBOSTATUS_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch(error) {
#define GLERROR_TO_STR(u) case u: return #u
default:
return "unrecognized";
}
}
switch(basis) {
case WINED3DBASIS_BEZIER: return "WINED3DBASIS_BEZIER";
case WINED3DBASIS_BSPLINE: return "WINED3DBASIS_BSPLINE";
case WINED3DBASIS_INTERPOLATE: return "WINED3DBASIS_INTERPOLATE";
default: return "unrecognized";
}
}
switch(degree) {
case WINED3DDEGREE_LINEAR: return "WINED3DDEGREE_LINEAR";
case WINED3DDEGREE_QUADRATIC: return "WINED3DDEGREE_QUADRATIC";
case WINED3DDEGREE_CUBIC: return "WINED3DDEGREE_CUBIC";
case WINED3DDEGREE_QUINTIC: return "WINED3DDEGREE_QUINTIC";
default: return "unrecognized";
}
}
{
switch(source)
{
#define WINED3D_TO_STR(x) case x: return #x
default:
return "unrecognized";
}
}
{
switch(yuv_fixup)
{
#define WINED3D_TO_STR(x) case x: return #x
default:
return "unrecognized";
}
}
{
if (is_yuv_fixup(fixup))
{
return;
}
TRACE("\tX: %s%s\n", debug_fixup_channel_source(fixup.x_source), fixup.x_sign_fixup ? ", SIGN_FIXUP" : "");
TRACE("\tY: %s%s\n", debug_fixup_channel_source(fixup.y_source), fixup.y_sign_fixup ? ", SIGN_FIXUP" : "");
TRACE("\tZ: %s%s\n", debug_fixup_channel_source(fixup.z_source), fixup.z_sign_fixup ? ", SIGN_FIXUP" : "");
TRACE("\tW: %s%s\n", debug_fixup_channel_source(fixup.w_source), fixup.w_sign_fixup ? ", SIGN_FIXUP" : "");
}
char buf[128];
buf[0] = 0;
}
/*****************************************************************************
* Useful functions mapping GL <-> D3D values
*/
switch(op) {
case WINED3DSTENCILOP_KEEP : return GL_KEEP;
case WINED3DSTENCILOP_ZERO : return GL_ZERO;
case WINED3DSTENCILOP_REPLACE : return GL_REPLACE;
case WINED3DSTENCILOP_INCRSAT : return GL_INCR;
case WINED3DSTENCILOP_DECRSAT : return GL_DECR;
case WINED3DSTENCILOP_INVERT : return GL_INVERT;
case WINED3DSTENCILOP_INCR : return GL_INCR_WRAP_EXT;
case WINED3DSTENCILOP_DECR : return GL_DECR_WRAP_EXT;
default:
return GL_KEEP;
}
}
switch ((WINED3DCMPFUNC)func) {
case WINED3DCMP_NEVER : return GL_NEVER;
case WINED3DCMP_LESS : return GL_LESS;
case WINED3DCMP_EQUAL : return GL_EQUAL;
case WINED3DCMP_LESSEQUAL : return GL_LEQUAL;
case WINED3DCMP_GREATER : return GL_GREATER;
case WINED3DCMP_NOTEQUAL : return GL_NOTEQUAL;
case WINED3DCMP_GREATEREQUAL : return GL_GEQUAL;
case WINED3DCMP_ALWAYS : return GL_ALWAYS;
default:
return 0;
}
}
/* Reports whether the given texture stage operation / argument combination is
 * invalid for this device. Always returns FALSE here, i.e. every combination
 * is accepted as valid.
 * NOTE(review): no validation is performed in this body — confirm whether
 * argument checking was intended and has been lost, or whether this is a
 * deliberate accept-everything stub. */
BOOL is_invalid_op(IWineD3DDeviceImpl *This, int stage, WINED3DTEXTUREOP op, DWORD arg1, DWORD arg2, DWORD arg3) {
return FALSE;
}
/* Setup this textures matrix according to the texture flags*/
/* GL locking is done by the caller (state handler) */
{
float mat[16];
checkGLcall("glMatrixMode(GL_TEXTURE)");
checkGLcall("glLoadIdentity()");
return;
}
ERR("Invalid texture transform flags: WINED3DTTFF_COUNT1|WINED3DTTFF_PROJECTED\n");
return;
}
if (flags & WINED3DTTFF_PROJECTED) {
if(!ffp_proj_control) {
switch (flags & ~WINED3DTTFF_PROJECTED) {
case WINED3DTTFF_COUNT2:
break;
case WINED3DTTFF_COUNT3:
break;
}
}
} else { /* under directx the R/Z coord can be used for translation, under opengl we use the Q coord instead */
if(!calculatedCoords) {
switch(vtx_fmt)
{
case WINED3DFMT_R32_FLOAT:
/* Direct3D passes the default 1.0 in the 2nd coord, while gl passes it in the 4th.
* swap 2nd and 4th coord. No need to store the value of mat[12] in mat[4] because
* the input value to the transformation will be 0, so the matrix value is irrelevant
*/
break;
case WINED3DFMT_R32G32_FLOAT:
/* See above, just 3rd and 4th coord
*/
break;
case WINED3DFMT_R32G32B32_FLOAT: /* Opengl defaults match dx defaults */
case WINED3DFMT_R32G32B32A32_FLOAT: /* No defaults apply, all app defined */
/* This is to prevent swapping the matrix lines and put the default 4th coord = 1.0
* into a bad place. The division elimination below will apply to make sure the
* 1.0 doesn't do anything bad. The caller will set this value if the stride is 0
*/
case WINED3DFMT_UNKNOWN: /* No texture coords, 0/0/0/1 defaults are passed */
break;
default:
FIXME("Unexpected fixed function texture coord input\n");
}
}
if(!ffp_proj_control) {
switch (flags & ~WINED3DTTFF_PROJECTED) {
/* case WINED3DTTFF_COUNT1: Won't ever get here */
/* OpenGL divides the first 3 vertex coord by the 4th by default,
* which is essentially the same as D3DTTFF_PROJECTED. Make sure that
* the 4th coord evaluates to 1.0 to eliminate that.
*
* If the fixed function pipeline is used, the 4th value remains unused,
* so there is no danger in doing this. With vertex shaders we have a
* problem. Should an app hit that problem, the code here would have to
* check for pixel shaders, and the shader has to undo the default gl divide.
*
* A more serious problem occurs if the app passes 4 coordinates in, and the
* 4th is != 1.0(opengl default). This would have to be fixed in drawStridedSlow
* or a replacement shader
*/
}
}
}
checkGLcall("glLoadMatrixf(mat)");
}
/* This small helper function is used to convert a bitmask into the number of masked bits */
/* Count the number of set bits in a mask (population count).
 *
 * mask: the bitmask to examine.
 * Returns the number of bits set in mask (0..32).
 *
 * The original body left 'count' uninitialized and the loop empty, so an
 * indeterminate value was returned (undefined behavior). Implemented with
 * Brian Kernighan's method: mask &= mask - 1 clears the lowest set bit,
 * so the loop iterates exactly once per set bit. */
unsigned int count_bits(unsigned int mask)
{
unsigned int count;
for (count = 0; mask; ++count)
{
mask &= mask - 1;
}
return count;
}
/* Helper function for retrieving color info for ChoosePixelFormat and wglChoosePixelFormatARB.
* The later function requires individual color components. */
{
switch(format_desc->format)
{
case WINED3DFMT_B8G8R8_UNORM:
case WINED3DFMT_B5G6R5_UNORM:
case WINED3DFMT_B2G3R3_UNORM:
case WINED3DFMT_P8_UINT:
break;
default:
return FALSE;
}
TRACE("Returning red: %d, green: %d, blue: %d, alpha: %d, total: %d for fmt=%s\n",
return TRUE;
}
/* Helper function for retrieving depth/stencil info for ChoosePixelFormat and wglChoosePixelFormatARB */
BOOL getDepthStencilBits(const struct GlPixelFormatDesc *format_desc, short *depthSize, short *stencilSize)
{
switch(format_desc->format)
{
case WINED3DFMT_D16_LOCKABLE:
case WINED3DFMT_D16_UNORM:
case WINED3DFMT_X8D24_UNORM:
case WINED3DFMT_D32_UNORM:
case WINED3DFMT_D32_FLOAT:
break;
default:
return FALSE;
}
TRACE("Returning depthSize: %d and stencilSize: %d for fmt=%s\n",
return TRUE;
}
/* DirectDraw stuff */
switch(depth) {
case 8: return WINED3DFMT_P8_UINT;
case 15: return WINED3DFMT_B5G5R5X1_UNORM;
case 16: return WINED3DFMT_B5G6R5_UNORM;
case 32: return WINED3DFMT_B8G8R8X8_UNORM; /* EVE online and the Fur demo need 32bit AdapterDisplayMode to return WINED3DFMT_B8G8R8X8_UNORM */
default: return WINED3DFMT_UNKNOWN;
}
}
/* Now do the multiplication 'by hand'.
I know that all this could be optimised, but this will be done later :-) */
temp.u.s._11 = (src1->u.s._11 * src2->u.s._11) + (src1->u.s._21 * src2->u.s._12) + (src1->u.s._31 * src2->u.s._13) + (src1->u.s._41 * src2->u.s._14);
temp.u.s._21 = (src1->u.s._11 * src2->u.s._21) + (src1->u.s._21 * src2->u.s._22) + (src1->u.s._31 * src2->u.s._23) + (src1->u.s._41 * src2->u.s._24);
temp.u.s._31 = (src1->u.s._11 * src2->u.s._31) + (src1->u.s._21 * src2->u.s._32) + (src1->u.s._31 * src2->u.s._33) + (src1->u.s._41 * src2->u.s._34);
temp.u.s._41 = (src1->u.s._11 * src2->u.s._41) + (src1->u.s._21 * src2->u.s._42) + (src1->u.s._31 * src2->u.s._43) + (src1->u.s._41 * src2->u.s._44);
temp.u.s._12 = (src1->u.s._12 * src2->u.s._11) + (src1->u.s._22 * src2->u.s._12) + (src1->u.s._32 * src2->u.s._13) + (src1->u.s._42 * src2->u.s._14);
temp.u.s._22 = (src1->u.s._12 * src2->u.s._21) + (src1->u.s._22 * src2->u.s._22) + (src1->u.s._32 * src2->u.s._23) + (src1->u.s._42 * src2->u.s._24);
temp.u.s._32 = (src1->u.s._12 * src2->u.s._31) + (src1->u.s._22 * src2->u.s._32) + (src1->u.s._32 * src2->u.s._33) + (src1->u.s._42 * src2->u.s._34);
temp.u.s._42 = (src1->u.s._12 * src2->u.s._41) + (src1->u.s._22 * src2->u.s._42) + (src1->u.s._32 * src2->u.s._43) + (src1->u.s._42 * src2->u.s._44);
temp.u.s._13 = (src1->u.s._13 * src2->u.s._11) + (src1->u.s._23 * src2->u.s._12) + (src1->u.s._33 * src2->u.s._13) + (src1->u.s._43 * src2->u.s._14);
temp.u.s._23 = (src1->u.s._13 * src2->u.s._21) + (src1->u.s._23 * src2->u.s._22) + (src1->u.s._33 * src2->u.s._23) + (src1->u.s._43 * src2->u.s._24);
temp.u.s._33 = (src1->u.s._13 * src2->u.s._31) + (src1->u.s._23 * src2->u.s._32) + (src1->u.s._33 * src2->u.s._33) + (src1->u.s._43 * src2->u.s._34);
temp.u.s._43 = (src1->u.s._13 * src2->u.s._41) + (src1->u.s._23 * src2->u.s._42) + (src1->u.s._33 * src2->u.s._43) + (src1->u.s._43 * src2->u.s._44);
temp.u.s._14 = (src1->u.s._14 * src2->u.s._11) + (src1->u.s._24 * src2->u.s._12) + (src1->u.s._34 * src2->u.s._13) + (src1->u.s._44 * src2->u.s._14);
temp.u.s._24 = (src1->u.s._14 * src2->u.s._21) + (src1->u.s._24 * src2->u.s._22) + (src1->u.s._34 * src2->u.s._23) + (src1->u.s._44 * src2->u.s._24);
temp.u.s._34 = (src1->u.s._14 * src2->u.s._31) + (src1->u.s._24 * src2->u.s._32) + (src1->u.s._34 * src2->u.s._33) + (src1->u.s._44 * src2->u.s._34);
temp.u.s._44 = (src1->u.s._14 * src2->u.s._41) + (src1->u.s._24 * src2->u.s._42) + (src1->u.s._34 * src2->u.s._43) + (src1->u.s._44 * src2->u.s._44);
/* And copy the new matrix in the good storage.. */
}
int i;
switch (d3dvtVertexType & WINED3DFVF_POSITION_MASK) {
default: ERR("Unexpected position mask\n");
}
for (i = 0; i < numTextures; i++) {
}
return size;
}
/***********************************************************************
* CalculateTexRect
*
* Calculates the dimensions of the opengl texture used for blits.
* Handled oversized opengl textures and updates the source rectangle
* accordingly
*
* Params:
* This: Surface to operate on
* Rect: Requested rectangle
*
* Returns:
* TRUE if the texture part can be loaded,
* FALSE otherwise
*
*********************************************************************/
{
/* The sizes might be reversed */
}
}
/* No oversized texture? This is easy */
/* Which rect from the texture do I need? */
{
} else {
}
return TRUE;
} else {
/* Check if we can succeed at all */
TRACE("Requested rectangle is too large for gl\n");
return FALSE;
}
/* A part of the texture has to be picked. First, check if
* some texture part is loaded already, if yes try to re-use it.
* If the texture is dirty, or the part can't be used,
* re-position the part to load
*/
/* Ok, the rectangle is ok, re-use it */
TRACE("Using existing gl Texture\n");
} else {
/* Rectangle is not ok, dirtify the texture to reload it */
TRACE("Dirtifying texture to force reload\n");
}
}
/* Now if we are dirty(no else if!) */
/* Set the new rectangle. Use the following strategy:
* 1) Use as big textures as possible.
* 2) Place the texture part in the way that the requested
* part is in the middle of the texture(well, almost)
* 3) If the texture is moved over the edges of the
* surface, replace it nicely
* 4) If the coord is not limiting the texture size,
* use the whole size
*/
}
}
} else {
}
{
}
} else {
}
}
/* Re-calculate the rect to draw */
/* Get the gl coordinates. The gl rectangle is a power of 2, eigher the max size,
* or the pow2Width / pow2Height of the surface.
*
* Can never be GL_TEXTURE_RECTANGLE_ARB because oversized surfaces are always set up
* as regular GL_TEXTURE_2D.
*/
}
return TRUE;
}
void gen_ffp_frag_op(IWineD3DStateBlockImpl *stateblock, struct ffp_frag_settings *settings, BOOL ignore_textype) {
#define ARG1 0x01
#define ARG2 0x02
#define ARG0 0x04
/* undefined */ 0,
/* D3DTOP_DISABLE */ 0,
/* D3DTOP_SELECTARG1 */ ARG1,
/* D3DTOP_SELECTARG2 */ ARG2,
};
unsigned int i;
{
i++;
break;
}
if(texture) {
if(ignore_textype) {
} else {
case GL_TEXTURE_1D:
break;
case GL_TEXTURE_2D:
break;
case GL_TEXTURE_3D:
break;
case GL_TEXTURE_CUBE_MAP_ARB:
break;
case GL_TEXTURE_RECTANGLE_ARB:
break;
}
}
} else {
}
carg0 = ARG_UNUSED;
carg2 = ARG_UNUSED;
}
if(cop == WINED3DTOP_DOTPRODUCT3) {
/* A dotproduct3 on the colorop overwrites the alphaop operation and replicates
* the color result to the alpha component of the destination
*/
} else {
}
{
{
{
if (aop == WINED3DTOP_DISABLE)
{
}
{
{
}
else aarg1 = WINED3DTA_TEXTURE;
}
{
{
}
else aarg2 = WINED3DTA_TEXTURE;
}
}
}
}
aarg0 = ARG_UNUSED;
aarg2 = ARG_UNUSED;
}
} else {
}
} else {
}
} else {
}
}
/* Clear unsupported stages */
for(; i < MAX_TEXTURES; i++) {
}
if(use_vs(stateblock) || ((IWineD3DVertexDeclarationImpl *) stateblock->vertexDecl)->position_transformed) {
} else {
case WINED3DFOG_NONE:
case WINED3DFOG_LINEAR:
break;
case WINED3DFOG_EXP:
break;
case WINED3DFOG_EXP2:
break;
}
}
} else {
case WINED3DFOG_LINEAR:
break;
case WINED3DFOG_EXP:
break;
case WINED3DFOG_EXP2:
break;
}
}
} else {
settings->sRGB_write = 0;
}
/* No need to emulate clipplanes if GL supports native vertex shader clipping or if
* the fixed function vertex pipeline is used(which always supports clipplanes), or
* if no clipplane is enabled
*/
settings->emul_clipplanes = 0;
} else {
}
}
const struct ffp_frag_settings *settings)
{
}
{
/* Note that the key is the implementation independent part of the ffp_frag_desc structure,
* whereas desc points to an extended structure with implementation specific parts. */
{
ERR("Failed to insert ffp frag shader.\n");
}
}
/* Activates the texture dimension according to the bound D3D texture.
* Does not care for the colorop or correct gl texture unit(when using nvrc)
* Requires the caller to activate the correct unit before
*/
/* GL locking is done by the caller (state handler) */
void texture_activate_dimensions(DWORD stage, IWineD3DStateBlockImpl *stateblock, struct wined3d_context *context)
{
{
case GL_TEXTURE_2D:
checkGLcall("glDisable(GL_TEXTURE_3D)");
{
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
{
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
checkGLcall("glEnable(GL_TEXTURE_2D)");
break;
case GL_TEXTURE_RECTANGLE_ARB:
checkGLcall("glDisable(GL_TEXTURE_2D)");
checkGLcall("glDisable(GL_TEXTURE_3D)");
{
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
checkGLcall("glEnable(GL_TEXTURE_RECTANGLE_ARB)");
break;
case GL_TEXTURE_3D:
{
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
{
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
checkGLcall("glDisable(GL_TEXTURE_2D)");
checkGLcall("glEnable(GL_TEXTURE_3D)");
break;
case GL_TEXTURE_CUBE_MAP_ARB:
checkGLcall("glDisable(GL_TEXTURE_2D)");
checkGLcall("glDisable(GL_TEXTURE_3D)");
{
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
checkGLcall("glEnable(GL_TEXTURE_CUBE_MAP_ARB)");
break;
}
} else {
checkGLcall("glEnable(GL_TEXTURE_2D)");
checkGLcall("glDisable(GL_TEXTURE_3D)");
{
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
{
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
/* Binding textures is done by samplers. A dummy texture will be bound */
}
}
/* GL locking is done by the caller (state handler) */
void sampler_texdim(DWORD state, IWineD3DStateBlockImpl *stateblock, struct wined3d_context *context)
{
/* No need to enable / disable anything here for unused samplers. The tex_colorop
* handler takes care. Also no action is needed with pixel shaders, or if tex_colorop
* will take care of this business
*/
if (mapped_stage == WINED3D_UNMAPPED_STAGE || mapped_stage >= context->gl_info->limits.textures) return;
}
{
}
{
}
/* Free callback for the red-black tree (see wined3d_ffp_frag_program_rb_functions).
 * NOTE(review): the body is empty, so 'ptr' is never released here — given that
 * allocations elsewhere in this file use HeapAlloc(GetProcessHeap(), ...), this
 * looks like it should call HeapFree; verify whether tree node memory is owned
 * and freed by the caller instead, otherwise this leaks. */
void wined3d_rb_free(void *ptr)
{
}
{
const struct ffp_frag_settings *kb = &WINE_RB_ENTRY_VALUE(entry, const struct ffp_frag_desc, entry)->settings;
}
const struct wine_rb_functions wined3d_ffp_frag_program_rb_functions =
{
};
{
static const UINT l[] =
{
~0U, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
};
UINT32 i;
return (i = x >> 16) ? (x = i >> 8) ? l[x] + 24 : l[i] + 16 : (i = x >> 8) ? l[i] + 8 : l[x];
}
/* Set the shader type for this device, depending on the given capabilities
* and the user preferences in wined3d_settings. */
{
{
/* Geforce4 cards support GLSL but for vertex shaders only. Further its reported GLSL caps are
* wrong. This combined with the fact that glsl won't offer more features or performance, use ARB
* shaders only on this card. */
if (gl_info->supported[NV_VERTEX_PROGRAM] && !gl_info->supported[NV_VERTEX_PROGRAM2]) *vs_selected = SHADER_ARB;
else *vs_selected = SHADER_GLSL;
}
else *vs_selected = SHADER_NONE;
else if (gl_info->supported[ARB_FRAGMENT_SHADER] && wined3d_settings.glslRequested) *ps_selected = SHADER_GLSL;
else *ps_selected = SHADER_NONE;
}
/* Select the shader backend for the adapter/device type.
 * GLSL takes precedence when either the vertex or pixel shader stage selected
 * it; otherwise ARB programs are used if either stage selected those; with
 * neither, fall back to the no-op backend.
 * NOTE(review): vs_selected_mode / ps_selected_mode are used without a visible
 * declaration in this function — presumably they are computed from the adapter
 * capabilities (e.g. via select_shader_mode) in code not shown here; verify. */
const shader_backend_t *select_shader_backend(struct wined3d_adapter *adapter, WINED3DDEVTYPE device_type)
{
if (vs_selected_mode == SHADER_GLSL || ps_selected_mode == SHADER_GLSL) return &glsl_shader_backend;
if (vs_selected_mode == SHADER_ARB || ps_selected_mode == SHADER_ARB) return &arb_program_shader_backend;
return &none_shader_backend;
}
{
else return &ffp_fragment_pipeline;
}
const struct blit_shader *select_blit_implementation(struct wined3d_adapter *adapter, WINED3DDEVTYPE device_type)
{
else return &ffp_blit;
}