/* directx.c revision b535a806332554a7e017c6aeef00ed656199a9da */
/*
* IWineD3D implementation
*
* Copyright 2002-2004 Jason Edmeades
* Copyright 2003-2004 Raphael Junqueira
* Copyright 2004 Christian Costa
* Copyright 2005 Oliver Stieber
* Copyright 2007-2008 Stefan Dösinger for CodeWeavers
* Copyright 2009 Henri Verbeet for CodeWeavers
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
/*
* Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Oracle elects to use only
* the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
* a choice of LGPL license versions is made available with the language indicating
* that LGPLv2 or any later version may be used, or where a choice of which version
* of the LGPL is applied is otherwise unspecified.
*/
#include "config.h"
#include <stdio.h>
#include "wined3d_private.h"
#define GLINFO_LOCATION (*gl_info)
/* The d3d device ID */
static const GUID IID_D3DDEVICE_D3DUID = { 0xaeb2cdd4, 0x6e41, 0x43ea, { 0x94,0x1c,0x83,0x61,0xcc,0x76,0x07,0x81 } };
/* Extension detection */
static const struct {
const char *extension_string;
} EXTENSION_MAP[] = {
/* APPLE */
{"GL_APPLE_client_storage", APPLE_CLIENT_STORAGE, 0 },
{"GL_APPLE_fence", APPLE_FENCE, 0 },
{"GL_APPLE_float_pixels", APPLE_FLOAT_PIXELS, 0 },
{"GL_APPLE_flush_buffer_range", APPLE_FLUSH_BUFFER_RANGE, 0 },
{"GL_APPLE_flush_render", APPLE_FLUSH_RENDER, 0 },
{"GL_APPLE_ycbcr_422", APPLE_YCBCR_422, 0 },
/* ARB */
{"GL_ARB_color_buffer_float", ARB_COLOR_BUFFER_FLOAT, 0 },
{"GL_ARB_depth_buffer_float", ARB_DEPTH_BUFFER_FLOAT, 0 },
{"GL_ARB_depth_clamp", ARB_DEPTH_CLAMP, 0 },
{"GL_ARB_depth_texture", ARB_DEPTH_TEXTURE, 0 },
{"GL_ARB_draw_buffers", ARB_DRAW_BUFFERS, 0 },
{"GL_ARB_fragment_program", ARB_FRAGMENT_PROGRAM, 0 },
{"GL_ARB_fragment_shader", ARB_FRAGMENT_SHADER, 0 },
{"GL_ARB_framebuffer_object", ARB_FRAMEBUFFER_OBJECT, 0 },
{"GL_ARB_geometry_shader4", ARB_GEOMETRY_SHADER4, 0 },
{"GL_ARB_half_float_pixel", ARB_HALF_FLOAT_PIXEL, 0 },
{"GL_ARB_half_float_vertex", ARB_HALF_FLOAT_VERTEX, 0 },
{"GL_ARB_imaging", ARB_IMAGING, 0 },
{"GL_ARB_map_buffer_range", ARB_MAP_BUFFER_RANGE, 0 },
{"GL_ARB_multitexture", ARB_MULTITEXTURE, 0 },
{"GL_ARB_occlusion_query", ARB_OCCLUSION_QUERY, 0 },
{"GL_ARB_pixel_buffer_object", ARB_PIXEL_BUFFER_OBJECT, 0 },
{"GL_ARB_point_parameters", ARB_POINT_PARAMETERS, 0 },
{"GL_ARB_point_sprite", ARB_POINT_SPRITE, 0 },
{"GL_ARB_provoking_vertex", ARB_PROVOKING_VERTEX, 0 },
{"GL_ARB_shader_objects", ARB_SHADER_OBJECTS, 0 },
{"GL_ARB_shader_texture_lod", ARB_SHADER_TEXTURE_LOD, 0 },
{"GL_ARB_shading_language_100", ARB_SHADING_LANGUAGE_100, 0 },
{"GL_ARB_sync", ARB_SYNC, 0 },
{"GL_ARB_texture_border_clamp", ARB_TEXTURE_BORDER_CLAMP, 0 },
{"GL_ARB_texture_compression", ARB_TEXTURE_COMPRESSION, 0 },
{"GL_ARB_texture_cube_map", ARB_TEXTURE_CUBE_MAP, 0 },
{"GL_ARB_texture_env_add", ARB_TEXTURE_ENV_ADD, 0 },
{"GL_ARB_texture_env_combine", ARB_TEXTURE_ENV_COMBINE, 0 },
{"GL_ARB_texture_env_dot3", ARB_TEXTURE_ENV_DOT3, 0 },
{"GL_ARB_texture_float", ARB_TEXTURE_FLOAT, 0 },
{"GL_ARB_texture_mirrored_repeat", ARB_TEXTURE_MIRRORED_REPEAT, 0 },
{"GL_IBM_texture_mirrored_repeat", ARB_TEXTURE_MIRRORED_REPEAT, 0 },
{"GL_ARB_texture_rectangle", ARB_TEXTURE_RECTANGLE, 0 },
{"GL_ARB_texture_rg", ARB_TEXTURE_RG, 0 },
{"GL_ARB_vertex_array_bgra", ARB_VERTEX_ARRAY_BGRA, 0 },
{"GL_ARB_vertex_blend", ARB_VERTEX_BLEND, 0 },
{"GL_ARB_vertex_buffer_object", ARB_VERTEX_BUFFER_OBJECT, 0 },
{"GL_ARB_vertex_program", ARB_VERTEX_PROGRAM, 0 },
{"GL_ARB_vertex_shader", ARB_VERTEX_SHADER, 0 },
/* ATI */
{"GL_ATI_fragment_shader", ATI_FRAGMENT_SHADER, 0 },
{"GL_ATI_separate_stencil", ATI_SEPARATE_STENCIL, 0 },
{"GL_ATI_texture_compression_3dc", ATI_TEXTURE_COMPRESSION_3DC, 0 },
{"GL_ATI_texture_env_combine3", ATI_TEXTURE_ENV_COMBINE3, 0 },
{"GL_ATI_texture_mirror_once", ATI_TEXTURE_MIRROR_ONCE, 0 },
/* EXT */
{"GL_EXT_blend_color", EXT_BLEND_COLOR, 0 },
{"GL_EXT_blend_equation_separate", EXT_BLEND_EQUATION_SEPARATE, 0 },
{"GL_EXT_blend_func_separate", EXT_BLEND_FUNC_SEPARATE, 0 },
{"GL_EXT_blend_minmax", EXT_BLEND_MINMAX, 0 },
{"GL_EXT_draw_buffers2", EXT_DRAW_BUFFERS2, 0 },
{"GL_EXT_fog_coord", EXT_FOG_COORD, 0 },
{"GL_EXT_framebuffer_blit", EXT_FRAMEBUFFER_BLIT, 0 },
{"GL_EXT_framebuffer_multisample", EXT_FRAMEBUFFER_MULTISAMPLE, 0 },
{"GL_EXT_framebuffer_object", EXT_FRAMEBUFFER_OBJECT, 0 },
{"GL_EXT_gpu_program_parameters", EXT_GPU_PROGRAM_PARAMETERS, 0 },
{"GL_EXT_gpu_shader4", EXT_GPU_SHADER4, 0 },
{"GL_EXT_packed_depth_stencil", EXT_PACKED_DEPTH_STENCIL, 0 },
{"GL_EXT_paletted_texture", EXT_PALETTED_TEXTURE, 0 },
{"GL_EXT_point_parameters", EXT_POINT_PARAMETERS, 0 },
{"GL_EXT_provoking_vertex", EXT_PROVOKING_VERTEX, 0 },
{"GL_EXT_secondary_color", EXT_SECONDARY_COLOR, 0 },
{"GL_EXT_stencil_two_side", EXT_STENCIL_TWO_SIDE, 0 },
{"GL_EXT_stencil_wrap", EXT_STENCIL_WRAP, 0 },
{"GL_EXT_texture_compression_rgtc", EXT_TEXTURE_COMPRESSION_RGTC, 0 },
{"GL_EXT_texture_compression_s3tc", EXT_TEXTURE_COMPRESSION_S3TC, 0 },
{"GL_EXT_texture_env_add", EXT_TEXTURE_ENV_ADD, 0 },
{"GL_EXT_texture_env_combine", EXT_TEXTURE_ENV_COMBINE, 0 },
{"GL_EXT_texture_env_dot3", EXT_TEXTURE_ENV_DOT3, 0 },
{"GL_EXT_texture_filter_anisotropic", EXT_TEXTURE_FILTER_ANISOTROPIC, 0 },
{"GL_EXT_texture_lod_bias", EXT_TEXTURE_LOD_BIAS, 0 },
{"GL_EXT_texture_sRGB", EXT_TEXTURE_SRGB, 0 },
{"GL_EXT_vertex_array_bgra", EXT_VERTEX_ARRAY_BGRA, 0 },
/* NV */
{"GL_NV_depth_clamp", NV_DEPTH_CLAMP, 0 },
{"GL_NV_fence", NV_FENCE, 0 },
{"GL_NV_fog_distance", NV_FOG_DISTANCE, 0 },
{"GL_NV_fragment_program", NV_FRAGMENT_PROGRAM, 0 },
{"GL_NV_fragment_program2", NV_FRAGMENT_PROGRAM2, 0 },
{"GL_NV_fragment_program_option", NV_FRAGMENT_PROGRAM_OPTION, 0 },
{"GL_NV_half_float", NV_HALF_FLOAT, 0 },
{"GL_NV_light_max_exponent", NV_LIGHT_MAX_EXPONENT, 0 },
{"GL_NV_register_combiners", NV_REGISTER_COMBINERS, 0 },
{"GL_NV_register_combiners2", NV_REGISTER_COMBINERS2, 0 },
{"GL_NV_texgen_reflection", NV_TEXGEN_REFLECTION, 0 },
{"GL_NV_texture_env_combine4", NV_TEXTURE_ENV_COMBINE4, 0 },
{"GL_NV_texture_shader", NV_TEXTURE_SHADER, 0 },
{"GL_NV_texture_shader2", NV_TEXTURE_SHADER2, 0 },
{"GL_NV_vertex_program", NV_VERTEX_PROGRAM, 0 },
{"GL_NV_vertex_program1_1", NV_VERTEX_PROGRAM1_1, 0 },
{"GL_NV_vertex_program2", NV_VERTEX_PROGRAM2, 0 },
{"GL_NV_vertex_program2_option", NV_VERTEX_PROGRAM2_OPTION, 0 },
{"GL_NV_vertex_program3", NV_VERTEX_PROGRAM3, 0 },
/* SGI */
{"GL_SGIS_generate_mipmap", SGIS_GENERATE_MIPMAP, 0 },
};
/**********************************************************
* Utility functions follow
**********************************************************/
static HRESULT WINAPI IWineD3DImpl_CheckDeviceFormat(IWineD3D *iface, UINT Adapter, WINED3DDEVTYPE DeviceType, WINED3DFORMAT AdapterFormat, DWORD Usage, WINED3DRESOURCETYPE RType, WINED3DFORMAT CheckFormat, WINED3DSURFTYPE SurfaceType);
/* Maps D3D (min filter, mip filter) pairs to GL minification filter enums.
 * NOTE(review): the initializer rows appear to have been lost when this file
 * was extracted -- as written the array is empty, so any indexed access is
 * out of bounds.  Restore the GL_NEAREST/GL_LINEAR(_MIPMAP_*) rows from the
 * upstream wined3d sources before building; do not guess them here. */
const struct min_lookup minMipLookup[] =
{
/* NONE POINT LINEAR */
};
/* Variant of minMipLookup for hardware without texture filtering support.
 * NOTE(review): initializer rows stripped in extraction -- the array is
 * empty as written; restore from the upstream wined3d sources. */
const struct min_lookup minMipLookup_noFilter[] =
{
/* NONE POINT LINEAR */
};
/* Variant of minMipLookup for hardware/formats without mipmapping support.
 * NOTE(review): initializer rows stripped in extraction -- the array is
 * empty as written; restore from the upstream wined3d sources. */
const struct min_lookup minMipLookup_noMip[] =
{
/* NONE POINT LINEAR */
};
{
/* NONE POINT LINEAR */
};
/* Maps D3D magnification filters to GL enums for unfiltered hardware.
 * NOTE(review): initializer values stripped in extraction -- the array is
 * empty as written; restore from the upstream wined3d sources.  The
 * companion magLookup[] declaration just above was lost entirely (only its
 * orphaned initializer braces remain). */
const GLenum magLookup_noFilter[] =
{
/* NONE POINT LINEAR */
};
/* drawStridedSlow attributes */
/**
* Note: GL seems to trap if GetDeviceCaps is called before any HWND's created,
* i.e., there is no GL Context - Get a default rendering context to enable the
* function query some info from GL.
*/
/* Holds the temporary window/DC/GL context created so capability queries can
 * run before the application creates a real rendering context.
 * NOTE(review): the member declarations (presumably the fake window handle,
 * its HDC, the HGLRC, and the previously-current DC/context to restore)
 * were stripped during extraction -- an empty struct cannot support the
 * create/destroy code that follows.  Confirm against upstream sources. */
struct wined3d_fake_gl_ctx
{
};
{
{
}
{
}
{
}
}
{
int iPixelFormat;
TRACE("getting context...\n");
/* We need a fake window as a hdc retrieved using GetDC(0) can't be used for much GL purposes. */
{
goto fail;
}
{
goto fail;
}
/* PixelFormat selection */
pfd.dwFlags = PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER | PFD_DRAW_TO_WINDOW; /* PFD_GENERIC_ACCELERATED */
if (!iPixelFormat)
{
/* If this happens something is very wrong as ChoosePixelFormat barely fails. */
goto fail;
}
/* Create a GL context. */
{
goto fail;
}
/* Make it the current GL context. */
if (!context_set_current(NULL))
{
}
{
goto fail;
}
return TRUE;
fail:
{
}
return FALSE;
}
/* Adjust the amount of used texture memory */
{
return adapter->UsedTextureRam;
}
{
}
/**********************************************************
* IUnknown parts follows
**********************************************************/
{
return S_OK;
}
return E_NOINTERFACE;
}
return refCount;
}
if (ref == 0) {
unsigned int i;
for (i = 0; i < This->adapter_count; ++i)
{
}
#ifdef VBOX_WITH_WDDM
#endif
}
return ref;
}
/**********************************************************
* IWineD3D parts follows
**********************************************************/
/* GL locking is done by the caller */
{
const char *testcode =
"!!ARBvp1.0\n"
"PARAM C[66] = { program.env[0..65] };\n"
"ADDRESS A0;"
"PARAM zero = {0.0, 0.0, 0.0, 0.0};\n"
"ARL A0.x, zero.x;\n"
"MOV result.position, C[A0.x + 65];\n"
"END\n";
while(glGetError());
if(!prog) {
ERR("Failed to create an ARB offset limit test program\n");
}
if(glGetError() != 0) {
TRACE("OpenGL implementation does not allow indirect addressing offsets > 63\n");
} else TRACE("OpenGL implementation allows offsets > 63\n");
checkGLcall("ARB vp offset limit test cleanup");
return ret;
}
{
unsigned int i;
for (i = 0; i < (sizeof(EXTENSION_MAP) / sizeof(*EXTENSION_MAP)); ++i) {
return EXTENSION_MAP[i].version;
}
}
return 0;
}
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
return FALSE;
}
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
if (card_vendor == HW_VENDOR_NVIDIA)
{
{
return TRUE;
}
}
return FALSE;
}
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
/* MacOS has various specialities in the extensions it advertises. Some have to be loaded from
* the opengl 1.2+ core, while other extensions are advertised, but software emulated. So try to
* detect the Apple OpenGL implementation to apply some extension fixups afterwards.
*
* Detecting this isn't really easy. The vendor string doesn't mention Apple. Compile-time checks
* aren't sufficient either because a Linux binary may display on a macos X server via remote X11.
* So try to detect the GL implementation by looking at certain Apple extensions. Some extensions
* like client storage might be supported on other implementations too, but GL_APPLE_flush_render
* is specific to the Mac OS X window management, and GL_APPLE_ycbcr_422 is QuickTime specific. So
* the chance that other implementations support them is rather small since Win32 QuickTime uses
* DirectDraw, not OpenGL.
*
* This test has been moved into wined3d_guess_gl_vendor()
*/
if (gl_vendor == GL_VENDOR_APPLE)
{
return TRUE;
}
return FALSE;
}
/* Context activation is done by the caller. */
{
/* Some OpenGL implementations, namely Apple's Geforce 8 driver, advertises PBOs,
* but glTexSubImage from a PBO fails miserably, with the first line repeated over
* all the texture. This function detects this bug by its symptom and disables PBOs
* if the test fails.
*
* The test uploads a 4x4 texture via the PBO in the "native" format GL_BGRA,
* GL_UNSIGNED_INT_8_8_8_8_REV. This format triggers the bug, and it is what we use
* for D3DFMT_A8R8G8B8. Then the texture is read back without any PBO and the data
* read back is compared to the original. If they are equal PBOs are assumed to work,
* otherwise the PBO extension is disabled. */
static const unsigned int pattern[] =
{
0x00000000, 0x000000ff, 0x0000ff00, 0x40ff0000,
0x80ffffff, 0x40ffff00, 0x00ff00ff, 0x0000ffff,
0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x000000ff,
0x80ff00ff, 0x0000ffff, 0x00ff00ff, 0x40ff00ff
};
/* No PBO -> No point in testing them. */
ENTER_GL();
while (glGetError());
checkGLcall("Specifying the PBO test texture");
GL_EXTCALL(glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, sizeof(pattern), pattern, GL_STREAM_DRAW_ARB));
checkGLcall("Specifying the PBO test pbo");
checkGLcall("Loading the PBO test texture");
wglFinish(); /* just to be sure */
checkGLcall("Reading back the PBO test texture");
checkGLcall("PBO test cleanup");
LEAVE_GL();
{
}
else
{
}
}
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
}
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
return TRUE;
}
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
return gl_vendor == GL_VENDOR_FGLRX;
}
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
/* DX9 cards support 40 single float varyings in hardware, most drivers report 32. ATI misreports
* 44 varyings. So assume that if we have more than 44 varyings we have a dx10 card.
* This detection is for the gl_ClipPos varying quirk. If a d3d9 card really supports more than 44
* varyings and we subtract one in dx9 shaders its not going to hurt us because the dx9 limit is
* hardcoded
*
* dx10 cards usually have 64 varyings */
}
/* A GL context is provided by the caller */
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
ENTER_GL();
while(glGetError());
error = glGetError();
LEAVE_GL();
if(error == GL_NO_ERROR)
{
TRACE("GL Implementation accepts 4 component specular color pointers\n");
return TRUE;
}
else
{
TRACE("GL implementation does not accept 4 component specular colors, error %s\n",
return FALSE;
}
}
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
}
/* A GL context is provided by the caller */
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
const char *testcode =
"!!ARBvp1.0\n"
"OPTION NV_vertex_program2;\n"
"MOV result.clip[0], 0.0;\n"
"MOV result.position, 0.0;\n"
"END\n";
ENTER_GL();
while(glGetError());
if(!prog)
{
ERR("Failed to create the NVvp clip test program\n");
LEAVE_GL();
return FALSE;
}
if(pos != -1)
{
while(glGetError());
}
checkGLcall("GL_NV_vertex_program2_option result.clip[] test cleanup");
LEAVE_GL();
return ret;
}
/* Context activation is done by the caller. */
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
ENTER_GL();
checkGLcall("glTexImage2D");
gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
checkGLcall("glFramebufferTexture2D");
checkGLcall("glCheckFramebufferStatus");
checkGLcall("glTexSubImage2D");
checkGLcall("glClear");
checkGLcall("glGetTexImage");
checkGLcall("glBindTexture");
checkGLcall("glDeleteTextures");
LEAVE_GL();
}
{
TRACE_(d3d_caps)("Using ARB vs constant limit(=%u) for GLSL.\n", gl_info->limits.arb_vs_native_constants);
TRACE_(d3d_caps)("Using ARB ps constant limit(=%u) for GLSL.\n", gl_info->limits.arb_ps_native_constants);
}
{
/* MacOS needs uniforms for relative addressing offsets. This can accumulate to quite a few uniforms.
* Beyond that the general uniform isn't optimal, so reserve a number of uniforms. 12 vec4's should
* allow 48 different offsets or other helper immediate values. */
}
/* fglrx crashes with a very bad kernel panic if GL_POINT_SPRITE_ARB is set to GL_COORD_REPLACE_ARB
* on more than one texture unit. This means that the d3d9 visual point size test will cause a
* kernel panic on any machine running fglrx 9.3(latest that supports r300 to r500 cards). This
* quirk only enables point sprites on the first texture unit. This keeps point sprites working in
* most games, but avoids the crash
*
* A more sophisticated way would be to find all units that need texture coordinates and enable
* point sprites for one if only one is found, and software emulate point sprites in drawStridedSlow
* if more than one unit needs texture coordinates(This requires software ffp and vertex shaders though)
*
* Note that disabling the extension entirely does not gain predictability because there is no point
* sprite capability flag in d3d, so the potential rendering bugs are the same if we disable the extension. */
{
{
TRACE("Limiting point sprites to one texture unit.\n");
}
}
{
/* MacOS advertises GL_ARB_texture_non_power_of_two on ATI r500 and earlier cards, although
* these cards only support GL_ARB_texture_rectangle(D3DPTEXTURECAPS_NONPOW2CONDITIONAL).
* If real NP2 textures are used, the driver falls back to software. We could just remove the
* extension and use GL_ARB_texture_rectangle instead, but texture_rectangle is inconventient
* due to the non-normalized texture coordinates. Thus set an internal extension flag,
* GL_WINE_normalized_texrect, which signals the code that it can use non power of two textures
* as per GL_ARB_texture_non_power_of_two, but has to stick to the texture_rectangle limits.
*
* fglrx doesn't advertise GL_ARB_texture_non_power_of_two, but it advertises opengl 2.0 which
* has this extension promoted to core. The extension loading code sets this extension supported
* due to that, so this code works on fglrx as well. */
{
TRACE("GL_ARB_texture_non_power_of_two advertised on R500 or earlier card, removing.\n");
}
/* fglrx has the same structural issues as the one described in quirk_apple_glsl_constants, although
* it is generally more efficient. Reserve just 8 constants. */
}
{
/* The nVidia GeForceFX series reports OpenGL 2.0 capabilities with the latest drivers versions, but
* doesn't explicitly advertise the ARB_tex_npot extension in the GL extension string.
* This usually means that ARB_tex_npot is supported in hardware as long as the application is staying
* within the limits enforced by the ARB_texture_rectangle extension. This however is not true for the
* FX series, which instantly falls back to a slower software path as soon as ARB_tex_npot is used.
* We therefore completely remove ARB_tex_npot from the list of supported extensions.
*
* Note that wine_normalized_texrect can't be used in this case because internally it uses ARB_tex_npot,
* triggering the software fallback. There is not much we can do here apart from disabling the
* software-emulated extension and reenable ARB_tex_rect (which was previously disabled
* in IWineD3DImpl_FillGLCaps).
* This fixup removes performance problems on both the FX 5900 and FX 5700 (e.g. for framebuffer
* post-processing effects in the game "Max Payne 2").
* The behaviour can be verified through a simple test app attached in bugreport #14724. */
TRACE("GL_ARB_texture_non_power_of_two advertised through OpenGL 2.0 on NV FX card, removing.\n");
}
{
/* The Intel GPUs on MacOS set the .w register of texcoords to 0.0 by default, which causes problems
* with fixed function fragment processing. Ideally this flag should be detected with a test shader
* and OpenGL feedback mode, but some GL implementations (MacOS ATI at least, probably all MacOS ones)
* do not like vertex shaders in feedback mode and return an error, even though it should be valid
* according to the spec.
*
* We don't want to enable this on all cards, as it adds an extra instruction per texcoord used. This
* makes the shader slower and eats instruction slots which should be available to the d3d app.
*
* ATI Radeon HD 2xxx cards on MacOS have the issue. Instead of checking for the buggy cards, blacklist
* all radeon cards on Macs and whitelist the good ones. That way we're prepared for the future. If
* this workaround is activated on cards that do not need it, it won't break things, just affect
* performance negatively. */
TRACE("Enabling vertex texture coord fixes in vertex shaders.\n");
}
{
}
{
}
{
}
{
}
{
}
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
return FALSE;
}
{
}
#ifdef VBOX_WITH_WDDM
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
return TRUE;
}
{
return;
}
#endif
/* Describes one driver-bug workaround: a predicate deciding whether the
 * quirk applies to the detected GL implementation, plus a label for logs.
 * NOTE(review): the line below is the tail of a function-pointer member
 * whose first line (return type, member name and leading parameters, e.g.
 * "BOOL (*match)(const struct wined3d_gl_info *gl_info, ...") was lost in
 * extraction; an "apply" callback member is likely missing as well.
 * Restore from upstream before building. */
struct driver_quirk
{
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device);
const char *description;
};
/* Table of all known driver quirks; scanned once at adapter init, applying
 * every entry whose predicate matches the detected GL vendor/card.
 * NOTE(review): each entry originally carried a match callback and an apply
 * callback before the description string; those members were stripped in
 * extraction, leaving only the description literals.  Restore from the
 * upstream wined3d sources before building. */
static const struct driver_quirk quirk_table[] =
{
{
"ATI GLSL constant and normalized texrect quirk"
},
/* MacOS advertises more GLSL vertex shader uniforms than supported by the hardware, and if more are
* used it falls back to software. While the compiler can detect if the shader uses all declared
* uniforms, the optimization fails if the shader uses relative addressing. So any GLSL shader
* using relative addressing falls back to software.
*
* ARB vp gives the correct amount of uniforms, so use it instead of GLSL. */
{
"Apple GLSL uniform override"
},
{
"Geforce 5 NP2 disable"
},
{
"Init texcoord .w for Apple Intel GPU driver"
},
{
"Init texcoord .w for Apple ATI >= r600 GPU driver"
},
{
"Fglrx point sprite crash workaround"
},
{
"Reserved varying for gl_ClipPos"
},
{
/* GL_EXT_secondary_color does not allow 4 component secondary colors, but most
* GL implementations accept it. The Mac GL is the only implementation known to
* reject it.
*
* If we can pass 4 component specular colors, do it, because (a) we don't have
* to screw around with the data, and (b) the D3D fixed function vertex pipeline
* passes specular alpha to the pixel shader if any is used. Otherwise the
* specular alpha is used to pass the fog coordinate, which we pass to opengl
* via GL_EXT_fog_coord.
*/
"Allow specular alpha quirk"
},
{
/* The pixel formats provided by GL_NV_texture_shader are broken on OSX
* (rdar://5682521).
*/
"Apple NV_texture_shader disable"
},
{
"Apple NV_vertex_program clip bug quirk"
},
{
"FBO rebind for attachment updates"
},
{
"Fullsize blit"
},
#ifdef VBOX_WITH_WDDM
{
"disable shader 3 support"
},
#endif
};
/* Certain applications (Steam) complain if we report an outdated driver version. In general,
* reporting a driver version is moot because we are not the Windows driver, and we have different
* bugs, features, etc.
*
* The driver version has the form "x.y.z.w".
*
* "x" is the Windows version the driver is meant for:
* 4 -> 95/98/NT4
* 5 -> 2000
* 6 -> 2000/XP
* 7 -> Vista
* 8 -> Win 7
*
* "y" is the Direct3D level the driver supports:
* 11 -> d3d6
* 12 -> d3d7
* 13 -> d3d8
* 14 -> d3d9
* 15 -> d3d10
*
* "z" is unknown, possibly vendor specific.
*
* "w" is the vendor specific driver version.
*/
/* One row of the reported-driver-version table.
 * NOTE(review): members identifying the vendor/card and carrying the
 * reported version components were stripped in extraction -- the lookup
 * code below reads driver_version_table[i].d3d_level, which no longer
 * exists here.  Restore from the upstream wined3d sources. */
struct driver_version_information
{
const char *description; /* Description of the card e.g. NVIDIA RIVA TNT */
};
/* Per-card driver versions to report to applications that sniff them.
 * NOTE(review): all table rows were stripped in extraction -- the array is
 * empty as written, so the version lookup loop below never matches.
 * Restore the NVIDIA/ATI entries from the upstream wined3d sources. */
static const struct driver_version_information driver_version_table[] =
{
/* Nvidia drivers. Geforce6 and newer cards are supported by the current driver (180.x)
* GeforceFX support is up to 173.x, - driver uses numbering x.y.11.7341 for 173.41 where x is the windows revision (6=2000/xp, 7=vista), y is unknown
* Geforce2MX/3/4 up to 96.x - driver uses numbering 9.6.8.9 for 96.89
*
* All version numbers used below are from the Linux nvidia drivers. */
/* ATI cards. The driver versions are somewhat similar, but not quite the same. Let's hardcode. */
/* TODO: Add information about legacy ATI hardware, Intel and other cards. */
};
{
unsigned int i;
{
}
{
}
switch (vendor)
{
case HW_VENDOR_ATI:
break;
case HW_VENDOR_NVIDIA:
break;
case HW_VENDOR_INTEL:
default:
break;
}
if (!GetVersionExW(&os_version))
{
ERR("Failed to get OS version, reporting 2000/XP.\n");
driver_os_version = 6;
}
else
{
switch (os_version.dwMajorVersion)
{
case 4:
driver_os_version = 4;
break;
case 5:
driver_os_version = 6;
break;
case 6:
if (os_version.dwMinorVersion == 0)
{
driver_os_version = 7;
}
else
{
{
FIXME("Unhandled OS version %u.%u, reporting Win 7.\n",
}
driver_os_version = 8;
}
break;
default:
FIXME("Unhandled OS version %u.%u, reporting 2000/XP.\n",
driver_os_version = 6;
break;
}
}
for (i = 0; i < (sizeof(driver_version_table) / sizeof(driver_version_table[0])); ++i)
{
{
driver_info->version_high = MAKEDWORD_VERSION(driver_os_version, driver_version_table[i].d3d_level);
break;
}
}
}
/* Context activation is done by the caller. */
enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
unsigned int i;
for (i = 0; i < (sizeof(quirk_table) / sizeof(*quirk_table)); ++i)
{
}
/* Find out if PBOs work as they are supposed to. */
}
{
const char *ptr = gl_version;
}
/* Classifies the GL implementation (Apple, NVIDIA, fglrx, Intel, Mesa) from
 * the GL_VENDOR/GL_RENDERER strings and advertised extensions.
 * NOTE(review): the guard conditions in front of each return statement
 * (Apple-extension checks and strstr() matches on the vendor/renderer
 * strings) were stripped during extraction.  As written this function
 * unconditionally returns GL_VENDOR_APPLE and every later return is
 * unreachable.  Restore the conditions from the upstream sources. */
static enum wined3d_gl_vendor wined3d_guess_gl_vendor(struct wined3d_gl_info *gl_info, const char *gl_vendor_string, const char *gl_renderer)
{
/* MacOS has various specialities in the extensions it advertises. Some have to be loaded from
* the opengl 1.2+ core, while other extensions are advertised, but software emulated. So try to
* detect the Apple OpenGL implementation to apply some extension fixups afterwards.
*
* Detecting this isn't really easy. The vendor string doesn't mention Apple. Compile-time checks
* aren't sufficient either because a Linux binary may display on a macos X server via remote X11.
* So try to detect the GL implementation by looking at certain Apple extensions. Some extensions
* like client storage might be supported on other implementations too, but GL_APPLE_flush_render
* is specific to the Mac OS X window management, and GL_APPLE_ycbcr_422 is QuickTime specific. So
* the chance that other implementations support them is rather small since Win32 QuickTime uses
* DirectDraw, not OpenGL. */
return GL_VENDOR_APPLE;
return GL_VENDOR_NVIDIA;
return GL_VENDOR_FGLRX;
return GL_VENDOR_INTEL;
return GL_VENDOR_MESA;
return GL_VENDOR_UNKNOWN;
}
/* Maps the GL_VENDOR/GL_RENDERER strings to a PCI hardware vendor id.
 * NOTE(review): the strstr() guard conditions in front of each return were
 * stripped during extraction.  As written this function unconditionally
 * returns HW_VENDOR_NVIDIA on the first line and everything after it is
 * unreachable, including the FIXME fallback.  Restore the conditions from
 * the upstream sources. */
static enum wined3d_pci_vendor wined3d_guess_card_vendor(const char *gl_vendor_string, const char *gl_renderer)
{
return HW_VENDOR_NVIDIA;
return HW_VENDOR_ATI;
return HW_VENDOR_INTEL;
return HW_VENDOR_SOFTWARE;
FIXME_(d3d_caps)("Received unrecognized GL_VENDOR %s. Returning HW_VENDOR_NVIDIA.\n", debugstr_a(gl_vendor_string));
return HW_VENDOR_NVIDIA;
}
const char *gl_renderer, unsigned int *vidmem)
{
#ifndef VBOX_WITH_WDDM
if (WINE_D3D10_CAPABLE(gl_info))
#endif
{
/* Geforce 200 - highend */
{
*vidmem = 1024;
return CARD_NVIDIA_GEFORCE_GTX280;
}
/* Geforce 200 - midend high */
{
*vidmem = 896;
return CARD_NVIDIA_GEFORCE_GTX275;
}
/* Geforce 200 - midend */
{
*vidmem = 1024;
return CARD_NVIDIA_GEFORCE_GTX260;
}
/* Geforce 200 - midend */
{
*vidmem = 512;
return CARD_NVIDIA_GEFORCE_GT240;
}
/* Geforce9 - highend / Geforce 200 - midend (GTS 150/250 are based on the same core) */
{
*vidmem = 512;
return CARD_NVIDIA_GEFORCE_9800GT;
}
/* Geforce9 - midend */
{
return CARD_NVIDIA_GEFORCE_9600GT;
}
/* Geforce9 - midend low / Geforce 200 - low */
{
return CARD_NVIDIA_GEFORCE_9500GT;
}
/* Geforce9 - lowend */
{
return CARD_NVIDIA_GEFORCE_9400GT;
}
/* Geforce9 - lowend low */
{
return CARD_NVIDIA_GEFORCE_9200;
}
/* Geforce8 - highend */
{
return CARD_NVIDIA_GEFORCE_8800GTS;
}
/* Geforce8 - midend mobile */
{
*vidmem = 512;
return CARD_NVIDIA_GEFORCE_8600MGT;
}
/* Geforce8 - midend */
{
*vidmem = 256;
return CARD_NVIDIA_GEFORCE_8600GT;
}
/* Geforce8 - lowend */
{
return CARD_NVIDIA_GEFORCE_8300GS;
}
/* Geforce8-compatible fall back if the GPU is not in the list yet */
*vidmem = 128;
return CARD_NVIDIA_GEFORCE_8300GS;
}
/* Both the GeforceFX, 6xxx and 7xxx series support D3D9. The last two types have more
* shader capabilities, so we use the shader capabilities to distinguish between FX and 6xxx/7xxx.
*/
{
/* Geforce7 - highend */
{
return CARD_NVIDIA_GEFORCE_7800GT;
}
/* Geforce7 midend */
{
return CARD_NVIDIA_GEFORCE_7600;
}
/* Geforce7 lower medium */
{
return CARD_NVIDIA_GEFORCE_7400;
}
/* Geforce7 lowend */
{
return CARD_NVIDIA_GEFORCE_7300;
}
/* Geforce6 highend */
{
return CARD_NVIDIA_GEFORCE_6800;
}
/* Geforce6 - midend */
{
return CARD_NVIDIA_GEFORCE_6600GT;
}
/* Geforce6/7 lowend */
return CARD_NVIDIA_GEFORCE_6200; /* Geforce 6100/6150/6200/7300/7400/7500 */
}
if (WINE_D3D9_CAPABLE(gl_info))
{
/* GeforceFX - highend */
{
return CARD_NVIDIA_GEFORCEFX_5800;
}
/* GeforceFX - midend */
{
return CARD_NVIDIA_GEFORCEFX_5600;
}
/* GeforceFX - lowend */
return CARD_NVIDIA_GEFORCEFX_5200; /* GeforceFX 5100/5200/5250/5300/5500 */
}
if (WINE_D3D8_CAPABLE(gl_info))
{
{
return CARD_NVIDIA_GEFORCE4_TI4200; /* Geforce4 Ti4200/Ti4400/Ti4600/Ti4800, Quadro4 */
}
return CARD_NVIDIA_GEFORCE3; /* Geforce3 standard/Ti200/Ti500, Quadro DCC */
}
if (WINE_D3D7_CAPABLE(gl_info))
{
{
/* Most Geforce4MX GPUs have at least 64MB of memory, some
* early models had 32MB but most have 64MB or even 128MB. */
*vidmem = 64;
return CARD_NVIDIA_GEFORCE4_MX; /* MX420/MX440/MX460/MX4000 */
}
{
return CARD_NVIDIA_GEFORCE2_MX; /* Geforce2 standard/MX100/MX200/MX400, Quadro2 MXR */
}
{
return CARD_NVIDIA_GEFORCE2; /* Geforce2 GTS/Pro/Ti/Ultra, Quadro2 */
}
/* Most Geforce1 cards have 32MB, there are also some rare 16
* and 64MB (Dell) models. */
*vidmem = 32;
return CARD_NVIDIA_GEFORCE; /* Geforce 256/DDR, Quadro */
}
{
return CARD_NVIDIA_RIVA_TNT2; /* Riva TNT2 standard/M64/Pro/Ultra */
}
return CARD_NVIDIA_RIVA_TNT; /* Riva TNT, Vanta */
}
const char *gl_renderer, unsigned int *vidmem)
{
*
* Beware: renderer string do not match exact card model,
* eg HD 4800 is returned for multiple cards, even for RV790 based ones. */
#ifndef VBOX_WITH_WDDM
if (WINE_D3D10_CAPABLE(gl_info))
#endif
{
/* Radeon EG CYPRESS XT / PRO HD5800 - highend */
{
return CARD_ATI_RADEON_HD5800;
}
/* Radeon EG JUNIPER XT / LE HD5700 - midend */
{
return CARD_ATI_RADEON_HD5700;
}
/* Radeon R7xx HD4800 - highend */
{
return CARD_ATI_RADEON_HD4800;
}
/* Radeon R740 HD4700 - midend */
{
*vidmem = 512;
return CARD_ATI_RADEON_HD4700;
}
/* Radeon R730 HD4600 - midend */
{
*vidmem = 512;
return CARD_ATI_RADEON_HD4600;
}
{
*vidmem = 256;
return CARD_ATI_RADEON_HD4350;
}
{
return CARD_ATI_RADEON_HD2900;
}
{
return CARD_ATI_RADEON_HD2600;
}
* Note HD2300=DX9, HD2350=DX10 */
{
return CARD_ATI_RADEON_HD2350;
}
{
return CARD_ATI_RADEON_HD3200;
}
/* Default for when no GPU has been found */
return CARD_ATI_RADEON_HD3200;
}
if (WINE_D3D8_CAPABLE(gl_info))
{
/* Radeon R5xx */
{
return CARD_ATI_RADEON_X1600;
}
)
{
return CARD_ATI_RADEON_X700;
}
/* Radeon Xpress Series - onboard, DX9b, Shader 2.0, 300-400MHz */
{
return CARD_ATI_RADEON_XPRESS_200M;
}
/* Radeon R3xx */
return CARD_ATI_RADEON_9500; /* Radeon 9500/9550/9600/9700/9800/X300/X550/X600 */
}
if (WINE_D3D8_CAPABLE(gl_info))
{
return CARD_ATI_RADEON_8500; /* Radeon 8500/9000/9100/9200/9300 */
}
if (WINE_D3D7_CAPABLE(gl_info))
{
return CARD_ATI_RADEON_7200; /* Radeon 7000/7100/7200/7500 */
}
return CARD_ATI_RAGE_128PRO;
}
const char *gl_renderer, unsigned int *vidmem)
{
{
/* MacOS calls the card GMA X3100, Google findings also suggest the name GM965 */
*vidmem = 128;
return CARD_INTEL_X3100;
}
{
/* MacOS calls the card GMA 950, but everywhere else the PCI ID is named 945GM */
*vidmem = 64;
return CARD_INTEL_I945GM;
}
return CARD_INTEL_I915G;
}
const char *gl_renderer, unsigned int *vidmem)
{
/* ATI GPU detection from the GL_RENDERER string.
 * Beware: renderer strings do not match exact card models,
 * e.g. HD 4800 is returned for multiple cards, even for RV790 based ones. */
{
/* Radeon R7xx HD4800 - highend */
{
return CARD_ATI_RADEON_HD4800;
}
/* Radeon R740 HD4700 - midend */
{
*vidmem = 512;
return CARD_ATI_RADEON_HD4700;
}
/* Radeon R730 HD4600 - midend */
{
*vidmem = 512;
return CARD_ATI_RADEON_HD4600;
}
{
*vidmem = 256;
return CARD_ATI_RADEON_HD4350;
}
{
return CARD_ATI_RADEON_HD2900;
}
{
return CARD_ATI_RADEON_HD2600;
}
{
return CARD_ATI_RADEON_HD2350;
}
{
return CARD_ATI_RADEON_HD3200;
}
/* Radeon R5xx */
{
return CARD_ATI_RADEON_X1600;
}
{
return CARD_ATI_RADEON_X700;
}
/* Radeon Xpress Series - onboard, DX9b, Shader 2.0, 300-400MHz */
{
return CARD_ATI_RADEON_XPRESS_200M;
}
/* Radeon R3xx */
{
return CARD_ATI_RADEON_9500; /* Radeon 9500/9550/9600/9700/9800/X300/X550/X600 */
}
}
if (WINE_D3D9_CAPABLE(gl_info))
{
/* Radeon R7xx HD4800 - highend */
{
return CARD_ATI_RADEON_HD4800;
}
/* Radeon R740 HD4700 - midend */
{
*vidmem = 512;
return CARD_ATI_RADEON_HD4700;
}
/* Radeon R730 HD4600 - midend */
{
*vidmem = 512;
return CARD_ATI_RADEON_HD4600;
}
{
*vidmem = 256;
return CARD_ATI_RADEON_HD4350;
}
{
return CARD_ATI_RADEON_HD2900;
}
{
return CARD_ATI_RADEON_HD2600;
}
{
return CARD_ATI_RADEON_HD2350;
}
{
return CARD_ATI_RADEON_HD3200;
}
}
if (WINE_D3D8_CAPABLE(gl_info))
{
return CARD_ATI_RADEON_8500; /* Radeon 8500/9000/9100/9200/9300 */
}
if (WINE_D3D7_CAPABLE(gl_info))
{
return CARD_ATI_RADEON_7200; /* Radeon 7000/7100/7200/7500 */
}
return CARD_ATI_RAGE_128PRO;
}
const char *gl_renderer, unsigned int *vidmem)
{
#ifndef VBOX_WITH_WDDM
#else
/* tmp work around to disable quirk_no_np2 quirk for mesa drivers */
#endif
return CARD_NVIDIA_RIVA_128;
}
const char *gl_renderer, unsigned int *vidmem)
{
return CARD_INTEL_I915G;
}
/* Maps a detected GL vendor to a callback that guesses the PCI device id of
 * the card from the GL_RENDERER string. The callback may also write a default
 * video memory estimate (in MB) through *vidmem, as the select_card_* helpers
 * above do (e.g. "*vidmem = 512"). */
struct vendor_card_selection
{
enum wined3d_gl_vendor gl_vendor;
const char *description; /* Description of the card selector i.e. Apple OS/X Intel */
enum wined3d_pci_device (*select_card)(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
unsigned int *vidmem );
};
/* NOTE(review): this table is empty in this revision, so the selector loop in
 * wined3d_guess_card() below can never match an entry and always falls through
 * to its default (generic Nvidia) path. The vendor/selector entries appear to
 * have been stripped from this chunk -- confirm against the upstream file.
 * Also note an empty initializer list "{ }" is a GCC extension before C23. */
static const struct vendor_card_selection vendor_card_select_table[] =
{
};
static enum wined3d_pci_device wined3d_guess_card(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
{
/* Above is a list of Nvidia and ATI GPUs. Both vendors have dozens of
* different GPUs with roughly the same features. In most cases GPUs from a
* certain family differ in clockspeeds, the amount of video memory and the
* number of shader pipelines.
*
* A Direct3D device object contains the PCI id (vendor + device) of the
* videocard which is used for rendering. Various applications use this
* information to get a rough estimation of the features of the card and
* some might use it for enabling 3d effects only on certain types of
* videocards. In some cases games might even use it to work around bugs
* which happen on certain videocards/driver combinations. The problem is
* that OpenGL only exposes a rendering string containing the name of the
* videocard and not the PCI id.
*
* Various games depend on the PCI id, so somehow we need to provide one.
* A simple option is to parse the renderer string and translate this to
* the right PCI id. This is a lot of work because there are more than 200
* GPUs just for Nvidia. Various cards share the same renderer string, so
* the amount of code might be 'small' but there are quite a number of
* exceptions which would make this a pain to maintain. Another way would
* be to query the PCI id from the operating system (assuming this is the
* videocard which is used for rendering which is not always the case).
* This would work but it is not very portable. Second it would not work
* well in, let's say, a remote X situation in which the amount of 3d
* features which can be used is limited.
*
* As said most games only use the PCI id to get an indication of the
* capabilities of the card. It doesn't really matter if the given id is
* the correct one if we return the id of a card with similar 3d features.
*
* The code below checks the OpenGL capabilities of a videocard and matches
* that to a certain level of Direct3D functionality. Once a card passes
* the Direct3D9 check, we know that the card (in case of Nvidia) is at
* least a GeforceFX. To give a better estimate we do a basic check on the
* renderer string but if that won't pass we return a default card. This
* way is better than maintaining a full card database as even without a
* full database we can return a card with similar features. Second the
* size of the database can be made quite small because when you know what
* type of 3d functionality a card has, you know to which GPU family the
* GPU must belong. Because of this you only have to check a small part of
* the renderer string to distinguishes between different models from that
* family.
*
* The code also selects a default amount of video memory which we will
* use for an estimation of the amount of free texture memory. In case of
* real D3D the amount of texture memory includes video memory and system
* memory (to be specific AGP memory or in case of PCIE TurboCache /
* HyperMemory). We don't know how much system memory can be addressed by
* the system but we can make a reasonable estimation about the amount of
* video memory. If the value is slightly wrong it doesn't matter as we
* didn't include AGP-like memory which makes the amount of addressable
* memory higher and second OpenGL isn't that critical it moves to system
* memory behind our backs if really needed. Note that the amount of video
* memory can be overruled using a registry setting. */
int i;
for (i = 0; i < (sizeof(vendor_card_select_table) / sizeof(*vendor_card_select_table)); ++i)
{
continue;
}
*gl_vendor, *card_vendor);
/* Default to generic Nvidia hardware based on the supported OpenGL extensions. The choice
* for Nvidia was because the hardware and drivers they make are of good quality. This makes
* them a good generic choice. */
#ifndef VBOX_WITH_WDDM
#else
/* tmp work around to disable quirk_no_np2 quirk for not-recognized drivers */
#endif
return CARD_NVIDIA_RIVA_128;
}
/* Picks the fragment-processing pipeline implementation for the adapter,
 * falling back to the fixed-function pipeline when nothing better applies. */
static const struct fragment_pipeline *select_fragment_implementation(struct wined3d_adapter *adapter)
{
/* NOTE(review): dangling `else` -- the preceding `if (...)` branch(es) that
 * select a non-FFP fragment pipeline (e.g. an ARB/ATI shader-based one) appear
 * to be missing from this revision; as written this does not compile. */
else return &ffp_fragment_pipeline;
}
{
if (vs_selected_mode == SHADER_GLSL || ps_selected_mode == SHADER_GLSL) return &glsl_shader_backend;
if (vs_selected_mode == SHADER_ARB || ps_selected_mode == SHADER_ARB) return &arb_program_shader_backend;
return &none_shader_backend;
}
{
else return &ffp_blit;
}
/* Context activation is done by the caller. */
{
const char *GL_Extensions = NULL;
const char *WGL_Extensions = NULL;
struct fragment_caps fragment_caps;
enum wined3d_gl_vendor gl_vendor;
enum wined3d_pci_device device;
unsigned i;
unsigned int vidmem=0;
ENTER_GL();
if (!gl_renderer_str)
{
LEAVE_GL();
return FALSE;
}
if (!gl_vendor_str)
{
LEAVE_GL();
return FALSE;
}
/* Parse the GL_VERSION field into major and minor information */
if (!gl_version_str)
{
LEAVE_GL();
return FALSE;
}
/*
* Initialize openGL extension related variables
* with Default values
*/
gl_info->limits.combined_samplers = gl_info->limits.fragment_samplers + gl_info->limits.vertex_samplers;
/* Retrieve opengl defaults */
/* Parse the gl supported features, in theory enabling parts of our code appropriately. */
if (!GL_Extensions)
{
LEAVE_GL();
return FALSE;
}
LEAVE_GL();
while (*GL_Extensions)
{
const char *start;
char current_ext[256];
for (i = 0; i < (sizeof(EXTENSION_MAP) / sizeof(*EXTENSION_MAP)); ++i)
{
{
break;
}
}
}
/* Now work out what GL support this card really has */
{ \
}
ENTER_GL();
/* Now mark all the extensions supported which are included in the opengl core version. Do this *after*
* loading the functions, otherwise the code above will load the extension entry points instead of the
* core functions, which may not work. */
for (i = 0; i < (sizeof(EXTENSION_MAP) / sizeof(*EXTENSION_MAP)); ++i)
{
{
}
}
{
/* GL_NV_fence and GL_APPLE_fence provide the same functionality basically.
* The apple extension interacts with some other apple exts. Disable the NV
* extension if the apple one is support to prevent confusion in other parts
* of the code. */
}
{
/* GL_APPLE_float_pixels == GL_ARB_texture_float + GL_ARB_half_float_pixel
*
* The enums are the same:
* GL_RGBA16F_ARB = GL_RGBA_FLOAT16_APPLE = 0x881A
* GL_RGB16F_ARB = GL_RGB_FLOAT16_APPLE = 0x881B
* GL_RGBA32F_ARB = GL_RGBA_FLOAT32_APPLE = 0x8814
* GL_RGB32F_ARB = GL_RGB_FLOAT32_APPLE = 0x8815
* GL_HALF_FLOAT_ARB = GL_HALF_APPLE = 0x140B
*/
{
}
{
}
}
{
/* GL_ARB_map_buffer_range and GL_APPLE_flush_buffer_range provide the same
* functionality. Prefer the ARB extension */
}
{
}
{
}
{
}
{
{
/* Also disable ATI_FRAGMENT_SHADER if register combiners and texture_shader2
* are supported. The nv extensions provide the same functionality as the
* ATI one, and a bit more(signed pixelformats). */
}
}
{
}
{
}
{
{
}
else
{
}
{
/* Loading GLSL sampler uniforms is much simpler if we can assume that the sampler setup
* is known at shader link time. In a vertex shader + pixel shader combination this isn't
* an issue because then the sampler setup only depends on the two shaders. If a pixel
* shader is used with fixed function vertex processing we're fine too because fixed function
* vertex processing doesn't use any samplers. If fixed function fragment processing is
* used we have to make sure that all vertex sampler setups are valid together with all
* possible fixed function fragment processing setups. This is true if vsamplers + MAX_TEXTURES
* <= max_samplers. This is true on all d3d9 cards that support vtf(gf 6 and gf7 cards).
* dx9 radeon cards do not support vertex texture fetch. DX10 cards have 128 samplers, and
* dx9 is limited to 8 fixed function texture stages and 4 vertex samplers. DX10 does not have
* a fixed function pipeline anymore.
*
* So this is just a check to check that our assumption holds true. If not, write a warning
* and reduce the number of vertex samplers or probably disable vertex texture fetch. */
{
FIXME("OpenGL implementation supports %u vertex samplers and %u total samplers.\n",
FIXME("Expected vertex samplers + MAX_TEXTURES(=8) > combined_samplers.\n");
else
}
}
else
{
}
}
{
}
{
}
{
}
{
TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM float constants: %d.\n", gl_info->limits.arb_ps_float_constants);
GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB, &gl_max));
GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB, &gl_max));
TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM native temporaries: %d.\n", gl_info->limits.arb_ps_temps);
GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB, &gl_max));
TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM native instructions: %d.\n", gl_info->limits.arb_ps_instructions);
GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB, &gl_max));
TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM local parameters: %d.\n", gl_info->limits.arb_ps_instructions);
}
{
TRACE_(d3d_caps)("Max ARB_VERTEX_PROGRAM float constants: %d.\n", gl_info->limits.arb_vs_float_constants);
GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB, &gl_max));
GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB, &gl_max));
GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB, &gl_max));
TRACE_(d3d_caps)("Max ARB_VERTEX_PROGRAM native instructions: %d.\n", gl_info->limits.arb_vs_instructions);
}
{
#ifdef VBOX_WITH_WDDM
/* AFAICT the " / 4" here comes from that we're going to use the glsl_vs/ps_float_constants to create vec4 arrays,
* thus each array element has 4 components, so the actual number of vec4 arrays is GL_MAX_VERTEX/FRAGMENT_UNIFORM_COMPONENTS_ARB / 4
* win8 Aero won't properly work with this constant < 256 in any way,
* while Intel drivers I've encountered this problem with supports vec4 arrays of size > GL_MAX_VERTEX/FRAGMENT_UNIFORM_COMPONENTS_ARB / 4
* so use it here.
* @todo: add logging
* @todo: perhaps should be movet to quirks?
* */
{
/* tmp workaround Win8 Aero requirement for 256 */
{
}
}
#endif
TRACE_(d3d_caps)("Max ARB_VERTEX_SHADER float constants: %u.\n", gl_info->limits.glsl_vs_float_constants);
}
{
#ifdef VBOX_WITH_WDDM
/* AFAICT the " / 4" here comes from that we're going to use the glsl_vs/ps_float_constants to create vec4 arrays,
* thus each array element has 4 components, so the actual number of vec4 arrays is GL_MAX_VERTEX/FRAGMENT_UNIFORM_COMPONENTS_ARB / 4
* win8 Aero won't properly work with this constant < 256 in any way,
* while Intel drivers I've encountered this problem with supports vec4 arrays of size > GL_MAX_VERTEX/FRAGMENT_UNIFORM_COMPONENTS_ARB / 4
* so use it here.
* @todo: add logging
* @todo: perhaps should be movet to quirks?
* */
{
/* tmp workaround Win8 Aero requirement for 256 */
{
}
}
#endif
TRACE_(d3d_caps)("Max ARB_FRAGMENT_SHADER float constants: %u.\n", gl_info->limits.glsl_ps_float_constants);
}
{
/* The format of the GLSL version string is "major.minor[.release] [vendor info]". */
}
{
}
else
{
}
{
/* If we have full NP2 texture support, disable
* GL_ARB_texture_rectangle because we will never use it.
* This saves a few redundant glDisable calls. */
}
{
/* Disable NV_register_combiners and fragment shader if this is supported.
* generally the NV extensions are preferred over the ATI ones, and this
* extension is disabled if register_combiners and texture_shader2 are both
* supported. So we reach this place only if we have incomplete NV dxlevel 8
* fragment processing support. */
}
{
/* GL_ARB_half_float_vertex is a subset of GL_NV_half_float. */
}
{
}
else
{
}
checkGLcall("extension detection");
LEAVE_GL();
/* In some cases the number of texture stages can be larger than the number
* of samplers. The GF4 for example can use only 2 samplers (no fragment
* shaders), but 8 texture stages (register combiners). */
gl_info->limits.sampler_stages = max(gl_info->limits.fragment_samplers, gl_info->limits.texture_stages);
{
gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv = gl_info->glGetFramebufferAttachmentParameteriv;
}
else
{
{
gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv = gl_info->glGetFramebufferAttachmentParameterivEXT;
}
{
WARN_(d3d_caps)("Framebuffer objects not supported, falling back to backbuffer offscreen rendering mode.\n");
}
{
}
{
}
}
/* MRTs are currently only supported when FBOs are used. */
{
}
TRACE_(d3d_caps)("found GL_VENDOR (%s)->(0x%04x/0x%04x)\n", debugstr_a(gl_vendor_str), gl_vendor, card_vendor);
/* If we have an estimate use it, else default to 64MB; */
if(vidmem)
else
/* Make sure there's an active HDC else the WGL extensions will fail */
hdc = pwglGetCurrentDC();
if (hdc) {
/* Not all GL drivers might offer WGL extensions e.g. VirtualBox */
if (NULL == WGL_Extensions) {
ERR(" WGL_Extensions returns NULL\n");
} else {
while (*WGL_Extensions != 0x00) {
const char *Start;
char ThisExtn[256];
}
continue;
}
}
}
}
}
return TRUE;
}
/**********************************************************
* IWineD3D implementation follows
**********************************************************/
return This->adapter_count;
}
{
return WINED3D_OK;
}
return NULL;
}
}
/* FIXME: GetAdapterModeCount and EnumAdapterModes currently only returns modes
of the same bpp but different resolutions */
/* Note: dx9 supplies a format. Calls from d3d8 supply WINED3DFMT_UNKNOWN */
static UINT WINAPI IWineD3DImpl_GetAdapterModeCount(IWineD3D *iface, UINT Adapter, WINED3DFORMAT Format) {
return 0;
}
/* TODO: Store modes per adapter and read it from the adapter structure */
if (Adapter == 0) { /* Display */
const struct wined3d_format_desc *format_desc = getFormatDescEntry(Format, &This->adapters[Adapter].gl_info);
unsigned int i = 0;
unsigned int j = 0;
{
++j;
if (Format == WINED3DFMT_UNKNOWN)
{
/* This is for D3D8, do not enumerate P8 here */
}
{
++i;
}
}
return i;
} else {
}
return 0;
}
/* Note: dx9 supplies a format. Calls from d3d8 supply WINED3DFMT_UNKNOWN */
static HRESULT WINAPI IWineD3DImpl_EnumAdapterModes(IWineD3D *iface, UINT Adapter, WINED3DFORMAT Format, UINT Mode, WINED3DDISPLAYMODE* pMode) {
TRACE_(d3d_caps)("(%p}->(Adapter:%d, mode:%d, pMode:%p, format:%s)\n", This, Adapter, Mode, pMode, debug_d3dformat(Format));
/* Validate the parameters as much as possible */
return WINED3DERR_INVALIDCALL;
}
/* TODO: Store modes per adapter and read it from the adapter structure */
if (Adapter == 0)
{
const struct wined3d_format_desc *format_desc = getFormatDescEntry(Format, &This->adapters[Adapter].gl_info);
int ModeIdx = 0;
UINT i = 0;
int j = 0;
/* If we are filtering to a specific format (D3D9), then need to skip
all unrelated modes, but if mode is irrelevant (D3D8), then we can
just count through the ones with valid bit depths */
{
if (Format == WINED3DFMT_UNKNOWN)
{
/* This is for D3D8, do not enumerate P8 here */
}
{
++i;
}
}
if (i == 0) {
return WINED3DERR_INVALIDCALL;
}
ModeIdx = j - 1;
/* Now get the display mode via the calculated index */
if (Format == WINED3DFMT_UNKNOWN) {
} else {
}
} else {
return WINED3DERR_INVALIDCALL;
}
}
else
{
}
return WINED3D_OK;
}
static HRESULT WINAPI IWineD3DImpl_GetAdapterDisplayMode(IWineD3D *iface, UINT Adapter, WINED3DDISPLAYMODE *pMode)
{
return WINED3DERR_INVALIDCALL;
}
if (Adapter == 0) { /* Display */
int bpp = 0;
{
}
} else {
}
return WINED3D_OK;
}
{
TRACE("iface %p, adapter_idx %u, display_mode %p, display_rotation %p.\n", iface, Adapter, pMode, pRotation);
{
return WINED3D_OK;
}
{
return WINED3DERR_INVALIDCALL;
}
{
return WINED3DERR_INVALIDCALL;
}
if (Adapter == 0) { /* Display */
int bpp = 0;
if (pMode)
{
{
}
{
#if 0 //defined(RT_ARCH_AMD64) && !defined(VBOX_WITH_WDDM)
# ifndef DM_INTERLACED
# define DM_INTERLACED 0x00000002
# endif
#else
#endif
{
}
}
}
if (pRotation)
{
{
#if 0 //defined(RT_ARCH_AMD64) && !defined(VBOX_WITH_WDDM)
switch (DevModeW.dmDisplayOrientation)
#else
#endif
{
case DMDO_DEFAULT:
break;
case DMDO_90:
break;
case DMDO_180:
break;
case DMDO_270:
break;
default:
#if 0 //defined(RT_ARCH_AMD64) && !defined(VBOX_WITH_WDDM)
#else
#endif
break;
}
}
}
} else {
}
return WINED3D_OK;
}
/* NOTE: due to structure differences between dx8 and dx9 D3DADAPTER_IDENTIFIER,
and fields being inserted in the middle, a new structure is used in place */
struct wined3d_adapter *adapter;
return WINED3DERR_INVALIDCALL;
}
/* Return the information requested */
if (pIdentifier->driver_size)
{
}
if (pIdentifier->description_size)
{
}
/* Note that d3d8 doesn't supply a device name. */
if (pIdentifier->device_name_size)
{
{
ERR("Device name size too small.\n");
return WINED3DERR_INVALIDCALL;
}
}
pIdentifier->subsystem_id = 0;
pIdentifier->revision = 0;
memcpy(&pIdentifier->device_identifier, &IID_D3DDEVICE_D3DUID, sizeof(pIdentifier->device_identifier));
return WINED3D_OK;
}
static BOOL IWineD3DImpl_IsPixelFormatCompatibleWithRenderFmt(const struct wined3d_gl_info *gl_info,
{
if(!cfg)
return FALSE;
/* Float formats need FBOs. If FBOs are used this function isn't called */
{
return FALSE;
}
return FALSE;
return FALSE;
return FALSE;
return FALSE;
return TRUE;
}
/* Probably a RGBA_float or color index mode */
return FALSE;
}
{
short depthSize, stencilSize;
if(!cfg)
return FALSE;
{
return FALSE;
}
/* Float formats need FBOs. If FBOs are used this function isn't called */
if ((format_desc->format == WINED3DFMT_D16_LOCKABLE) || (format_desc->format == WINED3DFMT_D32_FLOAT))
/* On some modern cards like the Geforce8/9 GLX doesn't offer some dephthstencil formats which D3D9 reports.
* We can safely report 'compatible' formats (e.g. D24 can be used for D16) as long as we aren't dealing with
* a lockable format. This also helps D3D <= 7 as they expect D16 which isn't offered without this on Geforce8 cards. */
return FALSE;
/* Some cards like Intel i915 ones only offer D24S8 but lots of games also need a format without stencil, so
* allow more stencil bits than requested. */
return FALSE;
return TRUE;
}
static HRESULT WINAPI IWineD3DImpl_CheckDepthStencilMatch(IWineD3D *iface, UINT Adapter, WINED3DDEVTYPE DeviceType,
int nCfgs;
const WineD3D_PixelFormat *cfgs;
const struct wined3d_adapter *adapter;
const struct wined3d_format_desc *rt_format_desc;
const struct wined3d_format_desc *ds_format_desc;
int it;
WARN_(d3d_caps)("(%p)-> (STUB) (Adptr:%d, DevType:(%x,%s), AdptFmt:(%x,%s), RendrTgtFmt:(%x,%s), DepthStencilFmt:(%x,%s))\n",
TRACE("(%p) Failed: Atapter (%u) higher than supported adapters (%u) returning WINED3DERR_INVALIDCALL\n", This, Adapter, IWineD3D_GetAdapterCount(iface));
return WINED3DERR_INVALIDCALL;
}
{
return WINED3D_OK;
}
}
else
{
if (IWineD3DImpl_IsPixelFormatCompatibleWithRenderFmt(&adapter->gl_info, &cfgs[it], rt_format_desc))
{
{
return WINED3D_OK;
}
}
}
}
WARN_(d3d_caps)("unsupported format pair: %s and %s\n", debug_d3dformat(RenderTargetFormat), debug_d3dformat(DepthStencilFormat));
return WINED3DERR_NOTAVAILABLE;
}
static HRESULT WINAPI IWineD3DImpl_CheckDeviceMultiSampleType(IWineD3D *iface, UINT Adapter, WINED3DDEVTYPE DeviceType,
WINED3DFORMAT SurfaceFormat, BOOL Windowed, WINED3DMULTISAMPLE_TYPE MultiSampleType, DWORD *pQualityLevels)
{
const struct wined3d_format_desc *glDesc;
const struct wined3d_adapter *adapter;
TRACE_(d3d_caps)("(%p)-> (Adptr:%d, DevType:(%x,%s), SurfFmt:(%x,%s), Win?%d, MultiSamp:%x, pQual:%p)\n",
This,
return WINED3DERR_INVALIDCALL;
}
/* TODO: handle Windowed, add more quality levels */
if (WINED3DMULTISAMPLE_NONE == MultiSampleType) {
return WINED3D_OK;
}
/* By default multisampling is disabled right now as it causes issues
* on some Nvidia driver versions and it doesn't work well in combination
* with FBOs yet. */
return WINED3DERR_NOTAVAILABLE;
if (!glDesc) return WINED3DERR_INVALIDCALL;
int i, nCfgs;
const WineD3D_PixelFormat *cfgs;
for(i=0; i<nCfgs; i++) {
continue;
continue;
TRACE("Found iPixelFormat=%d to support MultiSampleType=%d for format %s\n", cfgs[i].iPixelFormat, MultiSampleType, debug_d3dformat(SurfaceFormat));
if(pQualityLevels)
return WINED3D_OK;
}
}
int i, nCfgs;
const WineD3D_PixelFormat *cfgs;
{
return WINED3DERR_NOTAVAILABLE;
}
for(i=0; i<nCfgs; i++) {
continue;
continue;
continue;
continue;
/* Not all drivers report alpha-less formats since they use 32-bit anyway, so accept alpha even if we didn't ask for it. */
continue;
continue;
TRACE("Found iPixelFormat=%d to support MultiSampleType=%d for format %s\n", cfgs[i].iPixelFormat, MultiSampleType, debug_d3dformat(SurfaceFormat));
if(pQualityLevels)
return WINED3D_OK;
}
}
return WINED3DERR_NOTAVAILABLE;
}
static HRESULT WINAPI IWineD3DImpl_CheckDeviceType(IWineD3D *iface, UINT Adapter, WINED3DDEVTYPE DeviceType,
{
TRACE("iface %p, adapter_idx %u, device_type %s, display_format %s, backbuffer_format %s, windowed %#x.\n",
return WINED3DERR_INVALIDCALL;
}
/* The task of this function is to check whether a certain display / backbuffer format
* combination is available on the given adapter. In fullscreen mode microsoft specified
* that the display format shouldn't provide alpha and that ignoring alpha the backbuffer
* and display format should match exactly.
* In windowed mode format conversion can occur and this depends on the driver. When format
* conversion is done, this function should nevertheless fail and applications need to use
* CheckDeviceFormatConversion.
* At the moment we assume that fullscreen and windowed have the same capabilities */
/* There are only 4 display formats */
if (!(DisplayFormat == WINED3DFMT_B5G6R5_UNORM
{
return WINED3DERR_NOTAVAILABLE;
}
/* If the requested DisplayFormat is not available, don't continue */
if(!nmodes) {
return WINED3DERR_NOTAVAILABLE;
}
/* Windowed mode allows you to specify WINED3DFMT_UNKNOWN for the backbufferformat, it means 'reuse' the display format for the backbuffer */
return WINED3DERR_NOTAVAILABLE;
}
/* In FULLSCREEN mode R5G6B5 can only be mixed with backbuffer format R5G6B5 */
{
TRACE_(d3d_caps)("Unsupported display/backbuffer format combination %s/%s\n", debug_d3dformat(DisplayFormat), debug_d3dformat(BackBufferFormat));
return WINED3DERR_NOTAVAILABLE;
}
/* In FULLSCREEN mode X1R5G5B5 can only be mixed with backbuffer format *1R5G5B5 */
&& !(BackBufferFormat == WINED3DFMT_B5G5R5X1_UNORM || BackBufferFormat == WINED3DFMT_B5G5R5A1_UNORM))
{
TRACE_(d3d_caps)("Unsupported display/backbuffer format combination %s/%s\n", debug_d3dformat(DisplayFormat), debug_d3dformat(BackBufferFormat));
return WINED3DERR_NOTAVAILABLE;
}
/* In FULLSCREEN mode X8R8G8B8 can only be mixed with backbuffer format *8R8G8B8 */
&& !(BackBufferFormat == WINED3DFMT_B8G8R8X8_UNORM || BackBufferFormat == WINED3DFMT_B8G8R8A8_UNORM))
{
TRACE_(d3d_caps)("Unsupported display/backbuffer format combination %s/%s\n", debug_d3dformat(DisplayFormat), debug_d3dformat(BackBufferFormat));
return WINED3DERR_NOTAVAILABLE;
}
/* A2R10G10B10 is only allowed in fullscreen mode and it can only be mixed with backbuffer format A2R10G10B10 */
{
TRACE_(d3d_caps)("Unsupported display/backbuffer format combination %s/%s\n", debug_d3dformat(DisplayFormat), debug_d3dformat(BackBufferFormat));
return WINED3DERR_NOTAVAILABLE;
}
/* Use CheckDeviceFormat to see if the BackBufferFormat is usable with the given DisplayFormat */
hr = IWineD3DImpl_CheckDeviceFormat(iface, Adapter, DeviceType, DisplayFormat, WINED3DUSAGE_RENDERTARGET, WINED3DRTYPE_SURFACE, BackBufferFormat, SURFACE_OPENGL);
TRACE_(d3d_caps)("Unsupported display/backbuffer format combination %s/%s\n", debug_d3dformat(DisplayFormat), debug_d3dformat(BackBufferFormat));
return hr;
}
/* Check if we support bumpmapping for a format */
{
switch(format_desc->format)
{
case WINED3DFMT_R8G8_SNORM:
case WINED3DFMT_R16G16_SNORM:
/* Ask the fixed function pipeline implementation if it can deal
* with the conversion. If we've got a GL extension giving native
* support this will be an identity conversion. */
{
return TRUE;
}
return FALSE;
default:
return FALSE;
}
}
/* Check if the given DisplayFormat + DepthStencilFormat combination is valid for the Adapter */
const struct wined3d_format_desc *display_format_desc, const struct wined3d_format_desc *ds_format_desc)
{
int it=0;
{
/* With FBOs WGL limitations do not apply, but the format needs to be FBO attachable */
}
else
{
/* Walk through all WGL pixel formats to find a match */
{
{
{
return TRUE;
}
}
}
}
return FALSE;
}
/* Reports whether the given format supports texture filtering. */
static BOOL CheckFilterCapability(struct wined3d_adapter *adapter, const struct wined3d_format_desc *format_desc)
{
/* The flags entry of a format contains the filtering capability */
/* NOTE(review): the actual test of format_desc's flags (and the TRUE path)
 * appears to have been dropped in this revision; as written this function
 * unconditionally reports "not filterable" for every format -- confirm
 * against the upstream file. */
return FALSE;
}
/* Check the render target capabilities of a format */
const struct wined3d_format_desc *adapter_format_desc, const struct wined3d_format_desc *check_format_desc)
{
/* Filter out non-RT formats */
int it;
getColorBits(adapter_format_desc, &AdapterRed, &AdapterGreen, &AdapterBlue, &AdapterAlpha, &AdapterTotalSize);
/* In backbuffer mode the front and backbuffer share the same WGL pixelformat.
* The format must match in RGB, alpha is allowed to be different. (Only the backbuffer can have alpha) */
return FALSE;
}
/* Check if there is a WGL pixel format matching the requirements, the format should also be window
* drawable (not offscreen; e.g. Nvidia offers R5G6B5 for pbuffers even when X is running at 24bit) */
{
{
return TRUE;
}
}
}
{
/* For now return TRUE for FBOs until we have some proper checks.
* Note that this function will only be called when the format is around for texturing. */
return TRUE;
}
return FALSE;
}
/* Reports whether the given format can be read as an sRGB ("gamma") texture. */
static BOOL CheckSrgbReadCapability(struct wined3d_adapter *adapter, const struct wined3d_format_desc *format_desc)
{
/* Check for supported sRGB formats (Texture loading and framebuffer) */
/* NOTE(review): this brace pair has no controlling `if` (presumably a check
 * for sRGB texture support was stripped in this revision); as written it
 * unconditionally returns FALSE and the switch below is dead code. */
{
return FALSE;
}
/* Formats accepted for sRGB ("gamma") texture reads. */
switch (format_desc->format)
{
case WINED3DFMT_L8_UNORM:
case WINED3DFMT_L8A8_UNORM:
case WINED3DFMT_DXT1:
case WINED3DFMT_DXT2:
case WINED3DFMT_DXT3:
case WINED3DFMT_DXT4:
case WINED3DFMT_DXT5:
return TRUE;
default:
TRACE_(d3d_caps)("[FAILED] Gamma texture format %s not supported.\n", debug_d3dformat(format_desc->format));
return FALSE;
}
/* Unreachable: every switch path returns above. */
return FALSE;
}
{
* doing the color fixup in shaders.
* Note Windows drivers (at least on the Geforce 8800) also offer this on R5G6B5. */
if ((format_desc->format == WINED3DFMT_B8G8R8X8_UNORM) || (format_desc->format == WINED3DFMT_B8G8R8A8_UNORM))
{
int vs_selected_mode;
int ps_selected_mode;
return TRUE;
}
}
TRACE_(d3d_caps)("[FAILED] - no SRGB writing support on format=%s\n", debug_d3dformat(format_desc->format));
return FALSE;
}
/* Check if a format support blending in combination with pixel shaders */
const struct wined3d_format_desc *format_desc)
{
/* The flags entry of a format contains the post pixel shader blending capability */
return FALSE;
}
/* Reports WINED3DUSAGE_QUERY_WRAPANDMIP support for a format. */
static BOOL CheckWrapAndMipCapability(struct wined3d_adapter *adapter, const struct wined3d_format_desc *format_desc)
{
/* Unconditionally advertise wrapping + mipmapping for every format.
 * OpenGL can mipmap essentially any format, and wrapping cannot be
 * rejected anyway: tests show Windows reports WRAPANDMIP even on
 * unfilterable surfaces, and the missing filter capability already
 * rules out mipmapping where it does not apply. If a game ever needs
 * it, this could be restricted to specific formats later. */
return TRUE;
}
/* Check if a texture format is supported on the given adapter */
{
switch (format_desc->format)
{
/*****
* supported: RGB(A) formats
*/
case WINED3DFMT_B8G8R8_UNORM: /* Enable for dx7, blacklisted for 8 and 9 above */
case WINED3DFMT_B5G6R5_UNORM:
case WINED3DFMT_A8_UNORM:
case WINED3DFMT_R16G16_UNORM:
return TRUE;
case WINED3DFMT_B2G3R3_UNORM:
return FALSE;
/*****
* Not supported: Palettized
* Since it is not widely available, don't offer it. Further no Windows driver offers
* WINED3DFMT_P8_UINT_A8_NORM, so don't offer it either.
*/
case WINED3DFMT_P8_UINT:
return FALSE;
/*****
* Supported: (Alpha)-Luminance
*/
case WINED3DFMT_L8_UNORM:
case WINED3DFMT_L8A8_UNORM:
case WINED3DFMT_L16_UNORM:
return TRUE;
/* Not supported on Windows, thus disabled */
case WINED3DFMT_L4A4_UNORM:
return FALSE;
/*****
*/
case WINED3DFMT_D16_LOCKABLE:
case WINED3DFMT_D16_UNORM:
case WINED3DFMT_X8D24_UNORM:
case WINED3DFMT_D32_UNORM:
case WINED3DFMT_D32_FLOAT:
return TRUE;
/*****
* Not supported everywhere(depends on GL_ATI_envmap_bumpmap or
* GL_NV_texture_shader). Emulated by shaders
*/
case WINED3DFMT_R8G8_SNORM:
case WINED3DFMT_R16G16_SNORM:
/* Ask the shader backend if it can deal with the conversion. If
* we've got a GL extension giving native support this will be an
* identity conversion. */
{
return TRUE;
}
return FALSE;
case WINED3DFMT_DXT1:
case WINED3DFMT_DXT2:
case WINED3DFMT_DXT3:
case WINED3DFMT_DXT4:
case WINED3DFMT_DXT5:
{
return TRUE;
}
return FALSE;
/*****
* Odd formats - not supported
*/
case WINED3DFMT_VERTEXDATA:
case WINED3DFMT_R16_UINT:
case WINED3DFMT_R32_UINT:
return FALSE;
/*****
* WINED3DFMT_R8G8_SNORM_Cx: Not supported right now
*/
case WINED3DFMT_R8G8_SNORM_Cx:
return FALSE;
/* YUV formats */
case WINED3DFMT_UYVY:
case WINED3DFMT_YUY2:
{
return TRUE;
}
return FALSE;
case WINED3DFMT_YV12:
return FALSE;
/* Not supported */
return FALSE;
/* Floating point formats */
case WINED3DFMT_R16_FLOAT:
case WINED3DFMT_R16G16_FLOAT:
{
return TRUE;
}
return FALSE;
case WINED3DFMT_R32_FLOAT:
case WINED3DFMT_R32G32_FLOAT:
{
return TRUE;
}
return FALSE;
/* ATI instancing hack: Although ATI cards do not support Shader Model 3.0, they support
* instancing. To query if the card supports instancing CheckDeviceFormat with the special format
* MAKEFOURCC('I','N','S','T') is used. Should a (broken) app check for this provide a proper return value.
* We can do instancing with all shader versions, but we need vertex shaders.
*
* Additionally applications have to set the D3DRS_POINTSIZE render state to MAKEFOURCC('I','N','S','T') once
* to enable instancing. WineD3D doesn't need that and just ignores it.
*
* With Shader Model 3.0 capable cards Instancing 'just works' in Windows.
*/
case WINED3DFMT_INST:
TRACE("ATI Instancing check hack\n");
{
return TRUE;
}
return FALSE;
/* Some weird FOURCC formats */
case WINED3DFMT_R8G8_B8G8:
case WINED3DFMT_G8R8_G8B8:
case WINED3DFMT_MULTI2_ARGB8:
return FALSE;
/* Vendor specific formats */
case WINED3DFMT_ATI2N:
{
{
return TRUE;
}
return TRUE;
}
return FALSE;
case WINED3DFMT_NVHU:
case WINED3DFMT_NVHS:
/* These formats seem to be similar to the HILO formats in GL_NV_texture_shader. NVHU
* is said to be GL_UNSIGNED_HILO16, NVHS GL_SIGNED_HILO16. Rumours say that d3d computes
* a 3rd channel similarly to D3DFMT_CxV8U8(So NVHS could be called D3DFMT_CxV16U16).
* ATI refused to support formats which can easilly be emulated with pixel shaders, so
* Applications have to deal with not having NVHS and NVHU.
*/
return FALSE;
case WINED3DFMT_UNKNOWN:
return FALSE;
default:
break;
}
return FALSE;
}
const struct wined3d_format_desc *adapter_format_desc,
{
if(SurfaceType == SURFACE_GDI) {
switch(check_format_desc->format)
{
case WINED3DFMT_B8G8R8_UNORM:
case WINED3DFMT_B5G6R5_UNORM:
case WINED3DFMT_B2G3R3_UNORM:
case WINED3DFMT_A8_UNORM:
case WINED3DFMT_R16G16_UNORM:
case WINED3DFMT_P8_UINT:
return TRUE;
default:
return FALSE;
}
}
/* All format that are supported for textures are supported for surfaces as well */
/* All depth stencil formats are supported on surfaces */
/* If opengl can't process the format natively, the blitter may be able to convert it */
{
return TRUE;
}
/* Reject other formats */
return FALSE;
}
const struct wined3d_format_desc *format_desc)
{
{
return FALSE;
}
switch (format_desc->format)
{
{
return FALSE;
}
return TRUE;
default:
return FALSE;
}
return FALSE;
}
static HRESULT WINAPI IWineD3DImpl_CheckDeviceFormat(IWineD3D *iface, UINT Adapter, WINED3DDEVTYPE DeviceType,
{
TRACE_(d3d_caps)("(%p)-> (STUB) (Adptr:%d, DevType:(%u,%s), AdptFmt:(%u,%s), Use:(%u,%s,%s), ResTyp:(%x,%s), CheckFmt:(%u,%s))\n",
This,
return WINED3DERR_INVALIDCALL;
}
if(RType == WINED3DRTYPE_CUBETEXTURE) {
if(SurfaceType != SURFACE_OPENGL) {
TRACE("[FAILED]\n");
return WINED3DERR_NOTAVAILABLE;
}
/* Cubetexture allows:
* - D3DUSAGE_AUTOGENMIPMAP
* - D3DUSAGE_DEPTHSTENCIL
* - D3DUSAGE_DYNAMIC
* - D3DUSAGE_NONSECURE (d3d9ex)
* - D3DUSAGE_RENDERTARGET
* - D3DUSAGE_SOFTWAREPROCESSING
* - D3DUSAGE_QUERY_WRAPANDMIP
*/
{
/* Check if the texture format is around */
{
if(Usage & WINED3DUSAGE_AUTOGENMIPMAP) {
/* Check for automatic mipmap generation support */
{
} else {
/* When autogenmipmap isn't around continue and return WINED3DOK_NOAUTOGEN instead of D3D_OK */
}
}
/* Always report dynamic locking */
if(Usage & WINED3DUSAGE_DYNAMIC)
if(Usage & WINED3DUSAGE_RENDERTARGET) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Always report software processing */
/* Check QUERY_FILTER support */
if(Usage & WINED3DUSAGE_QUERY_FILTER) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_POSTPIXELSHADER_BLENDING support */
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_SRGBREAD support */
if(Usage & WINED3DUSAGE_QUERY_SRGBREAD) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_SRGBWRITE support */
if(Usage & WINED3DUSAGE_QUERY_SRGBWRITE) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_VERTEXTEXTURE support */
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_WRAPANDMIP support */
if(Usage & WINED3DUSAGE_QUERY_WRAPANDMIP) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
} else {
return WINED3DERR_NOTAVAILABLE;
}
} else {
return WINED3DERR_NOTAVAILABLE;
}
} else if(RType == WINED3DRTYPE_SURFACE) {
/* Surface allows:
* - D3DUSAGE_DEPTHSTENCIL
* - D3DUSAGE_NONSECURE (d3d9ex)
* - D3DUSAGE_RENDERTARGET
*/
{
if(Usage & WINED3DUSAGE_DEPTHSTENCIL) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
if(Usage & WINED3DUSAGE_RENDERTARGET) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_POSTPIXELSHADER_BLENDING support */
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
} else {
return WINED3DERR_NOTAVAILABLE;
}
} else if(RType == WINED3DRTYPE_TEXTURE) {
/* Texture allows:
* - D3DUSAGE_AUTOGENMIPMAP
* - D3DUSAGE_DEPTHSTENCIL
* - D3DUSAGE_DMAP
* - D3DUSAGE_DYNAMIC
* - D3DUSAGE_NONSECURE (d3d9ex)
* - D3DUSAGE_RENDERTARGET
* - D3DUSAGE_SOFTWAREPROCESSING
* - D3DUSAGE_TEXTAPI (d3d9ex)
* - D3DUSAGE_QUERY_WRAPANDMIP
*/
if(SurfaceType != SURFACE_OPENGL) {
TRACE("[FAILED]\n");
return WINED3DERR_NOTAVAILABLE;
}
/* Check if the texture format is around */
{
if(Usage & WINED3DUSAGE_AUTOGENMIPMAP) {
/* Check for automatic mipmap generation support */
{
} else {
/* When autogenmipmap isn't around continue and return WINED3DOK_NOAUTOGEN instead of D3D_OK */
}
}
/* Always report dynamic locking */
if(Usage & WINED3DUSAGE_DYNAMIC)
if(Usage & WINED3DUSAGE_RENDERTARGET) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Always report software processing */
/* Check QUERY_FILTER support */
if(Usage & WINED3DUSAGE_QUERY_FILTER) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_LEGACYBUMPMAP support */
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_POSTPIXELSHADER_BLENDING support */
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_SRGBREAD support */
if(Usage & WINED3DUSAGE_QUERY_SRGBREAD) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_SRGBWRITE support */
if(Usage & WINED3DUSAGE_QUERY_SRGBWRITE) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_VERTEXTEXTURE support */
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_WRAPANDMIP support */
if(Usage & WINED3DUSAGE_QUERY_WRAPANDMIP) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
if(Usage & WINED3DUSAGE_DEPTHSTENCIL) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
} else {
return WINED3DERR_NOTAVAILABLE;
}
/* Volume is to VolumeTexture what Surface is to Texture but its usage caps are not documented.
* Most drivers seem to offer (nearly) the same on Volume and VolumeTexture, so do that too.
*
* Volumetexture allows:
* - D3DUSAGE_DYNAMIC
* - D3DUSAGE_NONSECURE (d3d9ex)
* - D3DUSAGE_SOFTWAREPROCESSING
* - D3DUSAGE_QUERY_WRAPANDMIP
*/
if(SurfaceType != SURFACE_OPENGL) {
TRACE("[FAILED]\n");
return WINED3DERR_NOTAVAILABLE;
}
/* Check volume texture and volume usage caps */
{
{
return WINED3DERR_NOTAVAILABLE;
}
/* Always report dynamic locking */
if(Usage & WINED3DUSAGE_DYNAMIC)
/* Always report software processing */
/* Check QUERY_FILTER support */
if(Usage & WINED3DUSAGE_QUERY_FILTER) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_POSTPIXELSHADER_BLENDING support */
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_SRGBREAD support */
if(Usage & WINED3DUSAGE_QUERY_SRGBREAD) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_SRGBWRITE support */
if(Usage & WINED3DUSAGE_QUERY_SRGBWRITE) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_VERTEXTEXTURE support */
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
/* Check QUERY_WRAPANDMIP support */
if(Usage & WINED3DUSAGE_QUERY_WRAPANDMIP) {
{
} else {
return WINED3DERR_NOTAVAILABLE;
}
}
} else {
return WINED3DERR_NOTAVAILABLE;
}
/* Filter formats that need conversion; for one thing, this conversion is unimplemented,
* and volume textures are huge, so it would be a big performance hit. Unless we hit an
* app needing one of those formats, don't advertise them to avoid leading apps into
* temptation. The Windows drivers don't support most of those formats on volumes anyway,
* except for R32F.
*/
switch(CheckFormat) {
case WINED3DFMT_P8_UINT:
case WINED3DFMT_L4A4_UNORM:
case WINED3DFMT_R32_FLOAT:
case WINED3DFMT_R16_FLOAT:
case WINED3DFMT_R16G16_UNORM:
return WINED3DERR_NOTAVAILABLE;
case WINED3DFMT_R16G16_SNORM:
{
return WINED3DERR_NOTAVAILABLE;
}
break;
case WINED3DFMT_R8G8_SNORM:
{
return WINED3DERR_NOTAVAILABLE;
}
break;
case WINED3DFMT_DXT1:
case WINED3DFMT_DXT2:
case WINED3DFMT_DXT3:
case WINED3DFMT_DXT4:
case WINED3DFMT_DXT5:
/* The GL_EXT_texture_compression_s3tc spec requires that loading an s3tc
* compressed texture results in an error. While the D3D refrast does
* support s3tc volumes, at least the nvidia windows driver does not, so
* we're free not to support this format.
*/
return WINED3DERR_NOTAVAILABLE;
default:
/* Do nothing, continue with checking the format below */
break;
}
} else if(RType == WINED3DRTYPE_BUFFER){
/* For instance vertexbuffer/indexbuffer aren't supported yet because no Windows drivers seem to offer it */
return WINED3DERR_NOTAVAILABLE;
}
/* When the UsageCaps exactly matches Usage return WINED3D_OK except for the situation in which
* WINED3DUSAGE_AUTOGENMIPMAP isn't around, then WINED3DOK_NOAUTOGEN is returned if all the other
* usage flags match. */
return WINED3D_OK;
} else if((UsageCaps == (Usage & ~WINED3DUSAGE_AUTOGENMIPMAP)) && (Usage & WINED3DUSAGE_AUTOGENMIPMAP)){
return WINED3DOK_NOAUTOGEN;
} else {
TRACE_(d3d_caps)("[FAILED] - Usage=%#08x requested for CheckFormat=%s and RType=%d but only %#08x is available\n", Usage, debug_d3dformat(CheckFormat), RType, UsageCaps);
return WINED3DERR_NOTAVAILABLE;
}
}
{
FIXME("iface %p, adapter_idx %u, device_type %s, src_format %s, dst_format %s stub!\n",
return WINED3D_OK;
}
/* Note: d3d8 passes in a pointer to a D3DCAPS8 structure, which is a true
subset of a D3DCAPS9 structure. However, it has to come via a void *
as the d3d8 interface cannot import the d3d9 header */
static HRESULT WINAPI IWineD3DImpl_GetDeviceCaps(IWineD3D *iface, UINT Adapter, WINED3DDEVTYPE DeviceType, WINED3DCAPS* pCaps) {
int vs_selected_mode;
int ps_selected_mode;
struct shader_caps shader_caps;
struct fragment_caps fragment_caps;
return WINED3DERR_INVALIDCALL;
}
/* ------------------------------------------------
The following fields apply to both d3d8 and d3d9
------------------------------------------------ */
pCaps->DeviceType = (DeviceType == WINED3DDEVTYPE_HAL) ? WINED3DDEVTYPE_HAL : WINED3DDEVTYPE_REF; /* Not quite true, but use h/w supported by opengl I suppose */
{
}
/* TODO:
WINED3DPMISCCAPS_NULLREFERENCE
WINED3DPMISCCAPS_FOGANDSPECULARALPHA
WINED3DPMISCCAPS_MRTINDEPENDENTBITDEPTHS
WINED3DPMISCCAPS_FOGVERTEXCLAMPED */
{
}
{
}
/* FIXME Add:
WINED3DPRASTERCAPS_COLORPERSPECTIVE
WINED3DPRASTERCAPS_STRETCHBLTMULTISAMPLE
WINED3DPRASTERCAPS_ANTIALIASEDGES
WINED3DPRASTERCAPS_ZBUFFERLESSHSR
WINED3DPRASTERCAPS_WBUFFER */
/* NOTE: WINED3DPBLENDCAPS_SRCALPHASAT is not supported as dest blend factor,
* according to the glBlendFunc manpage
*
* WINED3DPBLENDCAPS_BOTHINVSRCALPHA and WINED3DPBLENDCAPS_BOTHSRCALPHA are
* legacy settings for srcblend only
*/
{
}
{
}
{
}
{
}
{
}
{
{
}
} else
pCaps->CubeTextureFilterCaps = 0;
{
} else
pCaps->VolumeTextureFilterCaps = 0;
{
}
{
}
{
}
{
{
}
{
}
{
}
} else
/* WINED3DLINECAPS_ANTIALIAS is not supported on Windows, and dx and gl seem to have a different
* idea how generating the smoothing alpha values works; the result is different
*/
else
pCaps->MaxVolumeExtent = 0;
{
}
{
}
/* FIXME: Add D3DVTXPCAPS_TWEENING, D3DVTXPCAPS_TEXGEN_SPHEREMAP */
pCaps->MaxPrimitiveCount = 0xFFFFF; /* For now set 2^20-1 which is used by most >=Geforce3/Radeon8500 cards */
/* d3d9.dll sets D3DDEVCAPS2_CAN_STRETCHRECT_FROM_TEXTURES here because StretchRects is implemented in d3d9 */
pCaps->MasterAdapterOrdinal = 0;
pCaps->AdapterOrdinalInGroup = 0;
pCaps->VertexTextureFilterCaps = 0;
/* Add shader misc caps. Only some of them belong to the shader parts of the pipeline */
/* This takes care for disabling vertex shader or pixel shader caps while leaving the other one enabled.
* Ignore shader model capabilities if disabled in config
*/
if(vs_selected_mode == SHADER_NONE) {
pCaps->MaxVertexShaderConst = 0;
} else {
}
if(ps_selected_mode == SHADER_NONE) {
} else {
}
/* The following caps are shader specific, but they are things we cannot detect, or which
* are the same among all shader models. So to avoid code duplication set the shader version
* specific, but otherwise constant caps here
*/
/* Where possible set the caps based on OpenGL extensions and if they aren't set (in case of software rendering)
use the VS 3.0 from MSDN or else if there's OpenGL spec use a hardcoded value minimum VS3.0 value. */
pCaps->VS20Caps.DynamicFlowControlDepth = WINED3DVS20_MAX_DYNAMICFLOWCONTROLDEPTH; /* VS 3.0 requires MAX_DYNAMICFLOWCONTROLDEPTH (24) */
pCaps->VS20Caps.StaticFlowControlDepth = WINED3DVS20_MAX_STATICFLOWCONTROLDEPTH ; /* level of nesting in loops / if-statements; VS 3.0 requires MAX (4) */
pCaps->MaxVShaderInstructionsExecuted = 65535; /* VS 3.0 needs at least 65535, some cards even use 2^32-1 */
}
{
} else { /* VS 1.x */
}
/* Where possible set the caps based on OpenGL extensions and if they aren't set (in case of software rendering)
use the PS 3.0 from MSDN or else if there's OpenGL spec use a hardcoded value minimum PS 3.0 value. */
/* Caps is more or less undocumented on MSDN but it appears to be used for PS20Caps based on results from R9600/FX5900/Geforce6800 cards from Windows */
pCaps->PS20Caps.DynamicFlowControlDepth = WINED3DPS20_MAX_DYNAMICFLOWCONTROLDEPTH; /* PS 3.0 requires MAX_DYNAMICFLOWCONTROLDEPTH (24) */
pCaps->PS20Caps.StaticFlowControlDepth = WINED3DPS20_MAX_STATICFLOWCONTROLDEPTH; /* PS 3.0 requires MAX_STATICFLOWCONTROLDEPTH (4) */
pCaps->PS20Caps.NumInstructionSlots = WINED3DPS20_MAX_NUMINSTRUCTIONSLOTS; /* PS 3.0 requires MAX_NUMINSTRUCTIONSLOTS (512) */
}
{
/* Below we assume PS2.0 specs, not extended 2.0a(GeforceFX)/2.0b(Radeon R3xx) ones */
pCaps->PS20Caps.NumInstructionSlots = WINED3DPS20_MIN_NUMINSTRUCTIONSLOTS; /* Minimum number (64 ALU + 32 Texture), a GeforceFX uses 512 */
} else { /* PS 1.x */
}
/* OpenGL supports all the formats below, perhaps not always
* without conversion, but it supports them.
* Further GLSL doesn't seem to have an official unsigned type so
* don't advertise it yet as I'm not sure how we handle it.
* We might need to add some clamping in the shader engine to
* support it.
* TODO: WINED3DDTCAPS_USHORT2N, WINED3DDTCAPS_USHORT4N, WINED3DDTCAPS_UDEC3, WINED3DDTCAPS_DEC3N */
{
}
} else
/* Set DirectDraw helper Caps */
/* Fill the ddraw caps structure */
/* Set D3D caps if OpenGL is available. */
{
}
return WINED3D_OK;
}
{
TRACE("iface %p, adapter_idx %u, device_type %#x, focus_window %p, flags %#x.\n"
"parent %p, device_parent %p, device %p.\n",
/* Validate the adapter number. If no adapters are available(no GL), ignore the adapter
* number and create a device without a 3D adapter for 2D only operation. */
{
return WINED3DERR_INVALIDCALL;
}
if (!object)
{
ERR("Failed to allocate device memory.\n");
return E_OUTOFMEMORY;
}
hr = device_init(object, This, adapter_idx, device_type, focus_window, flags, parent, device_parent);
{
return hr;
}
return WINED3D_OK;
}
return WINED3D_OK;
}
{
ERR("Invalid vertex attribute function called\n");
DebugBreak();
}
{
ERR("Invalid texcoord function called\n");
DebugBreak();
}
/* Helper functions for providing vertex data to opengl. The arrays are initialized based on
* the extension detection and are used in drawStridedSlow
*/
{
FIXME("Add a test for fixed function position from d3dcolor type\n");
D3DCOLOR_B_A(pos));
}
{
{
}
else
{
}
}
{
}
{
}
{
WARN("GL_EXT_secondary_color not supported\n");
}
{
/* No 4 component entry points here */
{
}
else
{
}
{
}
else
{
}
/* Only 3 component entry points here. Test how others behave. Float4 normals are used
* by one of our tests, trying to pass it to the pixel shader, which fails on Windows.
*/
multi_texcoord_funcs[WINED3D_FFP_EMIT_FLOAT1] = (glMultiTexCoordFunc)GL_EXTCALL(glMultiTexCoord1fvARB);
multi_texcoord_funcs[WINED3D_FFP_EMIT_FLOAT2] = (glMultiTexCoordFunc)GL_EXTCALL(glMultiTexCoord2fvARB);
multi_texcoord_funcs[WINED3D_FFP_EMIT_FLOAT3] = (glMultiTexCoordFunc)GL_EXTCALL(glMultiTexCoord3fvARB);
multi_texcoord_funcs[WINED3D_FFP_EMIT_FLOAT4] = (glMultiTexCoordFunc)GL_EXTCALL(glMultiTexCoord4fvARB);
multi_texcoord_funcs[WINED3D_FFP_EMIT_SHORT2] = (glMultiTexCoordFunc)GL_EXTCALL(glMultiTexCoord2svARB);
multi_texcoord_funcs[WINED3D_FFP_EMIT_SHORT4] = (glMultiTexCoordFunc)GL_EXTCALL(glMultiTexCoord4svARB);
{
/* Not supported by ARB_HALF_FLOAT_VERTEX, so check for NV_HALF_FLOAT */
multi_texcoord_funcs[WINED3D_FFP_EMIT_FLOAT16_2] = (glMultiTexCoordFunc)GL_EXTCALL(glMultiTexCoord2hvNV);
multi_texcoord_funcs[WINED3D_FFP_EMIT_FLOAT16_4] = (glMultiTexCoordFunc)GL_EXTCALL(glMultiTexCoord4hvNV);
} else {
}
}
{
/* No need to hold any lock. The calling library makes sure only one thread calls
* wined3d simultaneously
*/
TRACE("Initializing adapters\n");
if(!mod_gl) {
#ifdef USE_WIN32_OPENGL
#if defined(VBOX_WITH_WDDM) || defined(VBOX_WINE_WITH_SINGLE_SWAPCHAIN_CONTEXT)
#ifdef VBOX_WDDM_WOW64
#else
#endif
#else
#endif
if(!mod_gl) {
ERR("Can't load opengl32.dll!\n");
goto nogl_adapter;
}
#if defined(VBOX_WITH_WDDM) || defined(VBOX_WINE_WITH_SINGLE_SWAPCHAIN_CONTEXT)
/* init properly */
if(!pDrvValidateVersion) {
ERR("Can't get DrvValidateVersion\n");
goto nogl_adapter;
}
if(!pDrvValidateVersion(1)) {
ERR("DrvValidateVersion FAILED\n");
goto nogl_adapter;
}
#endif
#else
/* To bypass the opengl32 thunks load wglGetProcAddress from gdi32 (glXGetProcAddress wrapper) instead of opengl32's */
#endif
}
/* Load WGL core functions from opengl32.dll */
if(!pwglGetProcAddress) {
ERR("Unable to load wglGetProcAddress!\n");
goto nogl_adapter;
}
/* Dynamically load all GL core functions */
/* Load glFinish and glFlush from opengl32.dll even if we're not using WIN32 opengl
* otherwise because we have to use winex11.drv's override
*/
#ifdef USE_WIN32_OPENGL
#else
#endif
/* For now only one default adapter */
{
struct wined3d_fake_gl_ctx fake_gl_ctx = {0};
int iPixelFormat;
int res;
int i;
TRACE("Initializing default adapter\n");
{
goto nogl_adapter;
}
TRACE("Allocated LUID %08x:%08x for adapter.\n",
{
ERR("Failed to get a gl context for default adapter\n");
goto nogl_adapter;
}
if(!ret) {
ERR("Failed to initialize gl caps for default adapter\n");
goto nogl_adapter;
}
if(!ret) {
ERR("Failed to init gl formats\n");
goto nogl_adapter;
}
/* Use the VideoRamSize registry setting when set */
else
adapter->UsedTextureRam = 0;
/* Initialize the Adapter's DeviceName which is required for ChangeDisplaySettings and friends */
{
int attribute;
int attribs[11];
int values[11];
int nAttribs = 0;
adapter->cfgs = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, adapter->nCfgs *sizeof(WineD3D_PixelFormat));
{
if(!res)
continue;
/* Cache the pixel format */
cfgs->numSamples = 0;
/* Check multisample support */
{
int value[2];
/* value[0] = WGL_SAMPLE_BUFFERS_ARB which tells whether multisampling is supported.
* value[1] = number of multi sample buffers*/
if(value[0])
}
}
TRACE("iPixelFormat=%d, iPixelType=%#x, doubleBuffer=%d, RGBA=%d/%d/%d/%d, "
"depth=%d, stencil=%d, samples=%d, windowDrawable=%d\n",
cfgs++;
}
}
else
{
adapter->nCfgs = 0; /* We won't accept all formats e.g. software accelerated ones will be skipped */
{
if(!res)
continue;
/* We only want HW acceleration using an OpenGL ICD driver.
* PFD_GENERIC_FORMAT = slow opengl 1.1 gdi software rendering
* PFD_GENERIC_ACCELERATED = partial hw acceleration using a MCD driver (e.g. 3dfx minigl)
*/
{
continue;
}
cfgs->iPixelType = (ppfd.iPixelType == PFD_TYPE_RGBA) ? WGL_TYPE_RGBA_ARB : WGL_TYPE_COLORINDEX_ARB;
cfgs->numSamples = 0;
TRACE("iPixelFormat=%d, iPixelType=%#x, doubleBuffer=%d, RGBA=%d/%d/%d/%d, "
"depth=%d, stencil=%d, windowDrawable=%d\n",
cfgs++;
}
/* Yikes we haven't found any suitable formats. This should only happen in case of GDI software rendering which we can't use anyway as its 3D functionality is very, very limited */
{
ERR("Disabling Direct3D because no hardware accelerated pixel formats have been found!\n");
goto nogl_adapter;
}
}
/* D16, D24X8 and D24S8 are common depth / depth+stencil formats. All drivers support them though this doesn't
* mean that the format is offered in hardware. For instance Geforce8 cards don't offer D16 in hardware
* but just fake it using D24(X8?) which is fine. D3D also allows that.
* Some display drivers (i915 on Linux) only report mixed depth+stencil formats like D24S8. MSDN clearly mentions
* that only on lockable formats (e.g. D16_locked) the bit order is guaranteed and that on other formats the
* driver is allowed to consume more bits EXCEPT for stencil bits.
*
* Mark an adapter with this broken stencil behavior.
*/
{
/* Nearly all drivers offer depth formats without stencil, only on i915 this if-statement won't be entered. */
break;
}
}
}
return TRUE;
/* Initialize an adapter for ddraw-only memory counting */
} else {
}
return FALSE;
}
/**********************************************************
* IWineD3D VTbl follows
**********************************************************/
/* COM method table for the IWineD3D interface.
 * NOTE(review): in this extract the initializer retains only the section
 * comments — the actual function-pointer entries (IUnknown methods followed
 * by the IWineD3D methods) appear to have been stripped; confirm against the
 * full revision before relying on this definition. */
static const struct IWineD3DVtbl IWineD3D_Vtbl =
{
/* IUnknown */
/* IWineD3D */
};
/* Shared parent-ops instance for objects that have no parent to notify.
 * NOTE(review): the initializer is empty in this extract — the callback
 * member(s) look stripped; verify against the full revision. */
const struct wined3d_parent_ops wined3d_null_parent_ops =
{
};
{
if (!InitAdapters(wined3d))
{
WARN("Failed to initialize adapters.\n");
if (version > 7)
{
return E_FAIL;
}
}
return WINED3D_OK;
}