surface.c revision a7ba3d5f31ca70d04a3933e570374e5ec5eff84a
/*
* Copyright 1997-2000 Marcus Meissner
* Copyright 1998-2000 Lionel Ulmer
* Copyright 2000-2001 TransGaming Technologies Inc.
* Copyright 2002-2005 Jason Edmeades
* Copyright 2002-2003 Raphael Junqueira
* Copyright 2004 Christian Costa
* Copyright 2005 Oliver Stieber
* Copyright 2006-2011 Stefan Dösinger for CodeWeavers
* Copyright 2007-2008 Henri Verbeet
* Copyright 2006-2008 Roderick Colenbrander
* Copyright 2009-2011 Henri Verbeet for CodeWeavers
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
#include "config.h"
#include "wined3d_private.h"
#ifdef VBOX_WITH_WINE_FIXES
# include <float.h>
#endif
#ifdef VBOX_WITH_WDDM
{
}
{
if (VBOXSHRC_IS_LOCKED(surf))
return;
/* perform data->texture synchronization */
}
{
if (!VBOXSHRC_IS_SHARED(surf))
return;
}
{
if (!VBOXSHRC_IS_SHARED(surf))
return;
}
#endif
static HRESULT IWineD3DSurfaceImpl_BltOverride(struct wined3d_surface *dst_surface, const RECT *dst_rect,
{
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
if (surface->texture_name)
{
#ifdef VBOX_WITH_WDDM
#else
#endif
}
{
}
if (surface->rb_multisample)
{
}
if (surface->rb_resolved)
{
}
LIST_FOR_EACH_ENTRY_SAFE(entry, entry2, &surface->renderbuffers, struct wined3d_renderbuffer_entry, entry)
{
}
}
{
}
if (surface->overlay_dest)
{
}
#ifdef VBOX_WITH_WDDM
/* @todo: CHECK AND REMOVE : this should not be necessary anymore */
{
struct wined3d_context *context;
UINT i;
for (i = 0; i < device->context_count; ++i)
{
/* pretty hacky, @todo: check if the context is acquired and re-acquire it with the new swapchain */
{
Assert(0);
}
}
}
#endif
}
{
else
}
{
if (swapchain)
{
}
else
{
switch (wined3d_settings.offscreen_rendering_mode)
{
case ORM_FBO:
break;
case ORM_BACKBUFFER:
break;
default:
return;
}
}
}
{
{
switch (wined3d_settings.offscreen_rendering_mode)
{
case ORM_FBO:
break;
case ORM_BACKBUFFER:
break;
default:
return;
}
}
}
struct blt_info
{
};
/* A rectangle with floating-point coordinates. The member names mirror
 * RECT's left/top/right/bottom. Used by surface_get_blt_info(), which
 * passes one to cube_coords_float() when mapping a RECT onto cube map
 * face texture coordinates. */
struct float_rect
{
float l; /* left */
float t; /* top */
float r; /* right */
float b; /* bottom */
};
{
}
static void surface_get_blt_info(GLenum target, const RECT *rect, GLsizei w, GLsizei h, struct blt_info *info)
{
struct float_rect f;
switch (target)
{
default:
/* Fall back to GL_TEXTURE_2D */
case GL_TEXTURE_2D:
break;
case GL_TEXTURE_RECTANGLE_ARB:
break;
cube_coords_float(rect, w, h, &f);
break;
cube_coords_float(rect, w, h, &f);
break;
cube_coords_float(rect, w, h, &f);
break;
cube_coords_float(rect, w, h, &f);
break;
cube_coords_float(rect, w, h, &f);
break;
cube_coords_float(rect, w, h, &f);
break;
}
}
static void surface_get_rect(const struct wined3d_surface *surface, const RECT *rect_in, RECT *rect_out)
{
if (rect_in)
else
{
}
}
/* Context activation is done by the caller. */
{
surface_get_blt_info(src_surface->texture_target, src_rect, src_surface->pow2Width, src_surface->pow2Height, &info);
checkGLcall("glEnable(bind_target)");
/* Filtering for StretchRect */
checkGLcall("glTexParameteri");
checkGLcall("glTexParameteri");
gl_info->gl_ops.gl.p_glTexParameteri(info.bind_target, GL_TEXTURE_SRGB_DECODE_EXT, GL_SKIP_DECODE_EXT);
checkGLcall("glTexEnvi");
/* Draw a quad */
/* Unbind the texture */
/* We changed the filtering settings on the texture. Inform the
* container about this to get the filters reset properly next draw. */
if (src_surface->container)
{
}
}
/* Works correctly only for <= 4 bpp formats. */
{
}
{
int extraline = 0;
{
return WINED3DERR_INVALIDCALL;
}
switch (format->byte_count)
{
case 2:
case 4:
/* Allocate extra space to store the RGB bit masks. */
b_info = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(BITMAPINFOHEADER) + 3 * sizeof(DWORD));
break;
case 3:
break;
default:
/* Allocate extra space for a palette. */
break;
}
if (!b_info)
return E_OUTOFMEMORY;
/* Some applications access the surface in via DWORDs, and do not take
* the necessary care at the end of the surface. So we need at least
* 4 extra bytes at the end of the surface. Check against the page size,
* if the last page used for the surface has at least 4 spare bytes we're
* safe, otherwise add an extra line to the DIB section. */
{
extraline = 1;
TRACE("Adding an extra line to the DIB section.\n");
}
/* TODO: Is there a nicer way to force a specific alignment? (8 byte for ddraw) */
/* Get the bit masks */
{
case WINED3DFMT_B8G8R8_UNORM:
break;
case WINED3DFMT_B2G3R3_UNORM:
case WINED3DFMT_B5G6R5_UNORM:
break;
default:
/* Don't know palette */
break;
}
TRACE("Creating a DIB section with size %dx%dx%d, size=%d.\n",
surface->dib.DIBsection = CreateDIBSection(0, b_info, DIB_RGB_COLORS, &surface->dib.bitmap_data, 0, 0);
{
ERR("Failed to create DIB section.\n");
return HRESULT_FROM_WIN32(GetLastError());
}
/* Copy the existing surface to the dib section. */
{
}
else
{
/* This is to make maps read the GL texture although memory is allocated. */
}
/* Now allocate a DC. */
return WINED3D_OK;
}
/* Decide whether a GL pixel buffer object should back this surface's
 * system-memory copy.
 *
 * NOTE(review): the guard conditions for the early FALSE returns appear
 * to have been lost in this revision — as written, the first return makes
 * every following statement unreachable and the function always returns
 * FALSE. Judging by the name and the callers (surface_load_pbo /
 * surface_remove_pbo), each early return should reject a case where PBOs
 * are unsupported or unwanted (e.g. missing ARB_pixel_buffer_object
 * support, converted formats, user-provided memory), with TRUE returned
 * only when all checks pass. TODO: restore the conditions from upstream
 * before relying on this function. */
static BOOL surface_need_pbo(const struct wined3d_surface *surface, const struct wined3d_gl_info *gl_info)
{
return FALSE;
return FALSE;
return FALSE;
return FALSE;
return TRUE;
}
static void surface_load_pbo(struct wined3d_surface *surface, const struct wined3d_gl_info *gl_info)
{
struct wined3d_context *context;
checkGLcall("glBindBufferARB");
checkGLcall("glBufferDataARB");
checkGLcall("glBindBufferARB");
/* We don't need the system memory anymore and we can't even use it for PBOs. */
{
}
}
#ifdef VBOX_WITH_WDDM
static HRESULT d3dfmt_get_conv(const struct wined3d_surface *surface, BOOL need_alpha_ck, BOOL use_texturing,
{
// DWORD alloc_flag = srgb ? SFLAG_SRGBALLOCATED : SFLAG_ALLOCATED;
struct wined3d_format format;
{
}
/* else -> all should be already set in texture init,
* which actually calls the current routine for each of texture's surfaces
* for setting up their state */
}
#endif
{
{
/* Whatever surface we have, make sure that there is memory allocated
* for the downloaded copy, or a PBO to map. */
surface->resource.heapMemory = HeapAlloc(GetProcessHeap(), 0, surface->resource.size + RESOURCE_ALIGNMENT);
ERR("Surface without memory or PBO has SFLAG_INSYSMEM set.\n");
}
}
{
return;
}
/* Context activation is done by the caller. */
static void surface_bind(struct wined3d_surface *surface, struct wined3d_context *context, BOOL srgb)
{
{
}
else
{
if (surface->texture_level)
{
ERR("Standalone surface %p is non-zero texture level %u.\n",
}
if (srgb)
if (!surface->texture_name)
{
#ifdef VBOX_WITH_WDDM
{
ERR("should not be here!");
}
else
#endif
{
checkGLcall("glGenTextures");
checkGLcall("glTexParameteri");
#ifdef VBOX_WITH_WDDM
if (VBOXSHRC_IS_SHARED(surface))
{
}
#endif
}
}
else
{
}
}
}
/* Context activation is done by the caller. */
{
/* We don't need a specific texture unit, but after binding the texture
* the current unit is dirty. Read the unit back instead of switching to
* 0, this avoids messing around with the state manager's GL states. The
* current texture unit should always be a valid one.
*
* To be more specific, this is tricky because we can implicitly be
* called from sampler() in state.c. This means we can't touch anything
* other than whatever happens to be the currently active texture, or we
* would risk marking already applied sampler states dirty again. */
#if 0 //def DEBUG_misha
{
}
#endif
if (active_sampler != WINED3D_UNMAPPED_STAGE)
}
{
}
{
if (surface->texture_name)
{
}
if (surface->texture_name_srgb)
{
}
}
{
/* TODO: Check against the maximum texture sizes supported by the video card. */
unsigned int pow2Width, pow2Height;
surface->texture_name = 0;
/* Non-power2 support */
if (gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO] || gl_info->supported[WINED3D_GL_NORMALIZED_TEXRECT])
{
}
else
{
/* Find the nearest pow2 match */
pow2Width <<= 1;
pow2Height <<= 1;
}
{
/* TODO: Add support for non power two compressed textures. */
{
FIXME("(%p) Compressed non-power-two textures are not supported w(%d) h(%d)\n",
return WINED3DERR_NOTAVAILABLE;
}
}
{
}
if ((surface->pow2Width > gl_info->limits.texture_size || surface->pow2Height > gl_info->limits.texture_size)
{
/* One of three options:
* 1: Do the same as we do with NPOT and scale the texture, (any
* texture ops would require the texture to be scaled which is
* potentially slow)
* 2: Set the texture to the maximum size (bad idea).
* 3: WARN and return WINED3DERR_NOTAVAILABLE;
* 4: Create the surface, but allow it to be used only for DirectDraw
* Blts. Some apps (e.g. Swat 3) create textures with a Height of
* 16 and a Width > 3000 and blt 16x16 letter areas from them to
* the render target. */
if (surface->resource.pool == WINED3D_POOL_DEFAULT || surface->resource.pool == WINED3D_POOL_MANAGED)
{
WARN("Unable to allocate a surface which exceeds the maximum OpenGL texture size.\n");
return WINED3DERR_NOTAVAILABLE;
}
/* We should never use this surface in combination with OpenGL! */
TRACE("Creating an oversized surface: %ux%u.\n",
}
else
{
/* Don't use ARB_TEXTURE_RECTANGLE in case the surface format is P8
* and EXT_PALETTED_TEXTURE is used in combination with texture
* uploads (RTL_READTEX/RTL_TEXTEX). The reason is that
* EXT_PALETTED_TEXTURE doesn't work in combination with
* ARB_TEXTURE_RECTANGLE. */
{
}
}
switch (wined3d_settings.offscreen_rendering_mode)
{
case ORM_FBO:
break;
case ORM_BACKBUFFER:
break;
default:
return WINED3DERR_INVALIDCALL;
}
return WINED3D_OK;
}
{
if (!palette) return;
{
{
/* Make sure the texture is up to date. This call doesn't do
* anything if the texture is already up to date. */
/* We want to force a palette refresh, so mark the drawable as not being up to date */
if (!surface_is_offscreen(surface))
}
else
{
{
TRACE("Palette changed with surface that does not have an up to date system memory copy.\n");
}
}
}
{
unsigned int i;
TRACE("Updating the DC's palette.\n");
for (i = 0; i < 256; ++i)
{
col[i].rgbReserved = 0;
}
}
/* Propagate the changes to the drawable when we have a palette. */
}
{
/* If there's no destination surface there is nothing to do. */
if (!surface->overlay_dest)
return WINED3D_OK;
/* Blt calls ModifyLocation on the dest surface, which in turn calls
* DrawOverlay to update the overlay. Prevent an endless recursion. */
return WINED3D_OK;
return hr;
}
{
TRACE("surface %p, rect %s, flags %#x.\n",
if (flags & WINED3D_MAP_DISCARD)
{
TRACE("WINED3D_MAP_DISCARD flag passed, marking SYSMEM as up to date.\n");
}
else
{
/* surface_load_location() does not check if the rectangle specifies
* the full surface. Most callers don't need that, so do it here. */
}
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
checkGLcall("glBindBufferARB");
/* This shouldn't happen but could occur if some other function
* didn't handle the PBO properly. */
ERR("The surface already has PBO memory allocated.\n");
surface->resource.allocatedMemory = GL_EXTCALL(glMapBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, GL_READ_WRITE_ARB));
checkGLcall("glMapBufferARB");
/* Make sure the PBO isn't set anymore in order not to break non-PBO
* calls. */
checkGLcall("glBindBufferARB");
}
{
if (!rect)
else
{
struct wined3d_box b;
b.front = 0;
b.back = 1;
}
}
}
{
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
TRACE("Freeing PBO memory.\n");
checkGLcall("glUnmapBufferARB");
}
{
TRACE("Not dirtified, nothing to do.\n");
goto done;
}
{
{
fullsurface = TRUE;
}
else
{
/* TODO: Proper partial rectangle tracking. */
fullsurface = FALSE;
}
/* Partial rectangle tracking is not commonly implemented, it is only
* done for render targets. INSYSMEM was set before to tell
* surface_load_location() where to read the rectangle from.
* Indrawable is set because all modifications from the partial
* sysmem copy are written back to the drawable, thus the surface is
* merged again in the drawable. The sysmem copy is not fully up to
* date because only a subrectangle was read in Map(). */
if (!fullsurface)
{
}
}
{
FIXME("Depth / stencil buffer locking is not implemented.\n");
}
done:
/* Overlays have to be redrawn manually after changes with the GL implementation */
if (surface->overlay_dest)
}
{
return FALSE;
return FALSE;
return TRUE;
}
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
TRACE("src_surface %p, src_location %s, src_rect %s,\n",
TRACE("dst_surface %p, dst_location %s, dst_rect %s.\n",
{
ERR("Incompatible formats %s and %s.\n",
return;
}
if (!src_mask)
{
ERR("Not a depth / stencil format: %s.\n",
return;
}
gl_mask = 0;
if (src_mask & WINED3DFMT_FLAG_DEPTH)
if (src_mask & WINED3DFMT_FLAG_STENCIL)
/* Make sure the locations are up-to-date. Loading the destination
* surface isn't required if the entire surface is overwritten. */
{
WARN("Invalid context, skipping blit.\n");
return;
}
if (gl_mask & GL_DEPTH_BUFFER_BIT)
{
}
if (gl_mask & GL_STENCIL_BUFFER_BIT)
{
{
}
}
gl_info->fbo_ops.glBlitFramebuffer(src_rect->left, src_rect->top, src_rect->right, src_rect->bottom,
checkGLcall("glBlitFramebuffer()");
}
/* Blit between surface locations. Onscreen on different swapchains is not supported.
* Depth / stencil is not supported. */
static void surface_blt_fbo(const struct wined3d_device *device, enum wined3d_texture_filter_type filter,
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
TRACE("src_surface %p, src_location %s, src_rect %s,\n",
TRACE("dst_surface %p, dst_location %s, dst_rect %s.\n",
src_rect = *src_rect_in;
dst_rect = *dst_rect_in;
switch (filter)
{
case WINED3D_TEXF_LINEAR:
break;
default:
case WINED3D_TEXF_NONE:
case WINED3D_TEXF_POINT:
break;
}
/* Resolve the source surface first if needed. */
/* Make sure the locations are up-to-date. Loading the destination
* surface isn't required if the entire surface is overwritten. (And is
* in fact harmful if we're being called by surface_load_location() with
* the purpose of loading the destination surface.) */
{
WARN("Invalid context, skipping blit.\n");
return;
}
if (src_location == SFLAG_INDRAWABLE)
{
#ifndef VBOX_WINE_WITH_SINGLE_CONTEXT
#else
#endif
}
else
{
}
checkGLcall("glReadBuffer()");
if (dst_location == SFLAG_INDRAWABLE)
{
#ifndef VBOX_WINE_WITH_SINGLE_CONTEXT
#else
#endif
}
else
{
}
checkGLcall("glBlitFramebuffer()");
|| (dst_location == SFLAG_INDRAWABLE
}
const RECT *src_rect, DWORD src_usage, enum wined3d_pool src_pool, const struct wined3d_format *src_format,
const RECT *dst_rect, DWORD dst_usage, enum wined3d_pool dst_pool, const struct wined3d_format *dst_format)
{
return FALSE;
return FALSE;
switch (blit_op)
{
if (!((src_format->flags & WINED3DFMT_FLAG_FBO_ATTACHABLE) || (src_usage & WINED3DUSAGE_RENDERTARGET)))
return FALSE;
if (!((dst_format->flags & WINED3DFMT_FLAG_FBO_ATTACHABLE) || (dst_usage & WINED3DUSAGE_RENDERTARGET)))
return FALSE;
break;
return FALSE;
return FALSE;
break;
default:
return FALSE;
}
return FALSE;
return TRUE;
}
/* This function checks if the primary render target uses the 8bit paletted format. */
{
{
return TRUE;
}
return FALSE;
}
{
{
case WINED3DFMT_P8_UINT:
{
}
else
{
float_color->r = 0.0f;
float_color->g = 0.0f;
float_color->b = 0.0f;
}
break;
case WINED3DFMT_B5G6R5_UNORM:
float_color->a = 1.0f;
break;
case WINED3DFMT_B8G8R8_UNORM:
float_color->a = 1.0f;
break;
break;
default:
return FALSE;
}
return TRUE;
}
static BOOL surface_convert_depth_to_float(const struct wined3d_surface *surface, DWORD depth, float *float_depth)
{
{
break;
case WINED3DFMT_D16_UNORM:
break;
case WINED3DFMT_X8D24_UNORM:
break;
case WINED3DFMT_D32_UNORM:
break;
default:
return FALSE;
}
return TRUE;
}
/* Do not call while under the GL lock. */
/* Fill the given rectangle of a depth/stencil surface with a constant
 * depth value, delegating the work to a capable blit shader backend.
 * Returns WINED3DERR_INVALIDCALL when no blitter supports the operation.
 *
 * NOTE(review): 'blitter' is read uninitialized below — the call that
 * selects a capable blitter (and the subsequent depth_fill invocation,
 * whose HRESULT this function should return) appears to be missing from
 * this revision, so the function also falls off the end of a non-void
 * return path. Verify against the upstream implementation. */
static HRESULT wined3d_surface_depth_fill(struct wined3d_surface *surface, const RECT *rect, float depth)
{
const struct blit_shader *blitter;
if (!blitter)
{
FIXME("No blitter is capable of performing the requested depth fill operation.\n");
return WINED3DERR_INVALIDCALL;
}
}
static HRESULT wined3d_surface_depth_blt(struct wined3d_surface *src_surface, DWORD src_location, const RECT *src_rect,
{
return WINED3DERR_INVALIDCALL;
surface_depth_blt_fbo(device, src_surface, src_location, src_rect, dst_surface, dst_location, dst_rect);
return WINED3D_OK;
}
/* Do not call while under the GL lock. */
{
#ifdef VBOX_WITH_WDDM
#endif
TRACE("dst_surface %p, dst_rect %s, src_surface %p, src_rect %s, flags %#x, fx %p, filter %s.\n",
if (fx)
{
TRACE("ddckDestColorkey {%#x, %#x}.\n",
TRACE("ddckSrcColorkey {%#x, %#x}.\n",
}
{
WARN("Surface is busy, returning WINEDDERR_SURFACEBUSY.\n");
return WINEDDERR_SURFACEBUSY;
}
{
WARN("The application gave us a bad destination rectangle.\n");
return WINEDDERR_INVALIDRECT;
}
if (src_surface)
{
{
WARN("Application gave us bad source rectangle for Blt.\n");
return WINEDDERR_INVALIDRECT;
}
}
else
{
}
#ifdef VBOX_WITH_WDDM
/* once we've done locking, we should do unlock on exit,
* do goto post_process instead of return below! */
#endif
flags &= ~WINEDDBLT_DDFX;
if (flags & WINEDDBLT_WAIT)
flags &= ~WINEDDBLT_WAIT;
if (flags & WINEDDBLT_ASYNC)
{
static unsigned int once;
if (!once++)
FIXME("Can't handle WINEDDBLT_ASYNC flag.\n");
flags &= ~WINEDDBLT_ASYNC;
}
/* WINEDDBLT_DONOTWAIT appeared in DX7. */
if (flags & WINEDDBLT_DONOTWAIT)
{
static unsigned int once;
if (!once++)
FIXME("Can't handle WINEDDBLT_DONOTWAIT flag.\n");
flags &= ~WINEDDBLT_DONOTWAIT;
}
if (!device->d3d_initialized)
{
WARN("D3D not initialized, using fallback.\n");
goto cpu;
}
/* We want to avoid invalidating the sysmem location for converted
* surfaces, since otherwise we'd have to convert the data back when
* locking them. */
{
#ifndef VBOX_WITH_WDDM
#else
goto post_process;
#endif
}
if (flags & ~simple_blit)
{
goto fallback;
}
if (src_surface)
else
/* This isn't strictly needed. FBO blits for example could deal with
* cross-swapchain blits by first downloading the source to a texture
* before switching to the destination context. We just have this here to
* not have to deal with the issue, since cross-swapchain blits should be
* rare. */
{
FIXME("Using fallback for cross-swapchain blit.\n");
goto fallback;
}
dst_ds_flags = dst_surface->resource.format->flags & (WINED3DFMT_FLAG_DEPTH | WINED3DFMT_FLAG_STENCIL);
if (src_surface)
src_ds_flags = src_surface->resource.format->flags & (WINED3DFMT_FLAG_DEPTH | WINED3DFMT_FLAG_STENCIL);
else
src_ds_flags = 0;
if (src_ds_flags || dst_ds_flags)
{
if (flags & WINEDDBLT_DEPTHFILL)
{
float depth;
TRACE("Depth fill.\n");
#ifndef VBOX_WITH_WDDM
return WINED3DERR_INVALIDCALL;
#else
{
goto post_process;
}
#endif
#ifndef VBOX_WITH_WDDM
return WINED3D_OK;
#else
{
hr = WINED3D_OK;
goto post_process;
}
#endif
}
else
{
if (src_ds_flags != dst_ds_flags)
{
WARN("Rejecting depth / stencil blit between incompatible formats.\n");
#ifndef VBOX_WITH_WDDM
return WINED3DERR_INVALIDCALL;
#else
hr = WINED3D_OK;
goto post_process;
#endif
}
#ifndef VBOX_WITH_WDDM
return WINED3D_OK;
#else
{
hr = WINED3D_OK;
goto post_process;
}
#endif
}
}
else
{
/* In principle this would apply to depth blits as well, but we don't
* implement those in the CPU blitter at the moment. */
{
#ifdef DEBUG_misha
/* if below condition is not specified, i.e. both surfaces are in tecxture locations,
* we would perhaps need to fallback to tex->tex hw blitting */
#endif
if (scale)
TRACE("Not doing sysmem blit because of scaling.\n");
else if (convert)
TRACE("Not doing sysmem blit because of format conversion.\n");
else
#ifndef VBOX_WITH_WDDM
#else
{
goto post_process;
}
#endif
}
if (flags & WINEDDBLT_COLORFILL)
{
struct wined3d_color color;
TRACE("Color fill.\n");
goto fallback;
#ifndef VBOX_WITH_WDDM
return WINED3D_OK;
#else
{
hr = WINED3D_OK;
goto post_process;
}
#endif
}
else
{
TRACE("Color blit.\n");
/* Upload */
{
if (scale)
TRACE("Not doing upload because of scaling.\n");
else if (convert)
TRACE("Not doing upload because of format conversion.\n");
else
{
{
if (!surface_is_offscreen(dst_surface))
#ifndef VBOX_WITH_WDDM
return WINED3D_OK;
#else
hr = WINED3D_OK;
goto post_process;
#endif
}
}
}
/* Use present for back -> front blits. The idea behind this is
* that present is potentially faster than a blit, in particular
* when FBO blits aren't available. Some ddraw applications like
* Half-Life and Prince of Persia 3D use Blt() from the backbuffer
* to the frontbuffer instead of doing a Flip(). D3D8 and D3D9
* applications can't blit directly to the frontbuffer. */
{
TRACE("Using present for backbuffer -> frontbuffer blit.\n");
/* Set the swap effect to COPY, we don't want the backbuffer
* to become undefined. */
#ifndef VBOX_WITH_WDDM
return WINED3D_OK;
#else
hr = WINED3D_OK;
goto post_process;
#endif
}
{
TRACE("Using FBO blit.\n");
#ifndef VBOX_WITH_WDDM
return WINED3D_OK;
#else
hr = WINED3D_OK;
goto post_process;
#endif
}
{
TRACE("Using arbfp blit.\n");
#ifndef VBOX_WITH_WDDM
return WINED3D_OK;
#else
{
hr = WINED3D_OK;
goto post_process;
}
#endif
}
}
}
#ifdef DEBUG_misha
/* test ! */
Assert(0);
#endif
/* Special cases for render targets. */
{
#ifndef VBOX_WITH_WDDM
return WINED3D_OK;
#else
{
hr = WINED3D_OK;
goto post_process;
}
#endif
}
cpu:
#ifdef DEBUG_misha
/* test ! */
Assert(0);
#endif
/* For the rest call the X11 surface implementation. For render targets
* this should be implemented OpenGL accelerated in BltOverride, other
* blits are rather rare. */
#ifndef VBOX_WITH_WDDM
#else
goto post_process;
#endif
#ifdef VBOX_WITH_WDDM
return hr;
#endif
}
struct wined3d_surface *render_target)
{
/* TODO: Check surface sizes, pools, etc. */
return WINED3DERR_INVALIDCALL;
}
/* Context activation is done by the caller. */
static void surface_remove_pbo(struct wined3d_surface *surface, const struct wined3d_gl_info *gl_info)
{
{
}
else
{
surface->resource.heapMemory = HeapAlloc(GetProcessHeap(), 0, surface->resource.size + RESOURCE_ALIGNMENT);
ERR("Surface %p has heapMemory %p and flags %#x.\n",
}
checkGLcall("glBindBufferARB(GL_PIXEL_UNPACK_BUFFER, surface->pbo)");
checkGLcall("glGetBufferSubDataARB");
checkGLcall("glDeleteBuffersARB");
}
{
{
{
{
ERR("Failed to allocate memory.\n");
return FALSE;
}
}
{
ERR("Surface %p has heapMemory %p and flags %#x.\n",
}
(BYTE *)(((ULONG_PTR)surface->resource.heapMemory + (RESOURCE_ALIGNMENT - 1)) & ~(RESOURCE_ALIGNMENT - 1));
}
else
{
}
return TRUE;
}
/* Do not call while under the GL lock. */
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
{
/* Default pool resources are supposed to be destroyed before Reset is called.
* Implicit resources stay however. So this means we have an implicit render target
* or depth stencil. The content may be destroyed, but we still have to tear down
* opengl resources, so we cannot leave early.
*
* Put the surfaces into sysmem, and reset the content. The D3D content is undefined,
* but we can't set the sysmem INDRAWABLE because when we're rendering the swapchain
* or the depth stencil into an FBO the texture or render buffer will be removed
* and all flags get lost
*/
/* We also get here when the ddraw swapchain is destroyed, for example
* for a mode switch. In this case this surface won't necessarily be
* an implicit surface. We have to mark it lost so that the
* application can restore it after the mode switch. */
}
else
{
/* Load the surface into system memory */
}
/* Destroy PBOs, but load them into real sysmem before */
/* Destroy fbo render buffers. This is needed for implicit render targets, for
* all application-created targets the application has to release the surface
* before calling _Reset
*/
LIST_FOR_EACH_ENTRY_SAFE(entry, entry2, &surface->renderbuffers, struct wined3d_renderbuffer_entry, entry)
{
}
/* If we're in a texture, the texture name belongs to the texture.
* Otherwise, destroy it. */
{
#ifdef VBOX_WITH_WDDM
#else
#endif
surface->texture_name = 0;
#ifdef VBOX_WITH_WDDM
#else
#endif
surface->texture_name_srgb = 0;
}
if (surface->rb_multisample)
{
surface->rb_multisample = 0;
}
if (surface->rb_resolved)
{
surface->rb_resolved = 0;
}
}
static const struct wined3d_resource_ops surface_resource_ops =
{
};
static const struct wined3d_surface_ops surface_ops =
{
};
/*****************************************************************************
* Initializes the GDI surface, aka creates the DIB section we render to
* The DIB section creation is done by calling GetDC, which will create the
* section and releasing the dc to allow the app to use it. The dib section
* will stay until the surface is released
*
* GDI surfaces do not need to be a power of 2 in size, so the pow2 sizes
* are set to the real sizes to save memory. The NONPOW2 flag is unset to
* avoid confusion in the shared surface code.
*
* Returns:
* WINED3D_OK on success
* The return values of called methods on failure
*
*****************************************************************************/
{
{
ERR("Overlays not yet supported by GDI surfaces.\n");
return WINED3DERR_INVALIDCALL;
}
/* Sysmem textures have memory already allocated - release it,
* this avoids an unnecessary memcpy. */
{
}
/* We don't mind the nonpow2 stuff in GDI. */
return WINED3D_OK;
}
{
if (!palette) return;
{
unsigned int i;
TRACE("Updating the DC's palette.\n");
for (i = 0; i < 256; ++i)
{
col[i].rgbReserved = 0;
}
}
/* Update the image because of the palette change. Some games like e.g.
* Red Alert call SetEntries a lot to implement fading. */
/* Tell the swapchain to update the screen. */
}
{
TRACE("surface %p, rect %s, flags %#x.\n",
{
/* This happens on gdi surfaces if the application set a user pointer
* and resets it. Recreate the DIB section. */
{
return;
}
}
}
{
/* Tell the swapchain to update the screen. */
}
static const struct wined3d_surface_ops gdi_surface_ops =
{
};
{
if(srgb)
{
}
else
{
}
{
/* FIXME: We shouldn't need to remove SFLAG_INTEXTURE if the
* surface has no texture name yet. See if we can get rid of this. */
{
#ifndef VBOX_WITH_WDDM
#endif
}
}
#ifdef VBOX_WITH_WDDM
if (VBOXSHRC_IS_SHARED(surface))
{
|| new_name == 0 /* on cleanup */);
}
#endif
}
{
{
if (target == GL_TEXTURE_RECTANGLE_ARB)
{
}
{
}
}
}
/* This call just downloads data, the caller is responsible for binding the
* correct texture. */
/* Context activation is done by the caller. */
static void surface_download_data(struct wined3d_surface *surface, const struct wined3d_gl_info *gl_info)
{
/* Only support read back of converted P8 surfaces. */
{
ERR("Trying to read back converted surface %p with format %s.\n", surface, debug_d3dformat(format->id));
return;
}
{
TRACE("(%p) : Calling glGetCompressedTexImageARB level %d, format %#x, type %#x, data %p.\n",
{
checkGLcall("glBindBufferARB");
checkGLcall("glGetCompressedTexImageARB");
checkGLcall("glBindBufferARB");
}
else
{
checkGLcall("glGetCompressedTexImageARB");
}
}
else
{
void *mem;
int src_pitch = 0;
int dst_pitch = 0;
/* In case of P8 the index is stored in the alpha component if the primary render target uses P8. */
{
}
{
}
else
{
}
TRACE("(%p) : Calling glGetTexImage level %d, format %#x, type %#x, data %p\n",
{
checkGLcall("glBindBufferARB");
checkGLcall("glGetTexImage");
checkGLcall("glBindBufferARB");
}
else
{
checkGLcall("glGetTexImage");
}
{
UINT y;
/*
* Some games (e.g. warhammer 40k) don't work properly with the odd pitches, preventing
* the surface pitch from being used to box non-power2 textures. Instead we have to use a hack to
* repack the texture so that the bpp * width pitch can be used instead of bpp * pow2width.
*
* We're doing this...
*
* instead of boxing the texture :
* |<-texture width ->| -->pow2width| /\
* |111111111111111111| | |
* |222 Texture 222222| boxed empty | texture height
* |3333 Data 33333333| | |
* |444444444444444444| | \/
* ----------------------------------- |
* | boxed empty | boxed empty | pow2height
* | | | \/
* -----------------------------------
*
*
* we're repacking the data to the expected texture width
*
* |<-texture width ->| -->pow2width| /\
* |111111111111111111222222222222222| |
* |222333333333333333333444444444444| texture height
* |444444 | |
* | | \/
* | | |
* | empty | pow2height
* | | \/
* -----------------------------------
*
* == is the same as
*
* |<-texture width ->| /\
* |111111111111111111|
* |222222222222222222|texture height
* |333333333333333333|
* |444444444444444444| \/
* --------------------
*
* this also means that any references to allocatedMemory should work with the data as if were a
* standard texture with a non-power2 width instead of texture boxed up to be a power2 texture.
*
* internally the texture is still stored in a boxed format so any references to textureName will
* get a boxed texture with width pow2width and not a texture of width resource.width.
*
* Performance should not be an issue, because applications normally do not lock the surfaces when
* rendering. If an app does, the SFLAG_DYNLOCK flag will kick in and the memory copy won't be released,
* and doesn't have to be re-read. */
TRACE("(%p) : Repacking the surface data from pitch %d to pitch %d\n", surface, src_pitch, dst_pitch);
{
/* skip the first row */
}
}
}
/* Surface has now been downloaded */
}
/* This call just uploads data, the caller is responsible for binding the
* correct texture. */
/* Context activation is done by the caller. */
static void surface_upload_data(struct wined3d_surface *surface, const struct wined3d_gl_info *gl_info,
{
TRACE("surface %p, gl_info %p, format %s, src_rect %s, src_pitch %u, dst_point %s, srgb %#x, data {%#x:%p}.\n",
{
WARN("Uploading a surface that is currently mapped, setting SFLAG_PIN_SYSMEM.\n");
}
{
}
if (data->buffer_object)
{
checkGLcall("glBindBufferARB");
}
{
if (srgb)
else
TRACE("glCompressedTexSubImage2DARB, target %#x, level %d, x %d, y %d, w %d, h %d, "
if (row_length == src_pitch)
{
}
else
{
/* glCompressedTexSubImage2DARB() ignores pixel store state, so we
* can't use the unpack row length like below. */
{
y += format->block_height;
}
}
checkGLcall("glCompressedTexSubImage2DARB");
}
else
{
TRACE("glTexSubImage2D, target %#x, level %d, x %d, y %d, w %d, h %d, format %#x, type %#x, addr %p.\n",
checkGLcall("glTexSubImage2D");
}
if (data->buffer_object)
{
checkGLcall("glBindBufferARB");
}
{
unsigned int i;
for (i = 0; i < device->context_count; ++i)
{
}
}
}
static HRESULT d3dfmt_get_conv(const struct wined3d_surface *surface, BOOL need_alpha_ck, BOOL use_texturing,
{
/* Copy the default values from the surface. Below we might perform fixups */
/* TODO: get rid of color keying desc fixups by using e.g. a table. */
/* Ok, now look if we have to do any conversion */
{
case WINED3DFMT_P8_UINT:
/* Below the call to blit_supported is disabled for Wine 1.2
* because the function isn't operating correctly yet. At the
* moment 8-bit blits are handled in software and if certain GL
* extensions are around, surface conversion is performed at
* upload time. The blit_supported call recognizes it as a
* destination fixup. This type of upload 'fixup' and 8-bit to
* 8-bit blits need to be handled by the blit_shader.
* TODO: get rid of this #if 0. */
#if 0
blit_supported = device->blitter->blit_supported(&device->adapter->gl_info, WINED3D_BLIT_OP_COLOR_BLIT,
#endif
blit_supported = gl_info->supported[EXT_PALETTED_TEXTURE] || gl_info->supported[ARB_FRAGMENT_PROGRAM];
/* Use conversion when the blit_shader backend supports it. It only supports this in case of
* texturing. Further also use conversion in case of color keying.
* Paletted textures can be emulated using shaders but only do that for 2D purposes e.g. situations
* in which the main render target uses p8. Some games like GTA Vice City use P8 for texturing which
* conflicts with this.
*/
|| colorkey_active || !use_texturing)
{
if (colorkey_active)
else
}
break;
case WINED3DFMT_B2G3R3_UNORM:
/* **********************
GL_UNSIGNED_BYTE_3_3_2
********************** */
if (colorkey_active) {
/* This texture format will never be used.. So do not care about color keying
up until the point in time it will be needed :-) */
FIXME(" ColorKeying not supported in the RGB 332 format !\n");
}
break;
case WINED3DFMT_B5G6R5_UNORM:
if (colorkey_active)
{
}
break;
if (colorkey_active)
{
}
break;
case WINED3DFMT_B8G8R8_UNORM:
if (colorkey_active)
{
}
break;
if (colorkey_active)
{
}
break;
if (colorkey_active)
{
}
break;
default:
break;
}
if (*conversion_type != WINED3D_CT_NONE)
{
}
return WINED3D_OK;
}
{
return TRUE;
/* This assumes power of two block sizes, but NPOT block sizes would be
* silly anyway. */
return TRUE;
return FALSE;
}
{
const struct wined3d_format *src_format;
const struct wined3d_format *dst_format;
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
struct wined3d_bo_address data;
struct wined3d_format format;
POINT p;
TRACE("dst_surface %p, dst_point %s, src_surface %p, src_rect %s.\n",
{
WARN("Source and destination surfaces should have the same format.\n");
return WINED3DERR_INVALIDCALL;
}
if (!dst_point)
{
p.x = 0;
p.y = 0;
dst_point = &p;
}
{
WARN("Invalid destination point.\n");
return WINED3DERR_INVALIDCALL;
}
if (!src_rect)
{
r.left = 0;
r.top = 0;
src_rect = &r;
}
{
WARN("Invalid source rectangle.\n");
return WINED3DERR_INVALIDCALL;
}
{
WARN("Destination out of bounds.\n");
return WINED3DERR_INVALIDCALL;
}
if ((src_format->flags & WINED3DFMT_FLAG_BLOCKS) && !surface_check_block_align(src_surface, src_rect))
{
WARN("Source rectangle not block-aligned.\n");
return WINED3DERR_INVALIDCALL;
}
if ((dst_format->flags & WINED3DFMT_FLAG_BLOCKS) && !surface_check_block_align(dst_surface, &dst_rect))
{
WARN("Destination rectangle not block-aligned.\n");
return WINED3DERR_INVALIDCALL;
}
/* Use wined3d_surface_blt() instead of uploading directly if we need conversion. */
return wined3d_surface_blt(dst_surface, &dst_rect, src_surface, src_rect, 0, NULL, WINED3D_TEXF_POINT);
/* Only load the surface for partial updates. For newly allocated texture
* the texture wouldn't be the current location, and we'd upload zeroes
* just to overwrite them again. */
else
surface_upload_data(dst_surface, gl_info, src_format, src_rect, src_pitch, dst_point, FALSE, &data);
return WINED3D_OK;
}
/* This call just allocates the texture, the caller is responsible for binding
* the correct texture. */
/* Context activation is done by the caller. */
static void surface_allocate_surface(struct wined3d_surface *surface, const struct wined3d_gl_info *gl_info,
{
#ifdef VBOX_WITH_WDDM
{
ERR("trying to allocate shared openned resource!!, ignoring..\n");
return;
}
#endif
if (srgb)
{
}
{
}
else
{
}
if (!internal)
{
}
TRACE("(%p) : Creating surface (target %#x) level %d, d3d format %s, internal format %#x, width %d, height %d, gl format %#x, gl type=%#x\n",
{
{
/* In some cases we want to disable client storage.
* SFLAG_NONPOW2 has a bigger opengl texture than the client memory, and different pitches
* SFLAG_DIBSECTION: Dibsections may have read / write protections on the memory. Avoid issues...
* SFLAG_CONVERTED: The conversion destination memory is freed after loading the surface
* allocatedMemory == NULL: Not defined in the extension. Seems to disable client storage effectively
*/
checkGLcall("glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_FALSE)");
}
else
{
/* Point OpenGL to our allocated texture memory. Do not use
* resource.allocatedMemory here because it might point into a
* PBO. Instead use heapMemory, but get the alignment right. */
}
}
{
checkGLcall("glCompressedTexImage2DARB");
}
else
{
checkGLcall("glTexImage2D");
}
{
checkGLcall("glPixelStorei(GL_UNPACK_CLIENT_STORAGE_APPLE, GL_TRUE)");
}
}
/* In D3D the depth stencil dimensions have to be greater than or equal to the
* render target dimensions. With FBOs, the dimensions have to be an exact match. */
/* TODO: We should synchronize the renderbuffer's content with the texture's content. */
/* Context activation is done by the caller. */
void surface_set_compatible_renderbuffer(struct wined3d_surface *surface, const struct wined3d_surface *rt)
{
struct wined3d_renderbuffer_entry *entry;
GLuint renderbuffer = 0;
unsigned int src_width, src_height;
{
}
else
{
}
/* A depth stencil smaller than the render target is not valid */
/* Remove any renderbuffer set if the sizes match */
{
return;
}
/* Look if we've already got a renderbuffer of the correct dimensions */
{
{
break;
}
}
if (!renderbuffer)
{
}
checkGLcall("set_compatible_renderbuffer");
}
{
if (!swapchain)
{
return GL_NONE;
}
{
if (swapchain->render_to_fbo)
{
TRACE("Returning GL_COLOR_ATTACHMENT0\n");
return GL_COLOR_ATTACHMENT0;
}
TRACE("Returning GL_BACK\n");
return GL_BACK;
}
{
TRACE("Returning GL_FRONT\n");
return GL_FRONT;
}
FIXME("Higher back buffer, returning GL_BACK\n");
return GL_BACK;
}
/* Slightly inefficient way to handle multiple dirty rects but it works :) */
{
/* No partial locking for textures yet. */
if (dirty_rect)
{
}
else
{
}
/* if the container is a texture then mark it dirty. */
{
TRACE("Passing to container.\n");
}
}
{
{
ERR("Not supported on scratch surfaces.\n");
return WINED3DERR_INVALIDCALL;
}
/* Reload if either the texture and sysmem have different ideas about the
* color key, or the actual key values changed. */
|| surface->gl_color_key.color_space_high_value != surface->src_blt_color_key.color_space_high_value)))
{
TRACE("Reloading because of color keying\n");
/* To perform the color key conversion we need a sysmem copy of
* the surface. Make sure we have it. */
/* Make sure the texture is reloaded because of the color key change,
* this kills performance though :( */
/* TODO: This is not necessarily needed with hw palettized texture support. */
/* Switching color keying on / off may change the internal format. */
if (ck_changed)
}
{
TRACE("Reloading because surface is dirty.\n");
}
else
{
TRACE("surface is already in texture\n");
return WINED3D_OK;
}
/* No partial locking for textures yet. */
return WINED3D_OK;
}
/* See also float_16_to_32() in wined3d_private.h */
static inline unsigned short float_32_to_16(const float *in)
{
int exp = 0;
unsigned int mantissa;
unsigned short ret;
/* Deal with special numbers */
if (*in == 0.0f)
return 0x0000;
return 0x7c01;
{
do
{
exp--;
}
{
do
{
tmp /= 2.0f;
exp++;
}
++mantissa; /* Round to nearest, away from zero. */
{
}
else if (exp <= 0)
{
/* exp == 0: Non-normalized mantissa. Returns 0x0000 (=0.0) for too small numbers. */
while (exp <= 0)
{
++exp;
}
}
else
{
}
return ret;
}
{
TRACE("surface %p, swapchain %p, container %p.\n",
return refcount;
}
/* Do not call while under the GL lock. */
{
TRACE("surface %p, swapchain %p, container %p.\n",
if (!refcount)
{
}
return refcount;
}
{
}
{
}
{
{
ERR("D3D not initialized.\n");
return;
}
}
{
}
{
}
{
switch (flags)
{
case WINEDDGBS_CANBLT:
case WINEDDGBS_ISBLTDONE:
return WINED3D_OK;
default:
return WINED3DERR_INVALIDCALL;
}
}
{
/* XXX: DDERR_INVALIDSURFACETYPE */
switch (flags)
{
case WINEDDGFS_CANFLIP:
case WINEDDGFS_ISFLIPDONE:
return WINED3D_OK;
default:
return WINED3DERR_INVALIDCALL;
}
}
{
/* D3D8 and 9 loose full devices, ddraw only surfaces. */
}
{
return WINED3D_OK;
}
void CDECL wined3d_surface_set_palette(struct wined3d_surface *surface, struct wined3d_palette *palette)
{
{
TRACE("Nop palette change.\n");
return;
}
if (palette)
{
}
}
{
if (flags & WINEDDCKEY_COLORSPACE)
{
return WINED3DERR_INVALIDCALL;
}
/* Dirtify the surface, but only if a key was changed. */
if (color_key)
{
switch (flags & ~WINEDDCKEY_COLORSPACE)
{
case WINEDDCKEY_DESTBLT:
break;
case WINEDDCKEY_DESTOVERLAY:
break;
case WINEDDCKEY_SRCOVERLAY:
break;
case WINEDDCKEY_SRCBLT:
break;
}
}
else
{
switch (flags & ~WINEDDCKEY_COLORSPACE)
{
case WINEDDCKEY_DESTBLT:
break;
case WINEDDCKEY_DESTOVERLAY:
break;
case WINEDDCKEY_SRCOVERLAY:
break;
case WINEDDCKEY_SRCBLT:
break;
}
}
return WINED3D_OK;
}
{
}
{
{
/* Since compressed formats are block based, pitch means the amount of
* bytes to the next row of block rather than the next row of pixels. */
}
else
{
}
return pitch;
}
{
#ifndef VBOX_WITH_WDDM
{
WARN("Surface is mapped or the DC is in use.\n");
return WINED3DERR_INVALIDCALL;
}
/* Render targets depend on their hdc, and we can't create an hdc on a user pointer. */
{
ERR("Not supported on render targets.\n");
return WINED3DERR_INVALIDCALL;
}
{
/* Do I have to copy the old surface content? */
{
}
{
}
/* Now the surface memory is most up do date. Invalidate drawable and texture. */
/* For client textures OpenGL has to be notified. */
/* Now free the old memory if any. */
}
{
/* HeapMemory should be NULL already. */
ERR("User pointer surface has heap memory allocated.\n");
if (!mem)
{
}
}
return WINED3D_OK;
#else
ERR("unsupported!");
return E_FAIL;
#endif
}
{
LONG w, h;
{
WARN("Not an overlay surface.\n");
return WINEDDERR_NOTAOVERLAYSURFACE;
}
return WINED3D_OK;
}
HRESULT CDECL wined3d_surface_get_overlay_position(const struct wined3d_surface *surface, LONG *x, LONG *y)
{
{
TRACE("Not an overlay surface.\n");
return WINEDDERR_NOTAOVERLAYSURFACE;
}
if (!surface->overlay_dest)
{
TRACE("Overlay not visible.\n");
*x = 0;
*y = 0;
return WINEDDERR_OVERLAYNOTVISIBLE;
}
TRACE("Returning position %d, %d.\n", *x, *y);
return WINED3D_OK;
}
{
{
TRACE("Not an overlay surface.\n");
return WINEDDERR_NOTAOVERLAYSURFACE;
}
return WINED3D_OK;
}
{
TRACE("surface %p, src_rect %s, dst_surface %p, dst_rect %s, flags %#x, fx %p.\n",
{
WARN("Not an overlay surface.\n");
return WINEDDERR_NOTAOVERLAYSURFACE;
}
else if (!dst_surface)
{
WARN("Dest surface is NULL.\n");
return WINED3DERR_INVALIDCALL;
}
if (src_rect)
{
}
else
{
}
if (dst_rect)
{
}
else
{
}
{
}
if (flags & WINEDDOVER_SHOW)
{
{
}
}
else if (flags & WINEDDOVER_HIDE)
{
/* tests show that the rectangles are erased on hide */
}
return WINED3D_OK;
}
{
UINT resource_size = wined3d_format_calculate_size(format, device->surface_alignment, width, height);
TRACE("surface %p, width %u, height %u, format %s, multisample_type %#x, multisample_quality %u.\n",
if (!resource_size)
return WINED3DERR_INVALIDCALL;
if (device->d3d_initialized)
{
}
{
}
else
{
}
else
if (!surface_init_sysmem(surface))
return E_OUTOFMEMORY;
return WINED3D_OK;
}
{
unsigned short *dst_s;
const float *src_f;
unsigned int x, y;
for (y = 0; y < h; ++y)
{
for (x = 0; x < w; ++x)
{
}
}
}
{
static const unsigned char convert_5to8[] =
{
0x00, 0x08, 0x10, 0x19, 0x21, 0x29, 0x31, 0x3a,
0x42, 0x4a, 0x52, 0x5a, 0x63, 0x6b, 0x73, 0x7b,
0x84, 0x8c, 0x94, 0x9c, 0xa5, 0xad, 0xb5, 0xbd,
0xc5, 0xce, 0xd6, 0xde, 0xe6, 0xef, 0xf7, 0xff,
};
static const unsigned char convert_6to8[] =
{
0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, 0x18, 0x1c,
0x20, 0x24, 0x28, 0x2d, 0x31, 0x35, 0x39, 0x3d,
0x41, 0x45, 0x49, 0x4d, 0x51, 0x55, 0x59, 0x5d,
0x61, 0x65, 0x69, 0x6d, 0x71, 0x75, 0x79, 0x7d,
0x82, 0x86, 0x8a, 0x8e, 0x92, 0x96, 0x9a, 0x9e,
0xa2, 0xa6, 0xaa, 0xae, 0xb2, 0xb6, 0xba, 0xbe,
0xc2, 0xc6, 0xca, 0xce, 0xd2, 0xd7, 0xdb, 0xdf,
0xe3, 0xe7, 0xeb, 0xef, 0xf3, 0xf7, 0xfb, 0xff,
};
unsigned int x, y;
for (y = 0; y < h; ++y)
{
for (x = 0; x < w; ++x)
{
dst_line[x] = 0xff000000
}
}
}
/* We use this for both B8G8R8A8 -> B8G8R8X8 and B8G8R8X8 -> B8G8R8A8, since
* in both cases we're just setting the X / Alpha channel to 0xff. */
{
unsigned int x, y;
for (y = 0; y < h; ++y)
{
for (x = 0; x < w; ++x)
{
}
}
}
static inline BYTE cliptobyte(int x)
{
}
{
unsigned int x, y;
for (y = 0; y < h; ++y)
{
for (x = 0; x < w; ++x)
{
/* YUV to RGB conversion formulas from http://en.wikipedia.org/wiki/YUV:
* C = Y - 16; D = U - 128; E = V - 128;
* R = cliptobyte((298 * C + 409 * E + 128) >> 8);
* G = cliptobyte((298 * C - 100 * D - 208 * E + 128) >> 8);
* B = cliptobyte((298 * C + 516 * D + 128) >> 8);
* Two adjacent YUY2 pixels are stored as four bytes: Y0 U Y1 V .
* U and V are shared between the pixels. */
if (!(x & 1)) /* For every even pixel, read new U and V. */
{
}
dst_line[x] = 0xff000000
/* Scale RGB values to 0..255 range,
* then clip them if still not in range (may be negative),
* then shift them within DWORD if necessary. */
src_line += 2;
}
}
}
{
unsigned int x, y;
for (y = 0; y < h; ++y)
{
for (x = 0; x < w; ++x)
{
/* YUV to RGB conversion formulas from http://en.wikipedia.org/wiki/YUV:
* C = Y - 16; D = U - 128; E = V - 128;
* R = cliptobyte((298 * C + 409 * E + 128) >> 8);
* G = cliptobyte((298 * C - 100 * D - 208 * E + 128) >> 8);
* B = cliptobyte((298 * C + 516 * D + 128) >> 8);
* Two adjacent YUY2 pixels are stored as four bytes: Y0 U Y1 V .
* U and V are shared between the pixels. */
if (!(x & 1)) /* For every even pixel, read new U and V. */
{
}
/* Scale RGB values to 0..255 range,
* then clip them if still not in range (may be negative),
* then shift them within DWORD if necessary. */
src_line += 2;
}
}
}
struct d3dfmt_converter_desc
{
void (*convert)(const BYTE *src, BYTE *dst, DWORD pitch_in, DWORD pitch_out, unsigned int w, unsigned int h);
};
static const struct d3dfmt_converter_desc converters[] =
{
};
enum wined3d_format_id to)
{
unsigned int i;
for (i = 0; i < (sizeof(converters) / sizeof(*converters)); ++i)
{
return &converters[i];
}
return NULL;
}
/*****************************************************************************
* surface_convert_format
*
* Creates a duplicate of a surface in a different format. Is used by Blt to
* blit between surfaces with different formats.
*
* Parameters
* source: Source surface
* fmt: Requested destination format
*
*****************************************************************************/
static struct wined3d_surface *surface_convert_format(struct wined3d_surface *source, enum wined3d_format_id to_fmt)
{
const struct d3dfmt_converter_desc *conv;
if (!conv)
{
FIXME("Cannot find a conversion function from format %s to %s.\n",
return NULL;
}
/* FIXME: Multisampled conversion? */
#ifdef VBOX_WITH_WDDM
if (FAILED(hr = wined3d_surface_create(source->resource.device, source->resource.width, source->resource.height,
)))
#else
if (FAILED(hr = wined3d_surface_create(source->resource.device, source->resource.width, source->resource.height,
#endif
{
ERR("Failed to create a destination surface for conversion.\n");
return NULL;
}
{
ERR("Failed to lock the source surface.\n");
return NULL;
}
{
ERR("Failed to lock the destination surface.\n");
return NULL;
}
return ret;
}
{
unsigned int x, y;
/* Do first row */
#define COLORFILL_ROW(type) \
do { \
for (x = 0; x < width; ++x) \
} while(0)
switch (bpp)
{
case 1:
break;
case 2:
break;
case 3:
{
for (x = 0; x < width; ++x, d += 3)
{
d[0] = (color ) & 0xff;
}
break;
}
case 4:
break;
default:
return WINED3DERR_NOTAVAILABLE;
}
/* Now copy first row. */
for (y = 1; y < height; ++y)
{
}
return WINED3D_OK;
}
{
return surface_from_resource(resource);
}
{
{
WARN("Trying to unmap unmapped surface.\n");
return WINEDDERR_NOTLOCKED;
}
#ifdef VBOX_WITH_WDDM
#endif
return WINED3D_OK;
}
{
TRACE("surface %p, map_desc %p, rect %s, flags %#x.\n",
{
WARN("Surface is already mapped.\n");
return WINED3DERR_INVALIDCALL;
}
{
WARN("Map rect %s is misaligned for %ux%u blocks.\n",
return WINED3DERR_INVALIDCALL;
}
#ifdef VBOX_WITH_WDDM
#endif
WARN("Trying to lock unlockable surface.\n");
/* Performance optimization: Count how often a surface is mapped, if it is
* mapped regularly do not throw away the system memory copy. This avoids
* the need to download the surface from OpenGL all the time. The surface
* is still downloaded if the OpenGL texture is changed. */
{
{
TRACE("Surface is mapped regularly, not freeing the system memory copy any more.\n");
}
}
else
map_desc->slice_pitch = 0;
if (!rect)
{
}
else
{
if ((format->flags & (WINED3DFMT_FLAG_BLOCKS | WINED3DFMT_FLAG_BROKEN_PITCH)) == WINED3DFMT_FLAG_BLOCKS)
{
/* Compressed textures are block based, so calculate the offset of
* the block that contains the top-left pixel of the locked rectangle. */
}
else
{
}
}
return WINED3D_OK;
}
{
struct wined3d_map_desc map;
{
ERR("Not supported on surfaces with application-provided memory.\n");
return WINEDDERR_NODC;
}
/* Give more detailed info for ddraw. */
return WINEDDERR_DCALREADYCREATED;
/* Can't GetDC if the surface is locked. */
return WINED3DERR_INVALIDCALL;
/* Create a DIB section if there isn't a dc yet. */
{
{
}
return WINED3DERR_INVALIDCALL;
/* Use the DIB section from now on if we are not using a PBO. */
{
}
}
/* Map the surface. */
{
return hr;
}
/* Sync the DIB with the PBO. This can't be done earlier because Map()
* activates the allocatedMemory. */
#ifdef VBOX_WITH_WINE_FIX_PBOPSM
#else
#endif
{
/* GetDC on palettized formats is unsupported in D3D9, and the method
* is missing in D3D8, so this should only be used for DX <=7
* surfaces (with non-device palettes). */
{
}
else
{
}
if (pal)
{
unsigned int i;
for (i = 0; i < 256; ++i)
{
col[i].rgbReserved = 0;
}
}
}
return WINED3D_OK;
}
{
return WINEDDERR_NODC;
{
WARN("Application tries to release invalid DC %p, surface DC is %p.\n",
return WINEDDERR_NODC;
}
/* Copy the contents of the DIB over to the PBO. */
#ifdef VBOX_WITH_WINE_FIX_PBOPSM
if (((surface->flags & (SFLAG_PBO | SFLAG_PIN_SYSMEM)) == (SFLAG_PBO | SFLAG_PIN_SYSMEM)) && surface->resource.allocatedMemory)
#else
#endif
/* We locked first, so unlock now. */
return WINED3D_OK;
}
HRESULT CDECL wined3d_surface_flip(struct wined3d_surface *surface, struct wined3d_surface *override, DWORD flags)
{
if (flags)
{
if (!once++)
else
}
{
ERR("Not supported on swapchain surfaces.\n");
return WINEDDERR_NOTFLIPPABLE;
}
/* Flipping is only supported on render targets and overlays. */
{
WARN("Tried to flip a non-render target, non-overlay surface.\n");
return WINEDDERR_NOTFLIPPABLE;
}
/* Update overlays if they're visible. */
return surface_draw_overlay(surface);
return WINED3D_OK;
}
/* Do not call while under the GL lock. */
{
{
}
else
{
struct wined3d_context *context;
/* TODO: Use already acquired context when possible. */
#ifndef VBOX
{
/* Tell opengl to try and keep this texture in video ram (well mostly) */
tmp = 0.9f;
}
#else
/* chromium code on host fails to resolve texture name to texture obj,
* most likely because the texture does not get created until it is bound
* @todo: investigate */
#endif
}
}
/* Read the framebuffer back into the surface */
static void read_from_framebuffer(struct wined3d_surface *surface, const RECT *rect, void *dest, UINT pitch)
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
int i;
/* Select the correct read buffer, and give some debug output.
* There is no need to keep track of the current read buffer or reset it, every part of the code
* that reads sets the read buffer as desired.
*/
if (surface_is_offscreen(surface))
{
/* Mapping the primary render target which is not on a swapchain.
* Read from the back buffer. */
TRACE("Mapping offscreen render target.\n");
}
else
{
/* Onscreen surfaces are always part of a swapchain */
checkGLcall("glReadBuffer");
}
/* TODO: Get rid of the extra rectangle comparison and construction of a full surface rectangle */
if (!rect)
{
local_rect.left = 0;
local_rect.top = 0;
}
else
{
local_rect = *rect;
}
/* TODO: Get rid of the extra GetPitch call, LockRect does that too. Cache the pitch */
{
case WINED3DFMT_P8_UINT:
{
{
/* In case of P8 render targets the index is stored in the alpha component */
}
else
{
/* GL can't return palettized data, so read ARGB pixels into a
* separate block of memory and convert them into palettized format
* in software. Slow, but if the app means to use palettized render
* targets and locks it...
*
* Use GL_RGB, GL_UNSIGNED_BYTE to read the surface for performance reasons
* Don't use GL_BGR as in the WINED3DFMT_R8G8B8 case, instead watch out
* for the color channels when palettizing the colors.
*/
pitch *= 3;
if (!mem)
{
ERR("Out of memory\n");
return;
}
}
}
break;
default:
}
{
checkGLcall("glBindBufferARB");
if (mem)
{
ERR("mem not null for pbo -- unexpected\n");
}
}
/* Save old pixel store pack state */
checkGLcall("glGetIntegerv");
checkGLcall("glGetIntegerv");
checkGLcall("glGetIntegerv");
/* Setup pixel store pack state -- to glReadPixels into the correct place */
checkGLcall("glPixelStorei");
checkGLcall("glPixelStorei");
checkGLcall("glPixelStorei");
checkGLcall("glReadPixels");
/* Reset previous pixel store pack state */
checkGLcall("glPixelStorei");
checkGLcall("glPixelStorei");
checkGLcall("glPixelStorei");
{
checkGLcall("glBindBufferARB");
/* Check if we need to flip the image. If we need to flip use glMapBufferARB
* to get a pointer to it and perform the flipping in software. This is a lot
* faster than calling glReadPixels for each line. In case we want more speed
* we should rerender it flipped in a FBO and read the data back from the FBO. */
if (!srcIsUpsideDown)
{
checkGLcall("glBindBufferARB");
checkGLcall("glMapBufferARB");
}
}
/* TODO: Merge this with the palettization loop below for P8 targets */
if(!srcIsUpsideDown) {
/* glReadPixels returns the image upside down, and there is no way to prevent this.
Flip the lines in software */
if(!row) {
ERR("Out of memory\n");
return;
}
}
/* Unmap the temp PBO buffer */
{
}
}
/* For P8 textures we need to perform an inverse palette lookup. This is
* done by searching for a palette index which matches the RGB value.
* Note this isn't guaranteed to work when there are multiple entries for
* the same color but we have no choice. In case of P8 render targets,
* the index is stored in the alpha component so no conversion is needed. */
{
int x, y, c;
{
}
else
{
ERR("Palette is missing, cannot perform inverse palette lookup\n");
return;
}
/* start lines pixels */
for(c = 0; c < 256; c++) {
{
break;
}
}
}
}
}
}
/* Read the framebuffer contents into a texture. Note that this function
* doesn't do any kind of flipping. Using this on an onscreen surface will
* result in a flipped D3D texture. */
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
if (surface_is_offscreen(surface))
else
checkGLcall("glReadBuffer");
checkGLcall("glCopyTexSubImage2D");
}
/* Context activation is done by the caller. */
{
struct wined3d_format format;
}
/* Context activation is done by the caller. */
void surface_prepare_texture(struct wined3d_surface *surface, struct wined3d_context *context, BOOL srgb)
{
{
UINT i;
for (i = 0; i < sub_count; ++i)
{
}
return;
}
}
void surface_prepare_rb(struct wined3d_surface *surface, const struct wined3d_gl_info *gl_info, BOOL multisample)
{
if (multisample)
{
if (surface->rb_multisample)
return;
gl_info->fbo_ops.glRenderbufferStorageMultisample(GL_RENDERBUFFER, surface->resource.multisample_type,
}
else
{
if (surface->rb_resolved)
return;
}
}
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
UINT w, h;
/* Activate the correct context for the render target */
if (!surface_is_offscreen(surface))
{
#ifndef VBOX_WINE_WITH_SINGLE_CONTEXT
#else
#endif
}
else
{
/* Primary offscreen render target */
TRACE("Offscreen render target.\n");
}
checkGLcall("glRasterPos3i");
/* If not fullscreen, we need to skip a number of bytes to find the next row of data */
{
checkGLcall("glBindBufferARB");
}
checkGLcall("glDrawPixels");
{
checkGLcall("glBindBufferARB");
}
checkGLcall("glPixelStorei(GL_UNPACK_ROW_LENGTH, 0)");
}
{
/* FIXME: Is this really how color keys are supposed to work? I think it
* makes more sense to compare the individual channels. */
}
void d3dfmt_p8_init_palette(const struct wined3d_surface *surface, BYTE table[256][4], BOOL colorkey)
{
unsigned int i;
/* Old games like StarCraft, C&C, Red Alert and others use P8 render targets.
* Reading back the RGB output each lockrect (each frame as they lock the whole screen)
* is slow. Further RGB->P8 conversion is not possible because palettes can have
* duplicate entries. Store the color key in the unused alpha component to speed the
* download up and to make conversion unneeded. */
if (!pal)
{
FIXME("No palette set.\n");
if (index_in_alpha)
{
/* Guarantees that memory representation remains correct after sysmem<->texture transfers even if
* there's no palette at this time. */
}
}
else
{
/* Get the surface's palette */
for (i = 0; i < 256; ++i)
{
/* When index_in_alpha is set the palette index is stored in the
* alpha component. In case of a readback we can then read
* GL_ALPHA. Color keying is handled in BltOverride using a
* GL_ALPHA_TEST using GL_NOT_EQUAL. In case of index_in_alpha the
* color key itself is passed to glAlphaFunc in other cases the
* alpha component of pixels that should be masked away is set to 0. */
if (index_in_alpha)
table[i][3] = i;
else
}
}
}
static HRESULT d3dfmt_convert_surface(const BYTE *src, BYTE *dst, UINT pitch, UINT width, UINT height,
{
TRACE("src %p, dst %p, pitch %u, width %u, height %u, outpitch %u, conversion_type %#x, surface %p.\n",
switch (conversion_type)
{
case WINED3D_CT_NONE:
{
break;
}
case WINED3D_CT_PALETTED:
case WINED3D_CT_PALETTED_CK:
{
unsigned int x, y;
for (y = 0; y < height; y++)
{
/* This is an 1 bpp format, using the width here is fine */
for (x = 0; x < width; x++) {
}
}
}
break;
case WINED3D_CT_CK_565:
{
/* Converting the 565 format in 5551 packed to emulate color-keying.
Note : in all these conversion, it would be best to average the averaging
pixels to get the color of the pixel that will be color-keyed to
prevent 'color bleeding'. This will be done later on if ever it is
too visible.
Note2: Nvidia documents say that their driver does not support alpha + color keying
on the same surface and disables color keying in such a case
*/
unsigned int x, y;
TRACE("Color keyed 565\n");
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++ ) {
*Dest |= 0x0001;
Dest++;
}
}
}
break;
case WINED3D_CT_CK_5551:
{
/* Converting X1R5G5B5 format to R5G5B5A1 to emulate color-keying. */
unsigned int x, y;
TRACE("Color keyed 5551\n");
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++ ) {
else
Dest++;
}
}
}
break;
case WINED3D_CT_CK_RGB24:
{
/* Converting R8G8B8 format to R8G8B8A8 with color-keying. */
unsigned int x, y;
for (y = 0; y < height; y++)
{
for (x = 0; x < width; x++) {
dstcolor |= 0xff;
source += 3;
dest += 4;
}
}
}
break;
case WINED3D_CT_RGB32_888:
{
/* Converting X8R8G8B8 format to R8G8B8A8 with color-keying. */
unsigned int x, y;
for (y = 0; y < height; y++)
{
for (x = 0; x < width; x++) {
dstcolor |= 0xff;
source += 4;
dest += 4;
}
}
}
break;
case WINED3D_CT_CK_ARGB32:
{
unsigned int x, y;
for (y = 0; y < height; ++y)
{
for (x = 0; x < width; ++x)
{
color &= ~0xff000000;
source += 4;
dest += 4;
}
}
}
break;
default:
}
return WINED3D_OK;
}
{
/* Flip the surface contents */
/* Flip the DC */
{
}
/* Flip the DIBsection */
{
}
/* Flip the surface data */
{
void* tmp;
}
/* Flip the PBO */
{
}
/* Flip the opengl texture */
{
}
{
}
}
/* Does a direct frame buffer -> texture copy. Stretching is done with single
* pixel copy calls. */
static void fb_copy_to_texture_direct(struct wined3d_surface *dst_surface, struct wined3d_surface *src_surface,
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
if (dst_surface->container)
else
/* Make sure that the top pixel is always above the bottom pixel, and keep a separate upside down flag
* glCopyTexSubImage is a bit picky about the parameters we pass to it
*/
upsidedown = TRUE;
}
/* Bind the target texture */
{
TRACE("Reading from an offscreen target\n");
upsidedown = !upsidedown;
}
else
{
}
checkGLcall("glReadBuffer");
{
FIXME("Doing a pixel by pixel copy from the framebuffer to a texture, expect major performance issues\n");
ERR("Texture filtering not supported in direct blit.\n");
}
{
ERR("Texture filtering not supported in direct blit\n");
}
if (upsidedown
{
/* Upside down copy without stretching is nice, one glCopyTexSubImage call will do. */
}
else
{
/* I have to process this row by row to swap the image,
* otherwise it would be upside down, so stretching in y direction
* doesn't cost extra time
*
* However, stretching in x direction can be avoided if not necessary
*/
{
/* Well, that stuff works, but it's very slow.
* find a better way instead
*/
{
}
}
else
{
}
}
}
checkGLcall("glCopyTexSubImage2D");
/* The texture is now most up to date - If the surface is a render target and has a drawable, this
* path is never entered
*/
}
/* Uses the hardware to stretch and flip the image */
static void fb_copy_to_texture_hwstretch(struct wined3d_surface *dst_surface, struct wined3d_surface *src_surface,
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
TRACE("Using hwstretch blit\n");
/* Activate the Proper context for reading from the source surface, set it up for blitting */
{
/* Get it a description */
}
/* Try to use an aux buffer for drawing the rectangle. This way it doesn't need restoring.
* This way we don't have to wait for the 2nd readback to finish to leave this function.
*/
{
/* Got more than one aux buffer? Use the 2nd aux buffer */
}
{
/* Only one aux buffer, but it isn't used (Onscreen rendering, or non-aux orm)? Use it! */
}
if (noBackBufferBackup)
{
checkGLcall("glGenTextures");
}
else
{
/* Backup the back buffer and copy the source buffer into a texture to draw an upside down stretched quad. If
* we are reading from the back buffer, the backup can be used as source texture
*/
checkGLcall("glEnable(texture_target)");
/* For now invalidate the texture copy of the back buffer. Drawable and sysmem copy are untouched */
}
/* Make sure that the top pixel is always above the bottom pixel, and keep a separate upside down flag
* glCopyTexSubImage is a bit picky about the parameters we pass to it
*/
upsidedown = TRUE;
}
if (src_offscreen)
{
TRACE("Reading from an offscreen target\n");
upsidedown = !upsidedown;
}
else
{
}
/* TODO: Only back up the part that will be overwritten */
checkGLcall("glCopyTexSubImage2D");
/* No issue with overriding these - the sampler is dirty due to blit usage */
checkGLcall("glTexParameteri");
checkGLcall("glTexParameteri");
{
}
else
{
checkGLcall("glReadBuffer(GL_FRONT)");
checkGLcall("glGenTextures(1, &src)");
/* TODO: Only copy the part that will be read. Use src_rect->left, src_rect->bottom as origin, but with the width watch
* out for power of 2 sizes
*/
checkGLcall("glTexImage2D");
checkGLcall("glTexParameteri");
checkGLcall("glTexParameteri");
checkGLcall("glReadBuffer(GL_BACK)");
if (texture_target != GL_TEXTURE_2D)
{
}
}
checkGLcall("glEnd and previous");
if (!upsidedown)
{
}
else
{
}
{
}
/* draw the source texture stretched and upside down. The correct surface is bound already */
/* bottom left */
/* top left */
/* top right */
/* bottom right */
checkGLcall("glEnd and previous");
{
}
/* Now read the stretched and upside down image into the destination texture */
0,
0, 0, /* We blitted the image to the origin */
checkGLcall("glCopyTexSubImage2D");
if (drawBuffer == GL_BACK)
{
/* Write the back buffer backup back. */
if (backup)
{
if (texture_target != GL_TEXTURE_2D)
{
}
}
else
{
{
}
}
/* top left */
/* bottom left */
/* bottom right */
/* top right */
}
checkGLcall("glDisable(texture_target)");
/* Cleanup */
{
checkGLcall("glDeleteTextures(1, &src)");
}
if (backup)
{
checkGLcall("glDeleteTextures(1, &backup)");
}
/* The texture is now most up to date - If the surface is a render target and has a drawable, this
* path is never entered
*/
}
/* Front buffer coordinates are always full screen coordinates, but our GL
 * drawable is limited to the window's client area. The sysmem and texture
 * copies do have the full screen size. Note that GL has a bottom-left
 * origin, while D3D has a top-left origin. */
/* Translate the D3D rectangle @rect of @surface into GL drawable coordinates
 * for @window.
 * NOTE(review): the condition that selects between the two branches below is
 * missing from this revision as shown (a bare block precedes `else`, which is
 * not valid C) -- presumably an onscreen/front-buffer check; confirm against
 * the complete source. */
void surface_translate_drawable_coords(const struct wined3d_surface *surface, HWND window, RECT *rect)
{
/* Presumably the case the VBox build does not expect to reach: it only
 * reports it (fatal under VBOX_WINE_STRICT, warning otherwise). */
{
#ifndef VBOX_WITH_WINE_FIXES
#else
# ifdef VBOX_WINE_STRICT
ERR("should not be here!");
# else
WARN("should not be here!");
# endif
#endif
}
else
{
/* Coordinate translation elided in this view -- presumably flips the y
 * axis (GL bottom-left vs D3D top-left origin); TODO confirm. */
}
}
{
const struct wined3d_gl_info *gl_info;
struct wined3d_context *context;
src_rect = *src_rect_in;
dst_rect = *dst_rect_in;
/* Make sure the surface is up-to-date. This should probably use
* surface_load_location() and worry about the destination surface too,
* unless we're overwriting it completely. */
/* Activate the destination context, set it up for blitting */
if (!surface_is_offscreen(dst_surface))
#ifndef VBOX_WINE_WITH_SINGLE_CONTEXT
#else
#endif
if (color_key)
{
checkGLcall("glEnable(GL_ALPHA_TEST)");
/* When the primary render target uses P8, the alpha component
* contains the palette index. Which means that the colorkey is one of
* the palette entries. In other cases pixels that should be masked
* away have alpha set to 0. */
else
checkGLcall("glAlphaFunc");
}
else
{
checkGLcall("glDisable(GL_ALPHA_TEST)");
}
if (color_key)
{
checkGLcall("glDisable(GL_ALPHA_TEST)");
}
/* Leave the opengl state valid for blitting */
}
/* Do not call while under the GL lock. */
/*
 * surface_color_fill - fill rectangle @rect of surface @s with the solid
 * colour @color by delegating to a blit_shader implementation.
 *
 * Returns WINED3DERR_INVALIDCALL when no blitter supports the operation.
 *
 * NOTE(review): the blitter-selection call and the success-path delegation
 * are missing from this revision as shown; as written, `blitter` would be
 * read uninitialised -- confirm against the complete source.
 */
HRESULT surface_color_fill(struct wined3d_surface *s, const RECT *rect, const struct wined3d_color *color)
{
const struct blit_shader *blitter;
/* presumably assigned by a blitter-selection helper (elided in this view) */
if (!blitter)
{
FIXME("No blitter is capable of performing the requested color fill operation.\n");
return WINED3DERR_INVALIDCALL;
}
/* success path (delegation to the selected blitter) elided in this view */
}
/* Do not call while under the GL lock. */
static HRESULT IWineD3DSurfaceImpl_BltOverride(struct wined3d_surface *dst_surface, const RECT *dst_rect,
{
TRACE("dst_surface %p, dst_rect %s, src_surface %p, src_rect %s, flags %#x, blt_fx %p, filter %s.\n",
/* Get the swapchain. One of the surfaces has to be a primary surface */
{
WARN("Destination is in sysmem, rejecting gl blt\n");
return WINED3DERR_INVALIDCALL;
}
if (src_surface)
{
{
WARN("Src is in sysmem, rejecting gl blt\n");
return WINED3DERR_INVALIDCALL;
}
}
else
{
}
/* Early sort out of cases where no render target is used */
if (!dst_swapchain && !src_swapchain
{
TRACE("No surface is render target, not using hardware blit.\n");
return WINED3DERR_INVALIDCALL;
}
/* No destination color keying supported */
{
/* Can we support that with glBlendFunc if blitting to the frame buffer? */
TRACE("Destination color key not supported in accelerated Blit, falling back to software\n");
return WINED3DERR_INVALIDCALL;
}
{
FIXME("Implement hardware blit between two surfaces on the same swapchain\n");
return WINED3DERR_INVALIDCALL;
}
if (dst_swapchain && src_swapchain)
{
FIXME("Implement hardware blit between two different swapchains\n");
return WINED3DERR_INVALIDCALL;
}
if (dst_swapchain)
{
/* Handled with regular texture -> swapchain blit */
TRACE("Blit from active render target to a swapchain\n");
}
{
FIXME("Implement blit from a swapchain to the active render target\n");
return WINED3DERR_INVALIDCALL;
}
{
/* Blit from render target to texture */
/* P8 read back is not implemented */
{
TRACE("P8 read back not supported by frame buffer to texture blit\n");
return WINED3DERR_INVALIDCALL;
}
{
TRACE("Color keying not supported by frame buffer to texture blit\n");
return WINED3DERR_INVALIDCALL;
/* Destination color key is checked above */
}
else
/* Blt is a pretty powerful call, while glCopyTexSubImage2D is not. glCopyTexSubImage cannot
* flip the image nor scale it.
*
* -> If the app asks for a unscaled, upside down copy, just perform one glCopyTexSubImage2D call
* -> If the app wants an image with an unscaled width, copy it line per line
* -> If the app wants a image that is scaled on the x axis, and the destination rectangle is smaller
* than the frame buffer, draw an upside down scaled image onto the fb, read it back and restore the
* back buffer. This is slower than reading line per line, thus not used for flipping
* -> If the app wants a scaled image with a dest rect that is bigger than the fb, it has to be copied
* pixel by pixel. */
{
TRACE("No stretching in x direction, using direct framebuffer -> texture copy.\n");
}
else
{
TRACE("Using hardware stretching to flip / stretch the texture.\n");
}
{
}
else
{
}
return WINED3D_OK;
}
else if (src_surface)
{
/* Blit from offscreen surface to render target */
{
FIXME("Unsupported blit operation falling back to software\n");
return WINED3DERR_INVALIDCALL;
}
/* Color keying: Check if we have to do a color keyed blt,
* and if not check if a color key is activated.
*
* Just modify the color keying parameters in the surface and restore them afterwards
* The surface keeps track of the color key last used to load the opengl surface.
* PreLoad will catch the change to the flags and color key and reload if necessary.
*/
if (flags & WINEDDBLT_KEYSRC)
{
/* Use color key from surface */
}
else if (flags & WINEDDBLT_KEYSRCOVERRIDE)
{
/* Use color key from DDBltFx */
}
else
{
/* Do not use color key */
}
/* Restore the color key parameters */
return WINED3D_OK;
}
/* Default: Fall back to the generic blt. Not an error, a TRACE is enough */
TRACE("Didn't find any usable render target setup for hw blit, falling back to software\n");
return WINED3DERR_INVALIDCALL;
}
/* Context activation is done by the caller. */
static void surface_depth_blt(const struct wined3d_surface *surface, struct wined3d_context *context,
{
GLint old_binding = 0;
gl_info->gl_ops.gl.p_glPushAttrib(GL_ENABLE_BIT | GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT | GL_VIEWPORT_BIT);
{
gl_info->gl_ops.gl.p_glGetTexParameteriv(info.bind_target, GL_TEXTURE_COMPARE_MODE_ARB, &compare_mode);
if (compare_mode != GL_NONE)
}
if (compare_mode != GL_NONE)
}
{
{
{
TRACE("Passing to container.\n");
}
}
}
/* Context activation is done by the caller. */
/*
 * surface_load_ds_location - make the depth/stencil contents of @surface
 * current in @location (SFLAG_INTEXTURE, SFLAG_INDRAWABLE or a renderbuffer
 * location), copying between the onscreen depth buffer and the depth texture
 * as needed.
 *
 * Early returns: fixed-up depth/stencil renderbuffers are unsupported, and
 * discarded surfaces need no copy.
 *
 * NOTE(review): most GL calls and the surrounding conditions are missing
 * from this revision as shown; the hedged comments below mark where logic
 * was elided -- confirm against the complete source.
 */
void surface_load_ds_location(struct wined3d_surface *surface, struct wined3d_context *context, DWORD location)
{
GLsizei w, h;
/* TODO: Make this work for modes other than FBO */
/* presumably: compute w/h for the onscreen vs offscreen case (elided) */
{
}
else
{
}
/* presumably: nothing to do when the location is already current (elided) */
{
return;
}
if (surface->current_renderbuffer)
{
FIXME("Not supported with fixed up depth stencil.\n");
return;
}
/* Discarded surfaces: mark the requested location current without copying. */
{
TRACE("Surface was discarded, no need copy data.\n");
switch (location)
{
case SFLAG_INTEXTURE:
break;
case SFLAG_INRB_MULTISAMPLE:
break;
case SFLAG_INDRAWABLE:
/* Nothing to do */
break;
default:
}
return;
}
{
FIXME("No up to date depth stencil location.\n");
return;
}
if (location == SFLAG_INTEXTURE)
{
GLint old_binding = 0;
/* The onscreen drawable may be smaller
* than the offscreen surface. Don't overwrite the offscreen surface
* with undefined data. */
TRACE("Copying onscreen depth buffer to depth texture.\n");
if (!device->depth_blt_texture)
/* Note that we use depth_blt here as well, rather than glCopyTexImage2D
 * directly on the FBO texture. That's because we need to flip. */
{
}
else
{
}
/* We use GL_DEPTH_COMPONENT instead of the surface's specific
* internal format, because the internal format might include stencil
* data. In principle we should copy stencil data as well, but unless
* the driver supports stencil export it's hard to do, and doesn't
* seem to be needed in practice. If the hardware doesn't support
* writing stencil data, the glCopyTexImage2D() call might trigger
* software fallbacks. */
/* Do the actual blit */
checkGLcall("depth_blt");
}
else if (location == SFLAG_INDRAWABLE)
{
TRACE("Copying depth texture to onscreen depth buffer.\n");
checkGLcall("depth_blt");
}
else
{
/* presumably an unreachable/error case for other locations (elided) */
}
}
{
struct wined3d_surface *overlay;
TRACE("surface %p, location %s, persistent %#x.\n",
&& (location & SFLAG_INDRAWABLE))
ERR("Trying to invalidate the SFLAG_INDRAWABLE location of an offscreen surface.\n");
if (persistent)
{
{
{
TRACE("Passing to container.\n");
}
}
#ifdef VBOX_WITH_WDDM
{
/* sometimes wine can call ModifyLocation(SFLAG_INTEXTURE, TRUE) for surfaces that do not yet have
* ogl texture backend assigned, e.g. when doing ColorFill right after surface creation
* to prevent wine state breakage that could occur later on in that case, we check
* whether tex gen is needed here and generate it accordingly */
if (!surface->texture_name)
{
if (location & SFLAG_INTEXTURE)
{
Assert(0);
// struct wined3d_context *context = NULL;
// IWineD3DDeviceImpl *device = This->resource.device;
// const struct wined3d_gl_info *gl_info;
//
// if (!device->isInDraw) context = context_acquire(device, NULL, CTXUSAGE_RESOURCELOAD);
// gl_info = context->gl_info;
//
// surface_prepare_texture(This, gl_info, FALSE);
//
// if (context) context_release(context);
}
}
if (!surface->texture_name_srgb)
{
if (location & SFLAG_INSRGBTEX)
{
Assert(0);
// struct wined3d_context *context = NULL;
// IWineD3DDeviceImpl *device = This->resource.device;
// const struct wined3d_gl_info *gl_info;
//
// if (!device->isInDraw) context = context_acquire(device, NULL, CTXUSAGE_RESOURCELOAD);
// gl_info = context->gl_info;
//
// surface_prepare_texture(This, gl_info, TRUE);
//
// if (context) context_release(context);
}
}
}
#endif
/* Redraw emulated overlays, if any */
{
{
}
}
}
else
{
if ((surface->flags & (SFLAG_INTEXTURE | SFLAG_INSRGBTEX)) && (location & (SFLAG_INTEXTURE | SFLAG_INSRGBTEX)))
{
{
TRACE("Passing to container\n");
}
}
}
#ifdef VBOX_WITH_WDDM
/* with the shared resource only texture can be considered valid
* to make sure changes done to the resource in the other device context are visible
* because the resource contents are shared via the texture.
* This is why we ensure texture location is the one and only which is always valid */
} else {
}
}
{
} else {
}
}
#endif
{
}
}
{
switch (location)
{
case SFLAG_INSYSMEM:
return WINED3D_RESOURCE_ACCESS_CPU;
case SFLAG_INDRAWABLE:
case SFLAG_INSRGBTEX:
case SFLAG_INTEXTURE:
case SFLAG_INRB_MULTISAMPLE:
case SFLAG_INRB_RESOLVED:
return WINED3D_RESOURCE_ACCESS_GPU;
default:
return 0;
}
}
{
/* Download the surface to system memory. */
{
struct wined3d_context *context;
/* TODO: Use already acquired context when possible. */
return;
}
{
return;
}
FIXME("Can't load surface %p with location flags %#x into sysmem.\n",
}
{
struct wined3d_format format;
{
ERR("Trying to load offscreen surface into SFLAG_INDRAWABLE.\n");
return WINED3DERR_INVALIDCALL;
}
{
RECT r;
return WINED3D_OK;
}
{
/* This needs colorspace conversion from sRGB to RGB. We take the slow
* path through sysmem. */
}
/* Don't use PBOs for converted surfaces. During PBO conversion we look at
* SFLAG_CONVERTED but it isn't set (yet) in all cases where it is getting
* called. */
{
struct wined3d_context *context;
/* TODO: Use already acquired context when possible. */
}
{
/* Stick to the alignment for the converted surface too, makes it
* easier to load the surface. */
{
return E_OUTOFMEMORY;
}
}
else
{
}
/* Don't delete PBO memory. */
return WINED3D_OK;
}
{
struct wined3d_context *context;
struct wined3d_bo_address data;
struct wined3d_format format;
{
return WINED3D_OK;
}
{
if (srgb)
else
return WINED3D_OK;
}
{
DWORD src_location = surface->flags & SFLAG_INRB_RESOLVED ? SFLAG_INRB_RESOLVED : SFLAG_INRB_MULTISAMPLE;
return WINED3D_OK;
}
/* Upload from system memory */
if (srgb)
{
{
/* Performance warning... */
}
}
else
{
{
/* Performance warning... */
}
}
{
WARN("Trying to load a texture from sysmem, but SFLAG_INSYSMEM is not set.\n");
/* Lets hope we get it from somewhere... */
}
/* TODO: Use already acquired context when possible. */
{
}
/* Don't use PBOs for converted surfaces. During PBO conversion we look at
* SFLAG_CONVERTED but it isn't set (yet) in all cases where it is getting
* called. */
{
}
{
/* This code is entered for texture formats which need a fixup. */
/* Stick to the alignment for the converted surface too, makes it easier to load the surface */
{
return E_OUTOFMEMORY;
}
}
{
/* This code is only entered for color keying fixups */
/* Stick to the alignment for the converted surface too, makes it easier to load the surface */
{
return E_OUTOFMEMORY;
}
}
else
{
}
/* Don't delete PBO memory. */
return WINED3D_OK;
}
{
ERR("Trying to resolve multisampled surface %p, but location SFLAG_INRB_MULTISAMPLE not current.\n", surface);
}
{
TRACE("surface %p, location %s, rect %s.\n", surface, debug_surflocation(location), wine_dbgstr_rect(rect));
{
{
#ifndef VBOX_WITH_WDDM
return WINED3D_OK;
#else
goto post_process;
#endif
}
{
/* Already up to date, nothing to do. */
#ifndef VBOX_WITH_WDDM
return WINED3D_OK;
#else
goto post_process;
#endif
}
else
{
return WINED3DERR_INVALIDCALL;
}
}
{
TRACE("Location already up to date.\n");
#ifndef VBOX_WITH_WDDM
return WINED3D_OK;
#else
goto post_process;
#endif
}
if (WARN_ON(d3d_surface))
{
WARN("Operation requires %#x access, but surface only has %#x.\n",
}
{
return WINED3DERR_DEVICELOST;
}
switch (location)
{
case SFLAG_INSYSMEM:
break;
case SFLAG_INDRAWABLE:
return hr;
break;
case SFLAG_INRB_RESOLVED:
break;
case SFLAG_INTEXTURE:
case SFLAG_INSRGBTEX:
return hr;
break;
default:
break;
}
#ifdef VBOX_WITH_WDDM
{
/* with the shared resource only texture can be considered valid
* to make sure changes done to the resource in the other device context are visible
* because the resource contents are shared via the texture.
* One can load and use other locations as needed,
* but they should be reloaded each time on each usage */
/* @todo: SFLAG_INSRGBTEX ?? */
// if (in_fbo)
// {
// surface->flags |= SFLAG_INDRAWABLE;
// }
}
{
}
else
#endif
{
if (!rect)
{
}
{
}
}
return WINED3D_OK;
}
{
struct wined3d_swapchain *swapchain;
/* Not on a swapchain - must be offscreen */
return TRUE;
/* The front buffer is always onscreen */
/* If the swapchain is rendered to an FBO, the backbuffer is
* offscreen, otherwise onscreen */
return swapchain->render_to_fbo;
}
/* Context activation is done by the caller. */
/* This function is used in case of 8bit paletted textures using GL_EXT_paletted_texture */
/* Context activation is done by the caller. */
/* Upload the palette of an 8-bit (P8) paletted @surface to the GPU via
 * GL_EXT_paletted_texture.
 * NOTE(review): the `if` branch preceding the `else` below is missing from
 * this revision as shown -- presumably the fallback path when the extension
 * is unavailable; confirm against the complete source. */
static void ffp_blit_p8_upload_palette(const struct wined3d_surface *surface, const struct wined3d_gl_info *gl_info)
{
else
TRACE("Using GL_EXT_PALETTED_TEXTURE for 8-bit paletted texture support\n");
}
/* Context activation is done by the caller. */
/* Prepare the fixed-function pipeline for blitting from @surface: bind the
 * surface texture and enable its texture target; always reports success.
 * NOTE(review): the texture bind and the `if` matching the `else` below are
 * missing from this revision as shown -- confirm against the complete
 * source. */
static HRESULT ffp_blit_set(void *blit_priv, struct wined3d_context *context, const struct wined3d_surface *surface)
{
else
/* When EXT_PALETTED_TEXTURE is around, palette conversion is done by the GPU
 * else the surface is converted in software at upload time in LoadLocation.
 */
checkGLcall("glEnable(target)");
return WINED3D_OK;
}
/* Context activation is done by the caller. */
{
checkGLcall("glDisable(GL_TEXTURE_2D)");
{
checkGLcall("glDisable(GL_TEXTURE_CUBE_MAP_ARB)");
}
{
checkGLcall("glDisable(GL_TEXTURE_RECTANGLE_ARB)");
}
}
const RECT *src_rect, DWORD src_usage, enum wined3d_pool src_pool, const struct wined3d_format *src_format,
const RECT *dst_rect, DWORD dst_usage, enum wined3d_pool dst_pool, const struct wined3d_format *dst_format)
{
enum complex_fixup src_fixup;
switch (blit_op)
{
return FALSE;
{
TRACE("Checking support for fixup:\n");
}
{
TRACE("Destination fixups are not supported\n");
return FALSE;
}
{
TRACE("P8 fixup supported\n");
return TRUE;
}
/* We only support identity conversions. */
{
TRACE("[OK]\n");
return TRUE;
}
TRACE("[FAILED]\n");
return FALSE;
if (dst_pool == WINED3D_POOL_SYSTEM_MEM)
return FALSE;
{
if (!((dst_format->flags & WINED3DFMT_FLAG_FBO_ATTACHABLE) || (dst_usage & WINED3DUSAGE_RENDERTARGET)))
return FALSE;
}
else if (!(dst_usage & WINED3DUSAGE_RENDERTARGET))
{
TRACE("Color fill not supported\n");
return FALSE;
}
/* FIXME: We should reject color fills on formats with fixups,
* but this would break P8 color fills for example. */
return TRUE;
return TRUE;
default:
return FALSE;
}
}
/* Do not call while under the GL lock. */
/* FFP blitter colour fill: implemented as a single-render-target clear of
 * dst_rect with WINED3DCLEAR_TARGET (depth 0.0, stencil 0).
 * NOTE(review): the remainder of the parameter list and the local `fb` /
 * `draw_rect` setup are missing from this revision as shown -- confirm
 * against the complete source. */
static HRESULT ffp_blit_color_fill(struct wined3d_device *device, struct wined3d_surface *dst_surface,
{
device_clear_render_targets(device, 1, &fb, 1, dst_rect, &draw_rect, WINED3DCLEAR_TARGET, color, 0.0f, 0);
return WINED3D_OK;
}
/* Do not call while under the GL lock. */
{
device_clear_render_targets(device, 0, &fb, 1, rect, &draw_rect, WINED3DCLEAR_ZBUFFER, 0, depth, 0);
return WINED3D_OK;
}
const struct blit_shader ffp_blit = {
};
{
return WINED3D_OK;
}
/* Context activation is done by the caller. */
{
}
/* Context activation is done by the caller. */
/* The CPU blitter operates on system memory and needs no GL state set up,
 * so this hook performs no work and always succeeds. */
static HRESULT cpu_blit_set(void *blit_priv, struct wined3d_context *context, const struct wined3d_surface *surface)
{
return WINED3D_OK;
}
/* Context activation is done by the caller. */
{
}
const RECT *src_rect, DWORD src_usage, enum wined3d_pool src_pool, const struct wined3d_format *src_format,
const RECT *dst_rect, DWORD dst_usage, enum wined3d_pool dst_pool, const struct wined3d_format *dst_format)
{
if (blit_op == WINED3D_BLIT_OP_COLOR_FILL)
{
return TRUE;
}
return FALSE;
}
{
UINT x, y;
if (!flags)
{
{
}
return WINED3D_OK;
}
{
{
case WINED3DFMT_DXT1:
{
struct block
{
};
for (x = 0; x < row_block_count; ++x)
{
}
}
return WINED3D_OK;
case WINED3DFMT_DXT3:
{
struct block
{
};
for (x = 0; x < row_block_count; ++x)
{
}
}
return WINED3D_OK;
default:
FIXME("Compressed flip not implemented for format %s.\n",
return E_NOTIMPL;
}
}
FIXME("Unsupported blit on compressed surface (format %s, flags %#x, DDFX %#x).\n",
return E_NOTIMPL;
}
{
int x, y;
TRACE("dst_surface %p, dst_rect %s, src_surface %p, src_rect %s, flags %#x, fx %p, filter %s.\n",
if (src_surface == dst_surface)
{
}
else
{
if (src_surface)
{
{
if (!src_surface)
{
/* The conv function writes a FIXME */
WARN("Cannot convert source surface format to dest format.\n");
goto release;
}
}
}
else
{
}
}
if (src_surface)
if (src_surface != dst_surface)
else
{
if (src_surface == dst_surface)
{
FIXME("Only plain blits supported on compressed surfaces.\n");
goto release;
}
{
WARN("Stretching not supported on compressed surfaces.\n");
goto release;
}
{
WARN("Source rectangle not block-aligned.\n");
goto release;
}
{
WARN("Destination rectangle not block-aligned.\n");
goto release;
}
goto release;
}
/* First, all the 'source-less' blits */
if (flags & WINEDDBLT_COLORFILL)
{
flags &= ~WINEDDBLT_COLORFILL;
}
if (flags & WINEDDBLT_DEPTHFILL)
{
FIXME("DDBLT_DEPTHFILL needs to be implemented!\n");
}
if (flags & WINEDDBLT_ROP)
{
/* Catch some degenerate cases here. */
{
case BLACKNESS:
break;
case 0xaa0029: /* No-op */
break;
case WHITENESS:
break;
case SRCCOPY: /* Well, we do that below? */
break;
default:
goto error;
}
flags &= ~WINEDDBLT_ROP;
}
if (flags & WINEDDBLT_DDROPS)
{
}
/* Now the 'with source' blits. */
if (src_surface)
{
goto release;
{
/* Can happen when d3d9 apps do a StretchRect() call which isn't handled in GL. */
}
if (!flags)
{
/* No effects, we can cheat here. */
{
{
/* No stretching in either direction. This needs to be as
* fast as possible. */
/* Check for overlapping surfaces. */
{
/* No overlap, or dst above src, so copy from top downwards. */
for (y = 0; y < dstheight; ++y)
{
}
}
{
/* Copy from bottom upwards. */
for (y = 0; y < dstheight; ++y)
{
}
}
else
{
/* Src and dst overlapping on the same line, use memmove. */
for (y = 0; y < dstheight; ++y)
{
}
}
}
else
{
/* Stretching in y direction only. */
{
}
}
}
else
{
/* Stretching in X direction. */
int last_sy = -1;
{
{
/* This source row is the same as last source row -
* Copy the already stretched row. */
}
else
{
#define STRETCH_ROW(type) \
do { \
d[x] = s[sx >> 16]; \
} while(0)
switch(bpp)
{
case 1:
break;
case 2:
break;
case 4:
break;
case 3:
{
const BYTE *s;
{
d[0] = (pixel ) & 0xff;
d += 3;
}
break;
}
default:
goto error;
}
}
}
}
}
else
{
if (flags & (WINEDDBLT_KEYSRC | WINEDDBLT_KEYDEST | WINEDDBLT_KEYSRCOVERRIDE | WINEDDBLT_KEYDESTOVERRIDE))
{
/* The color keying flags are checked for correctness in ddraw */
if (flags & WINEDDBLT_KEYSRC)
{
}
else if (flags & WINEDDBLT_KEYSRCOVERRIDE)
{
}
if (flags & WINEDDBLT_KEYDEST)
{
/* Destination color keys are taken from the source surface! */
}
else if (flags & WINEDDBLT_KEYDESTOVERRIDE)
{
}
if (bpp == 1)
{
keymask = 0xff;
}
else
{
| masks[1]
| masks[2];
}
flags &= ~(WINEDDBLT_KEYSRC | WINEDDBLT_KEYDEST | WINEDDBLT_KEYSRCOVERRIDE | WINEDDBLT_KEYDESTOVERRIDE);
}
if (flags & WINEDDBLT_DDFX)
{
{
/* I don't think we need to do anything about this flag */
WARN("flags=DDBLT_DDFX nothing done for WINEDDBLTFX_ARITHSTRETCHY\n");
}
{
tmp = dBottomRight;
dBottomLeft = tmp;
}
{
dBottomLeft = tmp;
dBottomRight = tmp;
}
{
/* I don't think we need to do anything about this flag */
WARN("flags=DDBLT_DDFX nothing done for WINEDDBLTFX_NOTEARING\n");
}
{
tmp = dBottomRight;
tmp = dBottomLeft;
}
{
}
{
dBottomLeft = tmp;
}
{
/* I don't think we need to do anything about this flag */
WARN("flags=WINEDDBLT_DDFX nothing done for WINEDDBLTFX_ZBUFFERBASEDEST\n");
}
flags &= ~(WINEDDBLT_DDFX);
}
#define COPY_COLORKEY_FX(type) \
do { \
const type *s; \
{ \
dx = d; \
{ \
{ \
} \
} \
} \
} while(0)
switch (bpp)
{
case 1:
break;
case 2:
break;
case 4:
break;
case 3:
{
const BYTE *s;
{
dx = d;
{
{
}
}
d += dstyinc;
}
break;
}
default:
FIXME("%s color-keyed blit not implemented for bpp %u!\n",
goto error;
}
}
}
{
}
/* Release the converted surface, if any. */
return hr;
}
/* Do not call while under the GL lock. */
static HRESULT cpu_blit_color_fill(struct wined3d_device *device, struct wined3d_surface *dst_surface,
{
}
/* Do not call while under the GL lock. */
{
FIXME("Depth filling not implemented by cpu_blit.\n");
return WINED3DERR_INVALIDCALL;
}
const struct blit_shader cpu_blit = {
};
static HRESULT surface_init(struct wined3d_surface *surface, UINT alignment, UINT width, UINT height,
#ifdef VBOX_WITH_WDDM
, void *pvClientMem
#endif
)
{
unsigned int resource_size;
if (multisample_quality > 0)
{
multisample_quality = 0;
}
/* Quick lockable sanity check.
* TODO: remove this after surfaces, usage and lockability have been debugged properly
* this function is too deep to need to care about things like this.
* Levels need to be checked too, since they all affect what can be done. */
switch (pool)
{
case WINED3D_POOL_SCRATCH:
if (!lockable)
{
FIXME("Called with a pool of SCRATCH and a lockable of FALSE "
"which are mutually exclusive, setting lockable to TRUE.\n");
}
break;
case WINED3D_POOL_SYSTEM_MEM:
if (!lockable)
FIXME("Called with a pool of SYSTEMMEM and a lockable of FALSE, this is acceptable but unexpected.\n");
break;
case WINED3D_POOL_MANAGED:
if (usage & WINED3DUSAGE_DYNAMIC)
FIXME("Called with a pool of MANAGED and a usage of DYNAMIC which are mutually exclusive.\n");
break;
case WINED3D_POOL_DEFAULT:
if (lockable && !(usage & (WINED3DUSAGE_DYNAMIC | WINED3DUSAGE_RENDERTARGET | WINED3DUSAGE_DEPTHSTENCIL)))
WARN("Creating a lockable surface with a POOL of DEFAULT, that doesn't specify DYNAMIC usage.\n");
break;
default:
break;
};
FIXME("Trying to create a render target that isn't in the default pool.\n");
/* FIXME: Check that the format is supported by the device. */
if (!resource_size)
return WINED3DERR_INVALIDCALL;
else
#ifdef VBOX_WITH_WDDM
#endif
);
{
return hr;
}
#ifdef VBOX_WITH_WDDM
/* this will be a nop for the non-shared resource,
* for the shared resource this will ensure the surface is initialized properly */
#endif
/* "Standalone" surface. */
/* Flags */
if (flags & WINED3D_SURFACE_DISCARD)
if (flags & WINED3D_SURFACE_PIN_SYSMEM)
/* I'm not sure if this qualifies as a hack or as an optimization. It
* seems reasonable to assume that lockable render targets will get
* locked, so we might as well set SFLAG_DYNLOCK right at surface
* creation. However, the other reason we want to do this is that several
* ddraw applications access surface memory while the surface isn't
* mapped. The SFLAG_DYNLOCK behaviour of keeping SYSMEM around for
* future locks prevents these from crashing. */
#ifdef VBOX_WITH_WDDM
#endif
/* Mark the texture as dirty so that it gets loaded first time around. */
TRACE("surface %p, memory %p, size %u\n",
/* Call the private setup routine */
{
return hr;
}
/* Similar to lockable rendertargets above, creating the DIB section
* during surface initialization prevents the sysmem pointer from changing
* after a wined3d_surface_getdc() call. */
{
}
#ifdef VBOX_WITH_WDDM
if (VBOXSHRC_IS_SHARED(surface))
{
if (!VBOXSHRC_IS_SHARED_OPENED(surface))
{
Assert(!(*shared_handle));
}
else
{
#ifdef DEBUG_misha
ERR("test this!");
#endif
}
/* Flush to ensure the texture is allocated/referenced before it is
 * used/released by another context or process. */
}
else
{
}
#endif
return hr;
}
#ifdef VBOX_WITH_WDDM
, void *pvClientMem
#endif
)
{
struct wined3d_surface *object;
TRACE("device %p, width %u, height %u, format %s\n",
TRACE("surface %p, usage %s (%#x), pool %s, multisample_type %#x, multisample_quality %u\n",
if (!object)
return WINED3DERR_OUTOFVIDEOMEMORY;
#ifdef VBOX_WITH_WDDM
)))
#else
#endif
{
return hr;
}
return hr;
}
#ifdef VBOX_WITH_WDDM
{
struct wined3d_texture *texture;
{
}
if (!surface->texture_name)
{
ERR("no texture name!");
return E_FAIL;
}
return S_OK;
}
{
{
return hr;
}
return S_OK;
}
#endif