/* DevE1000.cpp, revision aa842cf37c9d778a25c1acf8bab91528c54bb996 */
/* $Id$ */
/** @file
* DevE1000 - Intel 82540EM Ethernet Controller Emulation.
*
 * Implemented in accordance with the specification:
 *
 *      "8254x Family of Gigabit Ethernet Controllers Software Developer's Manual",
 *      317453-002 Revision 3.5
*
* @todo IPv6 checksum offloading support
* @todo Flexible Filter / Wakeup (optional?)
*/
/*
* Copyright (C) 2007-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_DEV_E1000
#include <iprt/asm.h>
#include <iprt/semaphore.h>
#include <iprt/string.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/vmm/pdmnetifs.h>
#include "VBoxDD.h"
#include "DevEEPROM.h"
#include "DevE1000Phy.h"
/* Options *******************************************************************/
/** @def E1K_INIT_RA0
 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address filter
 * table to the MAC address obtained from CFGM. Most guests read the MAC address
 * from the EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
* being already set (see @bugref{4657}).
*/
#define E1K_INIT_RA0
/** @def E1K_LSC_ON_SLU
 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
 * the guest driver brings up the link via the STATUS.LU bit. Again, the only guest
 * that requires it is Mac OS X (see @bugref{4657}).
*/
#define E1K_LSC_ON_SLU
/** @def E1K_ITR_ENABLED
 * E1K_ITR_ENABLED reduces the number of interrupts generated by E1000 if the
 * guest driver requests it by writing a non-zero value to the Interrupt
 * Throttling Register (see section 13.4.18 in "8254x Family of Gigabit
 * Ethernet Controllers Software Developer's Manual").
*/
//#define E1K_ITR_ENABLED
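/* A worked example of the formula above (derived from the 256 ns ITR granularity,
 * not from this source): writing ITR = 4000 yields a minimum interval of
 * 4000 * 256 ns = 1.024 ms between interrupts, i.e. at most ~976 interrupts/sec. */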
/** @def E1K_TX_DELAY
 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams by
 * holding packets back instead of sending them immediately. This allows several
 * packets to be sent in one batch, reducing the number of acknowledgments. Note that
 * it effectively disables the R0 TX path, forcing transmission in R3.
*/
//#define E1K_TX_DELAY 150
/** @def E1K_USE_TX_TIMERS
 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if the
 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
 * register. Enabling it showed no positive effect on existing guests, so it
 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
 * Ethernet Controllers Software Developer's Manual" for a more detailed
 * explanation.
*/
//#define E1K_USE_TX_TIMERS
/** @def E1K_NO_TAD
 * E1K_NO_TAD disables one of the two timers enabled by E1K_USE_TX_TIMERS, the
 * Transmit Absolute Delay timer. This timer sets the maximum time interval
 * during which TX interrupts can be postponed (delayed). It has no effect
 * if E1K_USE_TX_TIMERS is not defined.
*/
//#define E1K_NO_TAD
/** @def E1K_REL_DEBUG
 * E1K_REL_DEBUG enables debug logging at levels 1, 2 and 3 in release builds.
*/
//#define E1K_REL_DEBUG
/** @def E1K_INT_STATS
* E1K_INT_STATS enables collection of internal statistics used for
* debugging of delayed interrupts, etc.
*/
//#define E1K_INT_STATS
/** @def E1K_WITH_MSI
* E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
*/
//#define E1K_WITH_MSI
/** @def E1K_WITH_TX_CS
* E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
*/
#define E1K_WITH_TX_CS
/** @def E1K_WITH_TXD_CACHE
* E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
* single physical memory read (or two if it wraps around the end of TX
 * descriptor ring). It is required for proper functioning of bandwidth
 * resource control, as it makes it possible to compute the exact sizes of
 * packets prior to allocating their buffers (see @bugref{5582}).
*/
#define E1K_WITH_TXD_CACHE
/** @def E1K_WITH_RXD_CACHE
* E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
* single physical memory read (or two if it wraps around the end of RX
* descriptor ring). Intel's packet driver for DOS needs this option in
* order to work properly (see @bugref{6217}).
*/
#define E1K_WITH_RXD_CACHE
/* End of Options ************************************************************/
#ifdef E1K_WITH_TXD_CACHE
/**
 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
 * in the state structure. It limits the number of descriptors loaded in one
 * batch read. For example, a Linux guest may use up to 20 descriptors per
 * TSE packet; the largest TSE packet seen (from a Windows guest) used 45 descriptors.
*/
# define E1K_TXD_CACHE_SIZE 64u
#endif /* E1K_WITH_TXD_CACHE */
#ifdef E1K_WITH_RXD_CACHE
/**
 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
 * in the state structure. It limits the number of descriptors loaded in one
 * batch read. For example, a Windows XP guest adds 15 RX descriptors at a time.
*/
# define E1K_RXD_CACHE_SIZE 16u
#endif /* E1K_WITH_RXD_CACHE */
/* Little helpers ************************************************************/
#define htonl(x) ASMByteSwapU32(x)
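/* Note: defining htonl() as an unconditional byte swap assumes a little-endian host. */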
#ifndef DEBUG
# ifdef E1K_REL_DEBUG
# define DEBUG
//# define E1kLog3(a) do {} while (0)
# else
# define E1kLog(a) do {} while (0)
# define E1kLog2(a) do {} while (0)
# define E1kLog3(a) do {} while (0)
# define E1kLogX(x, a) do {} while (0)
# endif
#else
//# define E1kLog(a) do {} while (0)
//# define E1kLog2(a) do {} while (0)
//# define E1kLog3(a) do {} while (0)
#endif
#if 0
# define E1kLogRel(a) LogRel(a)
#else
# define E1kLogRel(a) do { } while (0)
#endif
//#undef DEBUG
#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
#define E1K_INC_CNT32(cnt) \
    do { \
        if (cnt < UINT32_MAX) \
            cnt++; \
    } while (0)
/** Adds to a counter kept as a low/high pair of 32-bit registers, saturating at UINT64_MAX. */
#define E1K_ADD_CNT64(cnt, val) \
    do { \
        uint64_t u64Cnt = RT_MAKE_U64(cnt, cnt##H); \
        if (UINT64_MAX - u64Cnt < (val)) \
            u64Cnt = UINT64_MAX; \
        else \
            u64Cnt += (val); \
        cnt    = (uint32_t)u64Cnt; \
        cnt##H = (uint32_t)(u64Cnt >> 32); \
    } while (0)
#ifdef E1K_INT_STATS
# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
#else /* E1K_INT_STATS */
# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
#endif /* E1K_INT_STATS */
/*****************************************************************************/
#define E1K_CHIP_82540EM 0
#define E1K_CHIP_82543GC 1
#define E1K_CHIP_82545EM 2
/** Different E1000 chips. */
static const struct E1kChips
{
    uint16_t uPCIVendorId;
    uint16_t uPCIDeviceId;
    uint16_t uPCISubsystemVendorId;
    uint16_t uPCISubsystemId;
    const char *pcszName;
} g_Chips[] =
{
/* Vendor Device SSVendor SubSys Name */
{ 0x8086,
/* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
#ifdef E1K_WITH_MSI
0x105E,
#else
0x100E,
#endif
0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
{ 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
{ 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
};
/* The size of register area mapped to I/O space */
#define E1K_IOPORT_SIZE 0x8
/* The size of memory-mapped register area */
#define E1K_MM_SIZE 0x20000
#define E1K_MAX_TX_PKT_SIZE 16288
#define E1K_MAX_RX_PKT_SIZE 16384
/*****************************************************************************/
/** Sets the specified bits of the register to the given value. */
#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
#define EERD_DATA_SHIFT 16
#define EERD_ADDR_SHIFT 8
#define MDIC_DATA_SHIFT 0
#define MDIC_REG_SHIFT 16
#define MDIC_PHY_SHIFT 21
#define RCTL_LBM_SHIFT 6
#define RCTL_RDMTS_SHIFT 8
#define RCTL_MO_SHIFT 12
#define RCTL_BSIZE_SHIFT 16
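/*
 * Illustrative sketch only (not part of the device code): the SET_BITS macros
 * above rely on the <REG>_<FIELD>_MASK / _SHIFT naming convention. Assuming a
 * hypothetical RCTL_BSIZE_MASK covering bits 17:16 (the field whose shift is
 * defined above), setting the buffer-size bits of a local register copy could
 * look like this:
 */
#if 0
# define RCTL_BSIZE_MASK 0x00030000   /* Assumed for the sketch; the real definition lives elsewhere. */
static uint32_t e1kSetBsizeSketch(void)
{
    uint32_t uRCTL = 0;
    SET_BITS_V(uRCTL, RCTL, BSIZE, 1); /* uRCTL becomes 0x00010000: BSIZE=01, i.e. 1024-byte buffers. */
    return uRCTL;
}
#endif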
typedef struct
{
unsigned rxa : 7;
unsigned rxa_r : 9;
unsigned txa : 16;
} PBAST;
#define TXDCTL_WTHRESH_MASK 0x003F0000
#define TXDCTL_WTHRESH_SHIFT 16
#define TXDCTL_LWTHRESH_MASK 0xFE000000
#define TXDCTL_LWTHRESH_SHIFT 25
#define RXCSUM_PCSS_SHIFT 0
/** @name Register access macros
 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
* @{ */
/** @} */
/**
* Indices of memory-mapped registers in register table.
*/
typedef enum
{
} E1kRegIndex;
#define E1K_NUM_OF_32BIT_REGS MTA_IDX
/** The number of registers with strictly increasing offset. */
/**
* Define E1000-specific EEPROM layout.
*/
struct E1kEEPROM
{
public:
#ifdef IN_RING3
/**
* Initialize EEPROM content.
*
* @param macAddr MAC address of E1000.
*/
{
/*
* bit 3 - full support for power management
* bit 10 - full duplex
*/
};
/**
* Compute the checksum as required by E1000 and store it
* in the last word.
*/
void updateChecksum()
{
uint16_t u16Checksum = 0;
};
/**
* First 6 bytes of EEPROM contain MAC address.
*
* @returns MAC address of E1000.
*/
{
};
{
}
{
}
{
}
{
}
{
}
#endif /* IN_RING3 */
};
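/*
 * Illustrative sketch only (not part of the device code): the convention behind
 * updateChecksum() above. The EEPROM is considered valid when the 16-bit sum of
 * words 0x00..0x3F equals 0xBABA, so the last word is chosen to make the sum
 * come out right. The helper name and the plain 64-word array are assumptions.
 */
#if 0
static void e1kEepromChecksumSketch(uint16_t au16Word[64])
{
    uint16_t u16Sum = 0;
    for (unsigned i = 0; i < 63; i++)
        u16Sum += au16Word[i];                  /* 16-bit arithmetic wraps intentionally. */
    au16Word[63] = (uint16_t)(0xBABA - u16Sum); /* The sum of all 64 words is now 0xBABA. */
}
#endif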
#define E1K_SPEC_VLAN(s) (s & 0xFFF)
struct E1kRxDStatus
{
/** @name Descriptor Status field (3.2.3.1)
* @{ */
unsigned : 1;
/** @} */
/** @name Descriptor Errors field (3.2.3.2)
* (Only valid when fEOP and fDD are set.)
* @{ */
unsigned : 4; /**< Reserved, varies with different models... */
/** @} */
/** @name Descriptor Special field (3.2.3.3)
* @{ */
/** @} */
};
typedef struct E1kRxDStatus E1KRXDST;
struct E1kRxDesc_st
{
    uint64_t u64BufAddr;   /**< Address of the data buffer. */
    uint16_t u16Length;    /**< Length of data in the buffer. */
    uint16_t u16Checksum;  /**< Packet checksum. */
    E1KRXDST status;       /**< Status, errors and special fields. */
};
typedef struct E1kRxDesc_st E1KRXDESC;
#define E1K_DTYP_LEGACY -1
#define E1K_DTYP_CONTEXT 0
#define E1K_DTYP_DATA 1
struct E1kTDLegacy
{
    uint64_t u64BufAddr;                        /**< Address of the data buffer. */
struct TDLCmd_st
{
unsigned u16Length : 16;
unsigned u8CSO : 8;
/* CMD field : 8 */
unsigned fEOP : 1;
unsigned fIFCS : 1;
unsigned fIC : 1;
unsigned fRS : 1;
unsigned fRPS : 1;
unsigned fDEXT : 1;
unsigned fVLE : 1;
unsigned fIDE : 1;
} cmd;
struct TDLDw3_st
{
/* STA field */
unsigned fDD : 1;
unsigned fEC : 1;
unsigned fLC : 1;
unsigned fTURSV : 1;
/* RSV field */
unsigned u4RSV : 4;
/* CSS field */
unsigned u8CSS : 8;
/* Special field*/
unsigned u16Special: 16;
} dw3;
};
/**
 * TCP/IP context transmit descriptor (see section 3.3.6).
 */
struct E1kTDContext
{
struct CheckSum_st
{
/** TSE: Header start. !TSE: Checksum start. */
unsigned u8CSS : 8;
/** Checksum offset - where to store it. */
unsigned u8CSO : 8;
/** Checksum ending (inclusive) offset, 0 = end of packet. */
unsigned u16CSE : 16;
} ip;
struct CheckSum_st tu;
struct TDCDw2_st
{
/** TSE: The total number of payload bytes for this context. Sans header. */
unsigned u20PAYLEN : 20;
/** The descriptor type - E1K_DTYP_CONTEXT (0). */
unsigned u4DTYP : 4;
/** TUCMD field, 8 bits
* @{ */
/** TSE: TCP (set) or UDP (clear). */
unsigned fTCP : 1;
        /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
         *  the IP header. Does not affect the checksumming. */
        unsigned fIP       : 1;
        /** TSE: TCP segmentation enable. When clear the context describes an
         *  ordinary checksum offload context. */
        unsigned fTSE      : 1;
        /** Report status (only applies to dw3.fDD here). */
        unsigned fRS       : 1;
/** Reserved, MBZ. */
unsigned fRSV1 : 1;
/** Descriptor extension, must be set for this descriptor type. */
unsigned fDEXT : 1;
/** Reserved, MBZ. */
unsigned fRSV2 : 1;
/** Interrupt delay enable. */
unsigned fIDE : 1;
/** @} */
} dw2;
struct TDCDw3_st
{
/** Descriptor Done. */
unsigned fDD : 1;
/** Reserved, MBZ. */
unsigned u7RSV : 7;
unsigned u8HDRLEN : 8;
/** TSO: Maximum segment size. */
unsigned u16MSS : 16;
} dw3;
};
typedef struct E1kTDContext E1KTXCTX;
/**
 * TCP/IP data transmit descriptor (see section 3.3.7).
 */
struct E1kTDData
{
    uint64_t u64BufAddr;                        /**< Address of the data buffer. */
struct TDDCmd_st
{
/** The total length of data pointed to by this descriptor. */
unsigned u20DTALEN : 20;
/** The descriptor type - E1K_DTYP_DATA (1). */
unsigned u4DTYP : 4;
/** @name DCMD field, 8 bits (3.3.7.1).
* @{ */
/** End of packet. Note TSCTFC update. */
unsigned fEOP : 1;
unsigned fIFCS : 1;
/** Use the TSE context when set and the normal when clear. */
unsigned fTSE : 1;
/** Report status (dw3.STA). */
unsigned fRS : 1;
unsigned fRPS : 1;
/** Descriptor extension, must be set for this descriptor type. */
unsigned fDEXT : 1;
        /** VLAN enable: insert dw3.SPECIAL after the ethernet header. */
unsigned fVLE : 1;
/** Interrupt delay enable. */
unsigned fIDE : 1;
/** @} */
} cmd;
struct TDDDw3_st
{
/** @name STA field (3.3.7.2)
* @{ */
unsigned fTURSV : 1;
/** @} */
/** @name POPTS (Packet Option) field (3.3.7.3)
* @{ */
/** @} */
/** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
* Requires fEOP, fVLE and CTRL.VME to be set.
* @{ */
/** @} */
} dw3;
};
union E1kTxDesc
{
    struct E1kTDLegacy  legacy;
    struct E1kTDContext context;
    struct E1kTDData    data;
};
typedef union E1kTxDesc E1KTXDESC;
#define RA_CTL_AS 0x0003
#define RA_CTL_AV 0x8000
union E1kRecAddr
{
    uint32_t au32[32];
    struct RAArray
    {
        uint8_t  addr[6];
        uint16_t ctl;
    } array[16];
};
typedef union E1kRecAddr E1KRA;
/** @todo use+extend RTNETIPV4 */
struct E1kIpHeader
{
    /* type of service / version / header length */
    uint16_t tos_ver;
    /* total length */
    uint16_t total_len;
    /* identification */
    uint16_t ident;
    /* fragment offset field */
    uint16_t offset;
    /* time to live / protocol */
    uint16_t ttl_proto;
    /* checksum */
    uint16_t chksum;
    /* source IP address */
    uint32_t src;
    /* destination IP address */
    uint32_t dest;
};
/** @todo use+extend RTNETTCP */
struct E1kTcpHeader
{
};
#ifdef E1K_WITH_TXD_CACHE
/** The current Saved state version. */
# define E1K_SAVEDSTATE_VERSION 4
/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
#else /* !E1K_WITH_TXD_CACHE */
/** The current Saved state version. */
# define E1K_SAVEDSTATE_VERSION 3
#endif /* !E1K_WITH_TXD_CACHE */
/** Saved state version for VirtualBox 4.1 and earlier.
* These did not include VLAN tag fields. */
#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
/** Saved state version for VirtualBox 3.0 and earlier.
* This did not include the configuration part nor the E1kEEPROM. */
#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
/**
* Device state structure.
*
* Holds the current state of device.
*
* @implements PDMINETWORKDOWN
* @implements PDMINETWORKCONFIG
* @implements PDMILEDPORTS
*/
struct E1kState_st
{
/** The scatter / gather buffer used for the current outgoing packet - R3. */
/** The scatter / gather buffer used for the current outgoing packet - R0. */
/** The scatter / gather buffer used for the current outgoing packet - RC. */
#if HC_ARCH_BITS != 32
#endif
#ifdef E1K_WITH_TX_CS
#endif /* E1K_WITH_TX_CS */
/** Base address of memory-mapped registers. */
/** MAC address obtained from the configuration. */
/** Base port of I/O space region. */
/** EMT: */
/** EMT: Last time the interrupt was acknowledged. */
/** All: Used for eliminating spurious interrupts. */
bool fIntRaised;
/** EMT: false if the cable is disconnected by the GUI. */
bool fCableConnected;
/** EMT: */
bool fR0Enabled;
/** EMT: */
bool fRCEnabled;
/** EMT: Compute Ethernet CRC for RX packets. */
bool fEthernetCRC;
bool Alignment2[3];
/** Link up delay (in milliseconds). */
/** All: Device register storage. */
/** EMT: Offset of the register to be read via IO. */
/** EMT: Multicast Table Array. */
/** EMT: Receive Address registers. */
/** EMT: VLAN filter table array. */
/** EMT: Receive buffer size. */
/** EMT: Locked state -- no state alteration possible. */
bool fLocked;
/** EMT: */
bool fDelayInts;
/** All: */
bool fIntMaskUsed;
/** N/A: */
bool volatile fMaybeOutOfSpace;
/** EMT: Gets signalled when more RX descriptors become available. */
#ifdef E1K_WITH_RXD_CACHE
/** RX: Fetched RX descriptors. */
//uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
/** RX: Actual number of fetched RX descriptors. */
/** RX: Index in cache of RX descriptor being processed. */
#endif /* E1K_WITH_RXD_CACHE */
/** TX: Context used for TCP segmentation packets. */
/** TX: Context used for ordinary packets. */
#ifdef E1K_WITH_TXD_CACHE
/** TX: Fetched TX descriptors. */
/** TX: Actual number of fetched TX descriptors. */
/** TX: Index in cache of TX descriptor being processed. */
/** TX: Will this frame be sent as GSO. */
bool fGSO;
/** Alignment padding. */
bool fReserved;
/** TX: Number of bytes in next packet. */
#endif /* E1K_WITH_TXD_CACHE */
/** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
* applicable to the current TSE mode. */
/** Scratch space for holding the loopback / fallback scatter / gather
* descriptor. */
union
{
} uTxFallback;
/** TX: Transmit packet buffer use for TSE fallback and loopback. */
/** TX: Number of bytes assembled in TX packet buffer. */
/** TX: False will force segmentation in e1000 instead of sending frames as GSO. */
bool fGSOEnabled;
/** TX: IP checksum has to be inserted if true. */
bool fIPcsum;
bool fTCPcsum;
/** TX: VLAN tag has to be inserted if true. */
bool fVTag;
/** TX: TCI part of VLAN tag to be inserted. */
/** TX TSE fallback: Number of payload bytes remaining in TSE context. */
/** TX TSE fallback: Number of header bytes remaining in TSE context. */
/** TX TSE fallback: Flags from template header. */
/** TX TSE fallback: Partial checksum from template header. */
/** ?: Emulated controller type. */
/** EMT: EEPROM emulation */
/** EMT: Physical interface emulation. */
#if 0
/** Alignment padding. */
#endif
#if defined(VBOX_WITH_STATISTICS)
#endif /* VBOX_WITH_STATISTICS */
#ifdef E1K_INT_STATS
/* Internal stats */
#endif /* E1K_INT_STATS */
};
typedef struct E1kState_st E1KSTATE;
/** Pointer to the E1000 device state. */
typedef E1KSTATE *PE1KSTATE;
#ifndef VBOX_DEVICE_STRUCT_TESTCASE
/* Forward declarations ******************************************************/
static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
#if 0 /* unused */
#endif
/**
* Register map table.
*
* Override pfnRead and pfnWrite to get register-specific behavior.
*/
static const struct E1kRegMap_st
{
    /** Register offset in the register space. */
    uint32_t    offset;
    /** Size in bytes. Registers of size > 4 are in fact tables. */
    uint32_t    size;
    /** Readable bits. */
    uint32_t    readable;
    /** Writable bits. */
    uint32_t    writable;
    /** Read callback. */
    int       (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
    /** Write callback. */
    int       (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
    /** Abbreviated name. */
    const char *abbrev;
    /** Full name. */
    const char *name;
} g_aE1kRegMap[] =
{
/* offset size read mask write mask read callback write callback abbrev full name */
/*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
{ 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
{ 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
{ 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
{ 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
{ 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
{ 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
{ 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
{ 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
{ 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
{ 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
{ 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
{ 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
{ 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
{ 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
{ 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
{ 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
{ 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
{ 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
{ 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
{ 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
{ 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
{ 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
{ 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
{ 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
{ 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
{ 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
{ 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
{ 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
{ 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
{ 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
{ 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
{ 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
{ 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
{ 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
{ 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
{ 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
{ 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
{ 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
{ 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
{ 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
{ 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
{ 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
{ 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
{ 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
{ 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
{ 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
{ 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
{ 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
{ 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
{ 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
{ 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
{ 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
{ 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
{ 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
{ 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
{ 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
{ 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
{ 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
{ 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
{ 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
{ 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
{ 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
{ 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
{ 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
{ 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
{ 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
{ 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
{ 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
{ 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
{ 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
{ 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
{ 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
{ 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
{ 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
{ 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
{ 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
{ 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
{ 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
{ 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
{ 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
{ 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
{ 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
{ 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
{ 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
{ 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
{ 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
{ 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
{ 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
{ 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
{ 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
{ 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
{ 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
{ 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
{ 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
{ 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
{ 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
{ 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
{ 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
{ 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
{ 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
{ 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
{ 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
{ 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
{ 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
{ 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
{ 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
{ 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
{ 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
{ 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
{ 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
{ 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
{ 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
{ 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
{ 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
{ 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
{ 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
{ 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
{ 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
{ 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
{ 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
{ 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
{ 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
{ 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
{ 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
{ 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
{ 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
{ 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
{ 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
{ 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
{ 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
{ 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
{ 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
{ 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
{ 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
};
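/*
 * Illustrative sketch only (not part of the device code): mapping an MMIO offset
 * to an entry of the table above. Every entry covers [offset, offset + size), so
 * a scan over the table (the real code can use a binary search, as the first
 * E1K_NUM_OF_32BIT_REGS entries are sorted by offset) finds the handler pair to
 * invoke. The helper name and explicit parameters are assumptions.
 */
#if 0
static int e1kRegLookupSketch(const struct E1kRegMap_st *paMap, unsigned cEntries, uint32_t offReg)
{
    for (unsigned i = 0; i < cEntries; i++)
        if (offReg >= paMap[i].offset && offReg < paMap[i].offset + paMap[i].size)
            return (int)i;  /* Index of the entry whose pfnRead/pfnWrite handles offReg. */
    return -1;              /* Nothing is mapped at this offset. */
}
#endif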
#ifdef DEBUG
/**
* Convert U32 value to hex string. Masked bytes are replaced with dots.
*
* @remarks The mask has byte (not bit) granularity (e.g. 000000FF).
*
* @returns The buffer.
*
* @param u32 The word to convert into string.
* @param mask Selects which bytes to convert.
* @param buf Where to put the result.
*/
static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
{
    for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>= 4, mask >>= 4)
    {
        if (mask & 0xF)
            *ptr = (u32 & 0xF) < 10 ? '0' + (u32 & 0xF) : 'a' + (u32 & 0xF) - 10;
        else
            *ptr = '.';
    }
    buf[8] = 0;
    return buf;
}
/**
* Returns timer name for debug purposes.
*
* @returns The timer name.
*
* @param pThis The device state structure.
* @param pTimer The timer to get the name for.
*/
{
return "TID";
return "TAD";
return "RID";
return "RAD";
return "Int";
return "TXD";
return "unknown";
}
#endif /* DEBUG */
/**
* Arm a timer.
*
* @param pThis Pointer to the device state structure.
* @param pTimer Pointer to the timer.
* @param uExpireIn Expiration interval in microseconds.
*/
{
return;
E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
}
/**
* Cancel a timer.
*
* @param pThis Pointer to the device state structure.
* @param pTimer Pointer to the timer.
*/
{
E1kLog2(("%s Stopping %s timer...\n",
if (RT_FAILURE(rc))
{
E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
}
}
#ifndef E1K_WITH_TX_CS
# define e1kCsTxLeave(ps) do { } while (0)
#else /* E1K_WITH_TX_CS */
#endif /* E1K_WITH_TX_CS */
#ifdef IN_RING3
/**
* Wakeup the RX thread.
*/
{
if ( pThis->fMaybeOutOfSpace
{
}
}
/**
* Hardware reset. Revert all registers to initial values.
*
* @param pThis The device state structure.
*/
{
#ifdef E1K_INIT_RA0
#endif /* E1K_INIT_RA0 */
/* Reset promiscuous mode */
#ifdef E1K_WITH_TXD_CACHE
{
pThis->nTxDFetched = 0;
pThis->iTxDCurrent = 0;
}
#endif /* E1K_WITH_TXD_CACHE */
#ifdef E1K_WITH_RXD_CACHE
{
}
#endif /* E1K_WITH_RXD_CACHE */
}
#endif /* IN_RING3 */
/**
* Compute Internet checksum.
*
* @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
*
 * @param   cpPacket    The packet.
 * @param   cb          The size of the packet.
*
* @return The 1's complement of the 1's complement sum.
*
* @thread E1000_TX
*/
static uint16_t e1kCSum16(const void *cpPacket, size_t cb)
{
    uint32_t        csum = 0;
    uint16_t const *pu16 = (uint16_t const *)cpPacket;

    while (cb > 1)
    {
        csum += *pu16++;
        cb -= 2;
    }
    if (cb)
        csum += *(uint8_t const *)pu16;
    while (csum >> 16)
        csum = (csum >> 16) + (csum & 0xFFFF);
    return (uint16_t)~csum;
}
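/*
 * Illustrative sketch only (not part of the device code): using the helper above
 * to validate a received IPv4 header. For a header whose checksum field is
 * correct the folded one's complement sum is 0xFFFF, so e1kCSum16() returns 0.
 * The helper name is an assumption.
 */
#if 0
static bool e1kIsIpHeaderValidSketch(const struct E1kIpHeader *pIpHdr)
{
    return e1kCSum16(pIpHdr, sizeof(*pIpHdr)) == 0; /* Ignores IPv4 options for simplicity. */
}
#endif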
/**
* Dump a packet to debug log.
*
* @param pThis The device state structure.
* @param cpPacket The packet.
* @param cb The size of the packet.
* @param cszText A string denoting direction of packet transfer.
* @thread E1000_TX
*/
DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *cszText)
{
#ifdef DEBUG
{
Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
{
Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
}
{
Log4(("%s --- IPv4:: %RTnaipv4 => %RTnaipv4\n",
}
}
#else
{
E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
else
E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
}
#endif
}
/**
* Determine the type of transmit descriptor.
*
* @returns Descriptor type. See E1K_DTYP_XXX defines.
*
* @param pDesc Pointer to descriptor union.
* @thread E1000_TX
*/
DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
{
    if (pDesc->legacy.cmd.fDEXT)
        return pDesc->context.dw2.u4DTYP;
    return E1K_DTYP_LEGACY;
}
/**
* Dump receive descriptor to debug log.
*
* @param pThis The device state structure.
* @param pDesc Pointer to the descriptor.
* @thread E1000_RX
*/
{
E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
}
/**
* Dump transmit descriptor to debug log.
*
* @param pThis The device state structure.
* @param pDesc Pointer to descriptor union.
* @param cszDir A string denoting direction of descriptor transfer
* @thread E1000_TX
*/
DECLINLINE(void) e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *cszDir,
                               unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
{
/*
* Unfortunately we cannot use our format handler here, we want R0 logging
* as well.
*/
switch (e1kGetDescType(pDesc))
{
case E1K_DTYP_CONTEXT:
break;
case E1K_DTYP_DATA:
break;
case E1K_DTYP_LEGACY:
break;
default:
E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
break;
}
}
/**
* Raise interrupt if not masked.
*
* @param pThis The device state structure.
*/
{
return rc;
ICR |= u32IntCause;
{
#if 0
if (pThis->fDelayInts)
{
E1kLog2(("%s e1kRaiseInterrupt: Delayed. ICR=%08x\n",
#define E1K_LOST_IRQ_THRSLD 20
//#define E1K_LOST_IRQ_THRSLD 200000000
{
E1kLog2(("%s WARNING! Disabling delayed interrupt logic: delayed=%d, delivered=%d\n",
pThis->fIntMaskUsed = false;
pThis->uStatDisDly++;
}
}
else
#endif
if (pThis->fIntRaised)
{
E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
}
else
{
#ifdef E1K_ITR_ENABLED
            /* interrupts/sec = 1 / (ITR * 256 ns) = 1 / (256 * 1E-9 * ITR) */
E1kLog2(("%s e1kRaiseInterrupt: tstamp - pThis->u64AckedAt = %d, ITR * 256 = %d\n",
//if (!!ITR && pThis->fIntMaskUsed && tstamp - pThis->u64AckedAt < ITR * 256)
{
E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
}
else
#endif
{
/* Since we are delivering the interrupt now
* there is no need to do it later -- stop the timer.
*/
/* Got at least one unmasked interrupt cause */
pThis->fIntRaised = true;
/* Raise(1) INTA(0) */
E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
}
}
}
else
{
E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
}
return VINF_SUCCESS;
}
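/*
 * Illustrative sketch only (not part of the device code) of the decision taken by
 * e1kRaiseInterrupt() above: a cause bit is latched in ICR unconditionally, but
 * INTA is asserted only if some latched cause is unmasked (ICR & IMS) and the
 * line is not already asserted (fIntRaised). The helper itself is an assumption.
 */
#if 0
static bool e1kShouldAssertIrqSketch(uint32_t uIcr, uint32_t uIms, bool fIntRaised)
{
    return (uIcr & uIms) != 0 && !fIntRaised;
}
#endif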
/**
* Compute the physical address of the descriptor.
*
* @returns the physical address of the descriptor.
*
* @param baseHigh High-order 32 bits of descriptor table address.
* @param baseLow Low-order 32 bits of descriptor table address.
* @param idxDesc The descriptor index in the table.
*/
DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
{
    return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
}
/**
* Advance the head pointer of the receive descriptor queue.
*
* @remarks RDH always points to the next available RX descriptor.
*
* @param pThis The device state structure.
*/
{
//e1kCsEnter(pThis, RT_SRC_POS);
RDH = 0;
/*
* Compute current receive queue length and fire RXDMT0 interrupt
* if we are low on receive buffers
*/
/*
* The minimum threshold is controlled by RDMTS bits of RCTL:
* 00 = 1/2 of RDLEN
* 01 = 1/4 of RDLEN
* 10 = 1/8 of RDLEN
* 11 = reserved
*/
if (uRQueueLen <= uMinRQThreshold)
{
E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
}
E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
//e1kCsLeave(pThis);
}
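/*
 * Illustrative sketch only (not part of the device code): the RXDMT0 threshold
 * described in e1kAdvanceRDH() above. RDLEN is in bytes and each receive
 * descriptor occupies 16 bytes, so the ring holds RDLEN / 16 descriptors and
 * RDMTS selects 1/2, 1/4 or 1/8 of that. Names are assumptions.
 */
#if 0
static uint32_t e1kRxMinThresholdSketch(uint32_t cbRdLen, unsigned uRdmts)
{
    uint32_t cDescInRing = (uint32_t)(cbRdLen / sizeof(E1KRXDESC)); /* sizeof(E1KRXDESC) == 16 */
    return cDescInRing >> (uRdmts + 1);                             /* 0 -> 1/2, 1 -> 1/4, 2 -> 1/8 */
}
#endif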
#ifdef E1K_WITH_RXD_CACHE
/**
 * Return the number of RX descriptors that belong to the hardware.
*
* @returns the number of available descriptors in RX ring.
* @param pThis The device state structure.
* @thread ???
*/
{
/**
* Make sure RDT won't change during computation. EMT may modify RDT at
* any moment.
*/
}
{
}
{
}
/**
* Load receive descriptors from guest memory. The caller needs to be in Rx
* critical section.
*
* We need two physical reads in case the tail wrapped around the end of RX
* descriptor ring.
*
* @returns the actual number of descriptors fetched.
* @param pThis The device state structure.
* @param pDesc Pointer to descriptor union.
* @param addr Physical address in guest context.
* @thread EMT, RX
*/
{
/* We've already loaded pThis->nRxDFetched descriptors past RDH. */
Assert(nDescsTotal != 0);
if (nDescsTotal == 0)
return 0;
E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
"nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
if (nDescsToFetch == 0)
return 0;
// uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
// unsigned i, j;
// for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
// {
// pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
// E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
// }
E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
if (nDescsToFetch > nDescsInSingleRead)
{
// Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
// for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
// {
// pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
// E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
// }
E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
}
return nDescsToFetch;
}
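/*
 * Illustrative sketch only (not part of the device code) of the split read
 * mentioned for e1kRxDPrefetch() above: when the descriptors to fetch cross the
 * end of the ring they are read as two contiguous chunks. Names are assumptions.
 */
#if 0
static void e1kSplitRingReadSketch(unsigned iFirst, unsigned cToFetch, unsigned cRingSize,
                                   unsigned *pcFirstChunk, unsigned *pcSecondChunk)
{
    *pcFirstChunk  = RT_MIN(cToFetch, cRingSize - iFirst); /* Up to the end of the ring. */
    *pcSecondChunk = cToFetch - *pcFirstChunk;             /* Remainder wraps to the start. */
}
#endif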
/**
* Obtain the next RX descriptor from RXD cache, fetching descriptors from the
* RX ring if the cache is empty.
*
* Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
* go out of sync with RDH which will cause trouble when EMT checks if the
 * cache is empty to do pre-fetch (@bugref{6217}).
*
* @param pThis The device state structure.
* @thread RX
*/
{
/* Check the cache first. */
/* Cache is empty, reset it and check if we can fetch more. */
if (e1kRxDPrefetch(pThis))
/* Out of Rx descriptors. */
return NULL;
}
/**
* Return the RX descriptor obtained with e1kRxDGet() and advance the cache
* pointer. The descriptor gets written back to the RXD ring.
*
* @param pThis The device state structure.
* @param pDesc The descriptor being "returned" to the RX ring.
* @thread RX
*/
{
pThis->iRxDCurrent++;
// Assert(pDesc >= pThis->aRxDescriptors);
// Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
// uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
// uint32_t rdh = RDH;
// Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
}
/**
 * Store a fragment of received packet at the specified address.
*
* @param pThis The device state structure.
* @param pDesc The next available RX descriptor.
* @param pvBuf The fragment.
* @param cb The size of the fragment.
*/
static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
{
E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
}
#else /* !E1K_WITH_RXD_CACHE */
/**
* Store a fragment of received packet that fits into the next available RX
* buffer.
*
* @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
*
* @param pThis The device state structure.
* @param pDesc The next available RX descriptor.
* @param pvBuf The fragment.
* @param cb The size of the fragment.
*/
static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
{
E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
/* Write back the descriptor */
PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
/* Advance head */
//E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
{
/* Complete packet has been stored -- it is time to let the guest know. */
#ifdef E1K_USE_RX_TIMERS
if (RDTR)
{
/* Arm the timer to fire in RDTR usec (discard .024) */
/* If absolute timer delay is enabled and the timer is not running yet, arm it. */
}
else
{
#endif
/* 0 delay means immediate interrupt */
#ifdef E1K_USE_RX_TIMERS
}
#endif
}
}
#endif /* !E1K_WITH_RXD_CACHE */
/**
* Returns true if it is a broadcast packet.
*
* @returns true if destination address indicates broadcast.
* @param pvBuf The ethernet packet.
*/
DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
{
    return memcmp(pvBuf, "\xff\xff\xff\xff\xff\xff", 6) == 0;
}
/**
* Returns true if it is a multicast packet.
*
* @remarks returns true for broadcast packets as well.
* @returns true if destination address indicates multicast.
* @param pvBuf The ethernet packet.
*/
DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
{
return (*(char*)pvBuf) & 1;
}
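/* For example, 01:00:5e:00:00:fb (an IPv4 multicast MAC) and ff:ff:ff:ff:ff:ff
 * (broadcast) both have the least significant bit of the first byte set, while
 * unicast addresses (even first octet) do not. */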
/**
* Set IXSM, IPCS and TCPCS flags according to the packet type.
*
 * @remarks We emulate checksum offloading for major packet types only.
*
* @returns VBox status code.
* @param pThis The device state structure.
* @param pFrame The available data.
* @param cb Number of bytes available in the buffer.
* @param status Bit fields containing status info.
*/
static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
{
/** @todo
     * It is not safe to bypass checksum verification for packets coming
     * from the real wire. We are currently unable to tell where packets are
     * coming from, so we tell the driver to ignore our checksum flags
     * and do the verification in software.
*/
#if 0
switch (uEtherType)
{
case 0x800: /* IPv4 */
{
break;
}
case 0x86DD: /* IPv6 */
break;
default: /* ARP, VLAN, etc. */
break;
}
#else
#endif
return VINF_SUCCESS;
}
/**
* Pad and store received packet.
*
 * @remarks Make sure that the packet appears to the upper layer as one coming
* from real Ethernet: pad it and insert FCS.
*
* @returns VBox status code.
* @param pThis The device state structure.
* @param pvBuf The available data.
* @param cb Number of bytes available in the buffer.
* @param status Bit fields containing status info.
*/
static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
{
#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
return rc;
{
/* VLAN packet -- strip VLAN tag in VLAN mode */
{
cb -= 4;
E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
}
else
}
else
/* Pad short packets */
if (cb < 60)
{
cb = 60;
}
{
/*
* Add FCS if CRC stripping is not enabled. Since the value of CRC
     * is ignored by most drivers, we may as well save ourselves the trouble
* of calculating it (see EthernetCRC CFGM parameter).
*/
if (pThis->fEthernetCRC)
}
/* Compute checksum of complete packet */
/* Update stats */
if (e1kIsBroadcast(pvBuf))
else if (e1kIsMulticast(pvBuf))
/* Update octet receive counter */
if (cb == 64)
else if (cb < 128)
else if (cb < 256)
else if (cb < 512)
else if (cb < 1024)
else
#ifdef E1K_WITH_RXD_CACHE
while (cb > 0)
{
{
E1kLog(("%s Out of receive buffers, dropping the packet "
"(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
break;
}
#else /* !E1K_WITH_RXD_CACHE */
{
E1kLog(("%s Out of receive buffers, dropping the packet\n",
}
/* Store the packet to receive buffers */
{
/* Load the descriptor pointed by head */
#endif /* !E1K_WITH_RXD_CACHE */
if (pDesc->u64BufAddr)
{
/* Update descriptor */
/*
* We need to leave Rx critical section here or we risk deadlocking
* with EMT in e1kRegWriteRDT when the write is to an unallocated
* page or has an access handler associated with it.
* Note that it is safe to leave the critical section here since
* e1kRegWriteRDT() never modifies RDH. It never touches already
* fetched RxD cache entries either.
*/
{
return rc;
}
else
{
#ifdef E1K_WITH_RXD_CACHE
return rc;
cb = 0;
#else /* !E1K_WITH_RXD_CACHE */
return VINF_SUCCESS;
#endif /* !E1K_WITH_RXD_CACHE */
}
/*
* Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
* is not defined.
*/
}
#ifdef E1K_WITH_RXD_CACHE
/* Write back the descriptor. */
#else /* !E1K_WITH_RXD_CACHE */
else
{
/* Write back the descriptor. */
}
#endif /* !E1K_WITH_RXD_CACHE */
}
if (cb > 0)
#ifdef E1K_WITH_RXD_CACHE
/* Complete packet has been stored -- it is time to let the guest know. */
# ifdef E1K_USE_RX_TIMERS
if (RDTR)
{
/* Arm the timer to fire in RDTR usec (discard .024) */
/* If absolute timer delay is enabled and the timer is not running yet, arm it. */
}
else
{
# endif /* E1K_USE_RX_TIMERS */
/* 0 delay means immediate interrupt */
# ifdef E1K_USE_RX_TIMERS
}
# endif /* E1K_USE_RX_TIMERS */
#endif /* E1K_WITH_RXD_CACHE */
return VINF_SUCCESS;
#else
return VERR_INTERNAL_ERROR_2;
#endif
}
/**
* Bring the link up after the configured delay, 5 seconds by default.
*
* @param pThis The device state structure.
* @thread any
*/
{
E1kLog(("%s Will bring up the link in %d seconds...\n",
}
#if 0 /* unused */
/**
 * Read handler for Device Control register.
 *
 * Reads back the MDIO pin state from the PHY when MDC is high.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param mask Used to implement partial reads (8 and 16-bit).
*/
{
E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
{
/* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
else
E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
}
else
{
/* MDIO pin is used for output, ignore it */
}
return VINF_SUCCESS;
}
#endif /* unused */
/**
* Write handler for Device Control register.
*
* Handles reset.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
int rc = VINF_SUCCESS;
if (value & CTRL_RESET)
{ /* RST */
#ifndef IN_RING3
return VINF_IOM_R3_IOPORT_WRITE;
#else
#endif
}
else
{
&& pThis->fCableConnected
{
/* The driver indicates that we should bring up the link */
/* Do so in 5 seconds (by default). */
/*
* Change the status (but not PHY status) anyway as Windows expects
* it for 82543GC.
*/
}
{
}
E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
{
if (value & CTRL_MDIO_DIR)
{
/* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
}
else
{
else
E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
}
}
}
return rc;
}
/**
 * Write handler for EEPROM/Flash Control/Data register.
 *
 * Handles EEPROM access requests; forwards writes to the EEPROM device if
 * access has been granted.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
#ifdef IN_RING3
/* So far we are concerned with lower byte only */
{
/* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
/* Note: 82543GC does not need to request EEPROM access */
}
if (value & EECD_EE_REQ)
else
EECD &= ~EECD_EE_GNT;
//e1kRegWriteDefault(pThis, offset, index, value );
return VINF_SUCCESS;
#else /* !IN_RING3 */
return VINF_IOM_R3_MMIO_WRITE;
#endif /* !IN_RING3 */
}
/**
 * Read handler for EEPROM/Flash Control/Data register.
 *
 * Lower 4 bits come from the EEPROM device if EEPROM access has been granted.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param mask Used to implement partial reads (8 and 16-bit).
* @thread EMT
*/
{
#ifdef IN_RING3
if (RT_SUCCESS(rc))
{
{
/* Note: 82543GC does not need to request EEPROM access */
/* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
}
}
return rc;
#else /* !IN_RING3 */
return VINF_IOM_R3_MMIO_READ;
#endif /* !IN_RING3 */
}
/**
* Write handler for EEPROM Read register.
*
* Handles EEPROM word access requests, reads EEPROM and stores the result
* into DATA field.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
#ifdef IN_RING3
/* Make use of 'writable' and 'readable' masks. */
/* DONE and DATA are set only if read was triggered by START. */
if (value & EERD_START)
{
}
return VINF_SUCCESS;
#else /* !IN_RING3 */
return VINF_IOM_R3_MMIO_WRITE;
#endif /* !IN_RING3 */
}
/**
* Write handler for MDI Control register.
 *
 * Forwards PHY register read/write requests to the internal PHY emulation.
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
if (value & MDIC_INT_EN)
{
E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
}
else if (value & MDIC_READY)
{
E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
}
{
E1kLog(("%s ERROR! Access to invalid PHY detected, phy=%d.\n",
}
else
{
/* Store the value */
/* Forward op to PHY */
if (value & MDIC_OP_READ)
else
/* Let software know that we are done */
MDIC |= MDIC_READY;
}
return VINF_SUCCESS;
}
/**
* Write handler for Interrupt Cause Read register.
*
* Bits corresponding to 1s in 'value' will be cleared in ICR register.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
return VINF_SUCCESS;
}
/**
* Read handler for Interrupt Cause Read register.
*
* Reading this register acknowledges all interrupts.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param mask Not used.
* @thread EMT
*/
{
return rc;
if (RT_SUCCESS(rc))
{
if (value)
{
/*
* Not clearing ICR causes QNX to hang as it reads ICR in a loop
* with disabled interrupts.
*/
//if (IMS)
if (1)
{
/*
* Interrupts were enabled -- we are supposedly at the very
* beginning of interrupt handler
*/
/* Clear all pending interrupts */
ICR = 0;
pThis->fIntRaised = false;
/* Lower(0) INTA(0) */
if (pThis->fIntMaskUsed)
pThis->fDelayInts = true;
}
else
{
/*
             * Interrupts are disabled -- in Windows guests the ICR read is done
* just before re-enabling interrupts
*/
E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
}
}
}
return rc;
}
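/*
 * Illustrative sketch (not part of the device code): the read-to-acknowledge
 * semantics sketched above -- reading ICR returns the pending causes and, when
 * anything was pending, clears them and deasserts the interrupt line. The
 * structure and helper names below are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

typedef struct EXINTSTATE
{
    uint32_t icr;          /* pending interrupt causes        */
    uint32_t ims;          /* interrupt mask (enabled causes) */
    bool     fIntRaised;   /* INTA currently asserted         */
} EXINTSTATE;

static void exampleLowerIrq(EXINTSTATE *p) { p->fIntRaised = false; }

static uint32_t exampleReadIcr(EXINTSTATE *p)
{
    uint32_t value = p->icr;
    if (value)
    {
        /* Acknowledge: clear all pending causes and drop the line. */
        p->icr = 0;
        exampleLowerIrq(p);
    }
    return value;
}
#endif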
/**
* Write handler for Interrupt Cause Set register.
*
* Bits corresponding to 1s in 'value' will be set in ICR register.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
}
/**
* Write handler for Interrupt Mask Set register.
*
* Will trigger pending interrupts.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
/* Mask changes, we need to raise pending interrupts. */
{
E1kLog2(("%s e1kRegWriteIMS: IRQ pending (%08x), arming late int timer...\n",
/* Raising an interrupt immediately causes win7 to hang upon NIC reconfiguration, see @bugref{5023}. */
}
return VINF_SUCCESS;
}
/**
* Write handler for Interrupt Mask Clear register.
*
* Bits corresponding to 1s in 'value' will be cleared in IMS register.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
return rc;
if (pThis->fIntRaised)
{
/*
* Technically we should reset fIntRaised in the ICR read handler, but that would
* cause Windows to freeze since it may receive an interrupt while still at the
* very beginning of its interrupt handler.
*/
/* Lower(0) INTA(0) */
pThis->fIntRaised = false;
}
return VINF_SUCCESS;
}
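/*
 * Illustrative sketch (not part of the device code): how IMS and IMC writes
 * shape the effective interrupt condition (ICR & IMS). Setting mask bits may
 * expose already-pending causes; clearing them lowers the line. Types and
 * helper names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

typedef struct EXIRQSTATE
{
    uint32_t icr;        /* pending causes       */
    uint32_t ims;        /* enabled causes       */
    bool     fAsserted;  /* interrupt line state */
} EXIRQSTATE;

static void exampleUpdateLine(EXIRQSTATE *p)
{
    p->fAsserted = (p->icr & p->ims) != 0;
}

static void exampleWriteIms(EXIRQSTATE *p, uint32_t value)
{
    p->ims |= value;          /* 1s set the corresponding mask bits           */
    exampleUpdateLine(p);     /* may raise a previously masked interrupt      */
}

static void exampleWriteImc(EXIRQSTATE *p, uint32_t value)
{
    p->ims &= ~value;         /* 1s clear the corresponding mask bits         */
    exampleUpdateLine(p);     /* lowers the line if nothing enabled remains   */
}
#endif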
/**
* Write handler for Receive Control register.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
/* Update promiscuous mode */
{
/* Promiscuity has changed, pass the knowledge on. */
#ifndef IN_RING3
return VINF_IOM_R3_IOPORT_WRITE;
#else
#endif
}
/* Adjust receive buffer size */
cbRxBuf *= 16;
E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
/* Update the register */
return VINF_SUCCESS;
}
/**
* Write handler for Packet Buffer Allocation register.
*
* TXA = 64 - RXA.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
return VINF_SUCCESS;
}
/**
* Write handler for Receive Descriptor Tail register.
*
* @remarks Write into RDT forces switch to HC and signal to
* e1kR3NetworkDown_WaitReceiveAvail().
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
#ifndef IN_RING3
/* XXX */
// return VINF_IOM_R3_MMIO_WRITE;
#endif
{
#ifdef E1K_WITH_RXD_CACHE
/*
* We need to fetch descriptors now as RDT may go a whole circle
* before we attempt to store a received packet. For example,
* Intel's DOS drivers use 2 (!) RX descriptors with the total ring
* size being only 8 descriptors! Note that we fetch descriptors
* only when the cache is empty to reduce the number of memory reads
* in case of frequent RDT writes. Don't fetch anything when the
* receiver is disabled either, as RDH, RDT and RDLEN can be in some
* messed up state.
* Note that even though the cache may seem empty, meaning that there
* are no more available descriptors in it, it may still be in use by
* the RX thread which has not yet written the last descriptor back
* but has temporarily released the RX lock in order to write the
* packet body to the descriptor's buffer. At this point we are still
* going to do the prefetch, but it won't actually fetch anything if
* there are no unused slots in our "empty" cache
* (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not reset the cache
* here even if it appears empty. It will be reset at a later point
* in e1kRxDGet().
*/
#endif /* E1K_WITH_RXD_CACHE */
if (RT_SUCCESS(rc))
{
/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
* without requiring any context switches. We should also check the
* wait condition before bothering to queue the item as we're currently
* queuing thousands of items per second here in a normal transmit
* scenario. Expect performance changes when fixing this! */
#ifdef IN_RING3
/* Signal that we have more receive descriptors available. */
#else
if (pItem)
#endif
}
}
return rc;
}
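/*
 * Illustrative sketch (not part of the device code): computing how many RX
 * descriptors can be prefetched into a fixed-size cache after an RDT write,
 * honouring both the number of unused cache slots and the ring wrap discussed
 * in the comment above. All names below are hypothetical.
 */
#if 0
#include <stdint.h>

#define EX_RXD_CACHE_SIZE 16u

typedef struct EXRXRING
{
    uint32_t rdh;         /* head index                                    */
    uint32_t rdt;         /* tail index                                    */
    uint32_t cDescs;      /* total descriptors in the ring                 */
    uint32_t nFetched;    /* descriptors currently sitting in the cache    */
} EXRXRING;

/* How many descriptors may be fetched right now. */
static uint32_t exampleRxPrefetchCount(const EXRXRING *p)
{
    /* Descriptors owned by hardware: from RDH up to (but not including) RDT. */
    uint32_t cAvail = (p->rdt >= p->rdh)
                    ? p->rdt - p->rdh
                    : p->cDescs - p->rdh + p->rdt;
    uint32_t cFree  = EX_RXD_CACHE_SIZE - p->nFetched; /* unused cache slots */
    return cAvail < cFree ? cAvail : cFree;
}
#endif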
/**
* Write handler for Receive Delay Timer register.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
{
/* Flush requested, cancel both timers and raise interrupt */
#ifdef E1K_USE_RX_TIMERS
#endif
}
return VINF_SUCCESS;
}
{
/**
* Make sure TDT won't change during computation. EMT may modify TDT at
* any moment.
*/
}
#ifdef IN_RING3
#ifdef E1K_TX_DELAY
/**
* Transmit Delay Timer handler.
*
* @remarks We only get here when the timer expires.
*
* @param pDevIns Pointer to device instance structure.
* @param pTimer Pointer to the timer.
* @param pvUser NULL.
* @thread EMT
*/
{
#ifdef E1K_INT_STATS
#endif
}
#endif /* E1K_TX_DELAY */
#ifdef E1K_USE_TX_TIMERS
/**
* Transmit Interrupt Delay Timer handler.
*
* @remarks We only get here when the timer expires.
*
* @param pDevIns Pointer to device instance structure.
* @param pTimer Pointer to the timer.
* @param pvUser NULL.
* @thread EMT
*/
{
/* Cancel absolute delay timer as we have already got attention */
#ifndef E1K_NO_TAD
#endif /* E1K_NO_TAD */
}
/**
* Transmit Absolute Delay Timer handler.
*
* @remarks We only get here when the timer expires.
*
* @param pDevIns Pointer to device instance structure.
* @param pTimer Pointer to the timer.
* @param pvUser NULL.
* @thread EMT
*/
{
/* Cancel interrupt delay timer as we have already got attention */
}
#endif /* E1K_USE_TX_TIMERS */
#ifdef E1K_USE_RX_TIMERS
/**
* Receive Interrupt Delay Timer handler.
*
* @remarks We only get here when the timer expires.
*
* @param pDevIns Pointer to device instance structure.
* @param pTimer Pointer to the timer.
* @param pvUser NULL.
* @thread EMT
*/
{
/* Cancel absolute delay timer as we have already got attention */
}
/**
* Receive Absolute Delay Timer handler.
*
* @remarks We only get here when the timer expires.
*
* @param pDevIns Pointer to device instance structure.
* @param pTimer Pointer to the timer.
* @param pvUser NULL.
* @thread EMT
*/
{
/* Cancel interrupt delay timer as we have already got attention */
}
#endif /* E1K_USE_RX_TIMERS */
/**
* Late Interrupt Timer handler.
*
* @param pDevIns Pointer to device instance structure.
* @param pTimer Pointer to the timer.
* @param pvUser NULL.
* @thread EMT
*/
{
#if 0
pThis->iStatIntLost--;
#endif
}
/**
* Link Up Timer handler.
*
* @param pDevIns Pointer to device instance structure.
* @param pTimer Pointer to the timer.
* @param pvUser NULL.
* @thread EMT
*/
{
/*
* This can happen if we set the link status to down when the Link Up timer was
* already armed (shortly after e1kLoadDone(), or when the cable was disconnected
* and then re-connected and disconnected again very quickly).
*/
if (!pThis->fCableConnected)
return;
}
#endif /* IN_RING3 */
/**
* Sets up the GSO context according to the TSE new context descriptor.
*
* @param pGso The GSO context to setup.
* @param pCtx The context descriptor.
*/
{
/*
* See if the context descriptor describes something that could be TCP or
* UDP over IPv[46].
*/
{
return;
}
if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
{
return;
}
{
return;
}
/* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
{
return;
}
/* IPv4 checksum offset. */
if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
{
return;
}
{
E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS, pCtx->dw2.fTCP));
return;
}
/*
* Because internal networking uses a 16-bit size field for the GSO context
* plus frame, we have to make sure we don't exceed it.
*/
{
E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
return;
}
/*
* We're good for now - we'll do more checks when seeing the data.
* So, figure the type of offloading and setup the context.
*/
{
{
}
else
{
}
/** @todo Detect IPv4-IPv6 tunneling (need a test setup since Linux doesn't
* seem to do this yet)... */
}
else
{
else
}
E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
}
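/*
 * Illustrative sketch (not part of the device code): the kind of sanity checks
 * performed above before accepting a TSE context descriptor for GSO -- the
 * TCP/UDP checksum start must lie beyond a minimal IP header, the IPv4
 * checksum offset must point at the ip_sum field, and headers plus payload
 * must fit the backend's size limit. The structure and constants below are
 * simplified assumptions, not the real E1KTXCTX layout.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

typedef struct EXGSOCTX
{
    uint8_t  ipCss;      /* start of IP header (offset into frame) */
    uint8_t  ipCso;      /* where to store the IP checksum         */
    uint8_t  tuCss;      /* start of TCP/UDP header                */
    bool     fIPv4;      /* IPv4 (true) or IPv6 (false)            */
    uint16_t cbHdr;      /* total header length (HDRLEN)           */
    uint32_t cbPayload;  /* payload length (PAYLEN)                */
} EXGSOCTX;

#define EX_MIN_IPV4_HDR    20u
#define EX_MIN_IPV6_HDR    40u
#define EX_IPV4_SUM_OFF    10u      /* offset of ip_sum within the IPv4 header */
#define EX_MAX_GSO_SIZE    65535u   /* assumed 16-bit limit of the backend     */

static bool exampleGsoCtxLooksSane(const EXGSOCTX *p)
{
    unsigned cbMinIpHdr = p->fIPv4 ? EX_MIN_IPV4_HDR : EX_MIN_IPV6_HDR;
    if (p->tuCss < (unsigned)p->ipCss + cbMinIpHdr)
        return false;                           /* L4 header overlaps L3 header       */
    if (p->fIPv4 && (unsigned)p->ipCso - p->ipCss != EX_IPV4_SUM_OFF)
        return false;                           /* IPv4 checksum in the wrong place   */
    if ((uint32_t)p->cbHdr + p->cbPayload > EX_MAX_GSO_SIZE)
        return false;                           /* would not fit the size field       */
    return true;
}
#endif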
/**
* Checks if we can use GSO processing for the current TSE frame.
*
* @param pThis The device state structure.
* @param pGso The GSO context.
* @param pData The first data descriptor of the frame.
* @param pCtx The TSO context descriptor.
*/
DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
{
{
E1kLog2(("e1kCanDoGso: !TSE\n"));
return false;
}
{
E1kLog(("e1kCanDoGso: VLE\n"));
return false;
}
{
E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
return false;
}
{
{
E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
return false;
}
{
E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
return false;
}
/** @todo What other checks should we perform here? Ethernet frame type? */
E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
return true;
{
E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
return false;
}
{
E1kLog(("e1kCanDoGso: TXSM (IPv6)\n"));
return false;
}
/** @todo What other checks should we perform here? Ethernet frame type? */
E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
return true;
default:
E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
return false;
}
}
/**
* Frees the current xmit buffer.
*
* @param pThis The device state structure.
*/
{
if (pSg)
{
{
if (pDrv)
}
else
{
/* loopback */
}
}
}
#ifndef E1K_WITH_TXD_CACHE
/**
* Allocates an xmit buffer.
*
* @returns See PDMINETWORKUP::pfnAllocBuf.
* @param pThis The device state structure.
* @param cbMin The minimum frame size.
* @param fExactSize Whether cbMin is exact or if we have to max it
* out to the max MTU size.
* @param fGso Whether this is a GSO frame or not.
*/
{
/* Adjust cbMin if necessary. */
if (!fExactSize)
/* Deal with existing buffer (descriptor screw up, reset, etc). */
/*
* Allocate the buffer.
*/
{
if (RT_UNLIKELY(!pDrv))
return VERR_NET_DOWN;
if (RT_FAILURE(rc))
{
/* Suspend TX as we are out of buffers atm */
STATUS |= STATUS_TXOFF;
return rc;
}
}
else
{
/* Create a loopback using the fallback buffer and preallocated SG. */
pSg->cbAvailable = 0;
}
return VINF_SUCCESS;
}
#else /* E1K_WITH_TXD_CACHE */
/**
* Allocates an xmit buffer.
*
* @returns See PDMINETWORKUP::pfnAllocBuf.
* @param pThis The device state structure.
* @param cbMin The minimum frame size.
* @param fExactSize Whether cbMin is exact or if we have to max it
* out to the max MTU size.
* @param fGso Whether this is a GSO frame or not.
*/
{
/* Deal with existing buffer (descriptor screw up, reset, etc). */
/*
* Allocate the buffer.
*/
{
{
/* Zero packet, no need for the buffer */
return VINF_SUCCESS;
}
if (RT_UNLIKELY(!pDrv))
return VERR_NET_DOWN;
if (RT_FAILURE(rc))
{
/* Suspend TX as we are out of buffers atm */
STATUS |= STATUS_TXOFF;
return rc;
}
E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
}
else
{
/* Create a loopback using the fallback buffer and preallocated SG. */
pSg->cbAvailable = 0;
}
return VINF_SUCCESS;
}
#endif /* E1K_WITH_TXD_CACHE */
/**
* Checks if it's a GSO buffer or not.
*
* @returns true / false.
* @param pTxSg The scatter / gather buffer.
*/
{
#if 0
if (!pTxSg)
E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
#endif
}
#ifndef E1K_WITH_TXD_CACHE
/**
* Load transmit descriptor from guest memory.
*
* @param pThis The device state structure.
* @param pDesc Pointer to descriptor union.
* @param addr Physical address in guest context.
* @thread E1000_TX
*/
{
}
#else /* E1K_WITH_TXD_CACHE */
/**
* Load transmit descriptors from guest memory.
*
* We need two physical reads in case the tail wrapped around the end of the
* TX descriptor ring.
*
* @returns the actual number of descriptors fetched.
* @param pThis The device state structure.
* @param pDesc Pointer to descriptor union.
* @param addr Physical address in guest context.
* @thread E1000_TX
*/
{
/* We've already loaded pThis->nTxDFetched descriptors past TDH. */
E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
"nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
if (nDescsToFetch == 0)
return 0;
E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
if (nDescsToFetch > nDescsInSingleRead)
{
E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
}
return nDescsToFetch;
}
/**
* Load transmit descriptors from guest memory only if there are no loaded
* descriptors.
*
* @returns true if there are descriptors in cache.
* @param pThis The device state structure.
* @param pDesc Pointer to descriptor union.
* @param addr Physical address in guest context.
* @thread E1000_TX
*/
{
if (pThis->nTxDFetched == 0)
return e1kTxDLoadMore(pThis) != 0;
return true;
}
#endif /* E1K_WITH_TXD_CACHE */
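/*
 * Illustrative sketch (not part of the device code): splitting a descriptor
 * fetch into at most two contiguous reads when the area between the first
 * not-yet-loaded descriptor and TDT wraps around the end of the ring, as
 * described above. Names are hypothetical; the real code also caps the count
 * by the free space in the descriptor cache.
 */
#if 0
#include <stdint.h>

typedef struct EXTXFETCH
{
    uint32_t cFirstRead;   /* descriptors to read up to the end of the ring  */
    uint32_t cSecondRead;  /* descriptors to read from the start of the ring */
} EXTXFETCH;

static EXTXFETCH exampleSplitTxdFetch(uint32_t iFirstNotLoaded, /* (TDH + already fetched) % cDescs */
                                      uint32_t tdt,
                                      uint32_t cDescs)
{
    EXTXFETCH r;
    uint32_t cToFetch = (tdt >= iFirstNotLoaded)
                      ? tdt - iFirstNotLoaded
                      : cDescs - iFirstNotLoaded + tdt;
    uint32_t cToEnd   = cDescs - iFirstNotLoaded;     /* room before the wrap   */
    r.cFirstRead  = cToFetch < cToEnd ? cToFetch : cToEnd;
    r.cSecondRead = cToFetch - r.cFirstRead;          /* non-zero only on wrap  */
    return r;
}
#endif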
/**
* Write back transmit descriptor to guest memory.
*
* @param pThis The device state structure.
* @param pDesc Pointer to descriptor union.
* @param addr Physical address in guest context.
* @thread E1000_TX
*/
{
/* Only the last half of the descriptor has to be written back. */
}
/**
* Transmit complete frame.
*
* @remarks We skip the FCS since we're not responsible for sending anything to
* a real ethernet wire.
*
* @param pThis The device state structure.
* @param fOnWorkerThread Whether we're on a worker thread or an EMT.
* @thread E1000_TX
*/
{
#ifdef E1K_INT_STATS
if (cbFrame <= 1514)
else if (cbFrame <= 2962)
else if (cbFrame <= 4410)
else if (cbFrame <= 5858)
else if (cbFrame <= 7306)
else if (cbFrame <= 8754)
else if (cbFrame <= 16384)
else if (cbFrame <= 32768)
else
#endif /* E1K_INT_STATS */
/* Add VLAN tag */
{
E1kLog3(("%s Inserting VLAN tag %08x\n",
cbFrame += 4;
}
/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
"%.*Rhxd\n"
"%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
/* Update the stats */
/* Update octet transmit counter */
if (cbFrame == 64)
else if (cbFrame < 128)
else if (cbFrame < 256)
else if (cbFrame < 512)
else if (cbFrame < 1024)
else
/*
* Dump and send the packet.
*/
int rc = VERR_NET_DOWN;
{
if (pDrv)
{
/* Release critical section to avoid deadlock in CanReceive */
//e1kCsLeave(pThis);
//e1kCsEnter(pThis, RT_SRC_POS);
}
}
else if (pSg)
{
/** @todo do we actually need to check that we're in loopback mode here? */
{
rc = VINF_SUCCESS;
}
}
else
rc = VERR_NET_DOWN;
if (RT_FAILURE(rc))
{
/** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
}
}
/**
* Compute and write internet checksum (e1kCSum16) at the specified offset.
*
* @param pThis The device state structure.
* @param pPkt Pointer to the packet.
* @param u16PktLen Total length of the packet.
* @param cso Offset in packet to write checksum at.
* @param css Offset in packet to start computing
* checksum from.
* @param cse Offset in packet to stop computing
* checksum at.
* @thread E1000_TX
*/
static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
{
{
E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
return;
}
{
E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
return;
}
if (cse == 0)
}
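/*
 * Illustrative sketch (not part of the device code): a plain RFC 1071 Internet
 * checksum over the packet bytes from 'css' to 'cse' inclusive, stored at
 * offset 'cso'. It is a simplified stand-alone take on the checksum insertion
 * described above (the real code uses e1kCSum16); bounds checks are assumed to
 * have been done by the caller, as in the real handler.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint16_t exampleCSum16(const uint8_t *pb, size_t cb)
{
    uint32_t sum = 0;
    while (cb > 1)
    {
        sum += ((uint32_t)pb[0] << 8) | pb[1];   /* big-endian 16-bit words */
        pb  += 2;
        cb  -= 2;
    }
    if (cb)                                      /* odd trailing byte */
        sum += (uint32_t)pb[0] << 8;
    while (sum >> 16)                            /* fold carries */
        sum = (sum & 0xFFFFu) + (sum >> 16);
    return (uint16_t)~sum;
}

static void exampleInsertChecksum(uint8_t *pPkt, uint16_t cbPkt,
                                  uint8_t cso, uint8_t css, uint16_t cse)
{
    if (cse == 0)                                /* 0 means "to the end of the packet" */
        cse = (uint16_t)(cbPkt - 1);
    uint16_t csum = exampleCSum16(pPkt + css, (size_t)cse - css + 1);
    pPkt[cso]     = (uint8_t)(csum >> 8);        /* store in network byte order */
    pPkt[cso + 1] = (uint8_t)(csum & 0xFF);
}
#endif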
/**
* Add a part of descriptor's buffer to transmit frame.
*
* @remarks data.u64BufAddr is used unconditionally for both data
* and legacy descriptors since it is identical to
* legacy.u64BufAddr.
*
* @param pThis The device state structure.
* @param pDesc Pointer to the descriptor to transmit.
* @param u16Len Length of buffer to the end of segment.
* @param fSend Force packet sending.
* @param fOnWorkerThread Whether we're on a worker thread or an EMT.
* @thread E1000_TX
*/
#ifndef E1K_WITH_TXD_CACHE
static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
{
/* TCP header being transmitted */
/* IP header being transmitted */
E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
E1kLog3(("%s Dump of the segment:\n"
"%.*Rhxd\n"
"%s --- End of dump ---\n",
E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
if (pThis->u16HdrRemain > 0)
{
/* The header was not complete, check if it is now */
{
/* The rest is payload */
pThis->u16HdrRemain = 0;
/* Save partial checksum and flags */
/* Clear FIN and PSH flags now and set them only in the last segment */
}
else
{
/* Still not */
E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
return;
}
}
if (fSend)
{
/* Leave ethernet header intact */
/* IP Total Length = payload + headers - ethernet header */
E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
/* Update IP Checksum */
/* Update TCP flags */
/* Restore original FIN and PSH flags for the last segment */
if (pThis->u32PayRemain == 0)
{
}
/* Add TCP length to partial pseudo header sum */
while (csum >> 16)
/* Compute final checksum */
/*
* Transmit it. If we've used the SG already, allocate a new one before
* we copy the data.
*/
e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
{
}
/* Update Sequence Number */
/* Increment IP identification */
}
}
#else /* E1K_WITH_TXD_CACHE */
static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
{
int rc = VINF_SUCCESS;
/* TCP header being transmitted */
/* IP header being transmitted */
E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
E1kLog3(("%s Dump of the segment:\n"
"%.*Rhxd\n"
"%s --- End of dump ---\n",
E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
if (pThis->u16HdrRemain > 0)
{
/* The header was not complete, check if it is now */
{
/* The rest is payload */
pThis->u16HdrRemain = 0;
/* Save partial checksum and flags */
/* Clear FIN and PSH flags now and set them only in the last segment */
}
else
{
/* Still not */
E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
return rc;
}
}
if (fSend)
{
/* Leave ethernet header intact */
/* IP Total Length = payload + headers - ethernet header */
E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
/* Update IP Checksum */
/* Update TCP flags */
/* Restore original FIN and PSH flags for the last segment */
if (pThis->u32PayRemain == 0)
{
}
/* Add TCP length to partial pseudo header sum */
while (csum >> 16)
/* Compute final checksum */
/*
* Transmit it.
*/
{
}
/* Update Sequence Number */
/* Increment IP identification */
/* Allocate new buffer for the next segment. */
if (pThis->u32PayRemain)
{
}
}
return rc;
}
#endif /* E1K_WITH_TXD_CACHE */
#ifndef E1K_WITH_TXD_CACHE
/**
* TCP segmentation offloading fallback: Add descriptor's buffer to transmit
* frame.
*
* We construct the frame in the fallback buffer first and then copy it to the SG
* buffer before passing it down to the network driver code.
*
* @returns true if the frame should be transmitted, false if not.
*
* @param pThis The device state structure.
* @param pDesc Pointer to the descriptor to transmit.
* @param cbFragment Length of descriptor's buffer.
* @param fOnWorkerThread Whether we're on a worker thread or an EMT.
* @thread E1000_TX
*/
static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC* pDesc, uint32_t cbFragment, bool fOnWorkerThread)
{
Assert(u16MaxPktLen != 0);
/*
* Carve out segments.
*/
do
{
/* Calculate how many bytes we have left in this TCP segment */
if (cb > cbFragment)
{
/* This descriptor fits completely into current segment */
cb = cbFragment;
e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
}
else
{
/*
* Rewind the packet tail pointer to the beginning of payload,
* so we continue writing right beyond the header.
*/
}
cbFragment -= cb;
} while (cbFragment > 0);
{
/* End of packet, next segment will contain header. */
if (pThis->u32PayRemain != 0)
pThis->u16TxPktLen = 0;
}
return false;
}
#else /* E1K_WITH_TXD_CACHE */
/**
* TCP segmentation offloading fallback: Add descriptor's buffer to transmit
* frame.
*
* We construct the frame in the fallback buffer first and then copy it to the SG
* buffer before passing it down to the network driver code.
*
* @returns error code
*
* @param pThis The device state structure.
* @param pDesc Pointer to the descriptor to transmit.
* @param cbFragment Length of descriptor's buffer.
* @param fOnWorkerThread Whether we're on a worker thread or an EMT.
* @thread E1000_TX
*/
{
int rc = VINF_SUCCESS;
Assert(u16MaxPktLen != 0);
/*
* Carve out segments.
*/
do
{
/* Calculate how many bytes we have left in this TCP segment */
{
/* This descriptor fits completely into current segment */
rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
}
else
{
/*
* Rewind the packet tail pointer to the beginning of payload,
* so we continue writing right beyond the header.
*/
}
{
/* End of packet, next segment will contain header. */
if (pThis->u32PayRemain != 0)
pThis->u16TxPktLen = 0;
}
return false;
}
#endif /* E1K_WITH_TXD_CACHE */
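/*
 * Illustrative sketch (not part of the device code): the carving loop above in
 * essence -- a descriptor buffer is consumed in chunks no larger than the room
 * left in the current TCP segment (headers + MSS), and a segment is emitted
 * whenever it fills up or the descriptor ends the packet. All names, including
 * the emitSegment callback, are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

typedef struct EXTSESTATE
{
    uint16_t cbHdr;        /* header length prepended to every segment */
    uint16_t cbMss;        /* maximum segment (payload) size           */
    uint16_t cbPktLen;     /* bytes accumulated in the current segment */
} EXTSESTATE;

typedef void (*EXEMITSEG)(EXTSESTATE *pState, bool fLast);

static void exampleCarveSegments(EXTSESTATE *pState, uint32_t cbFragment,
                                 bool fEop, EXEMITSEG emitSegment)
{
    do
    {
        /* Room left in the current segment (headers + MSS). */
        uint32_t cbMax = (uint32_t)pState->cbHdr + pState->cbMss;
        uint32_t cb    = cbMax - pState->cbPktLen;
        if (cb > cbFragment)
            cb = cbFragment;                      /* fragment fits entirely */
        pState->cbPktLen += (uint16_t)cb;
        cbFragment       -= cb;
        if (pState->cbPktLen == cbMax || (fEop && cbFragment == 0))
        {
            emitSegment(pState, fEop && cbFragment == 0);
            pState->cbPktLen = pState->cbHdr;     /* keep headers, restart payload */
        }
    } while (cbFragment > 0);
}
#endif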
/**
* Add descriptor's buffer to transmit frame.
*
* This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
* TSE frames we cannot handle as GSO.
*
* @returns true on success, false on failure.
*
* @param pThis The device state structure.
* @param PhysAddr The physical address of the descriptor buffer.
* @param cbFragment Length of descriptor's buffer.
* @thread E1000_TX
*/
{
{
E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
return false;
}
{
E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
return false;
}
{
}
return true;
}
/**
* Write the descriptor back to guest memory and notify the guest.
*
* @param pThis The device state structure.
* @param pDesc Pointer to the descriptor have been transmitted.
* @param addr Physical address of the descriptor in guest memory.
* @thread E1000_TX
*/
{
/*
* We fake descriptor write-back bursting. Descriptors are written back as they are
* processed.
*/
/* Let's pretend we process descriptors. Write back with DD set. */
/*
* Prior to r71586 we tried to accommodate the case when write-back bursts
* are enabled without actually implementing bursting by writing back all
* descriptors, even the ones that do not have RS set. This caused kernel
* panics with Linux SMP kernels, as the e1000 driver tried to free up the skb
* associated with the written-back descriptor if it happened to be a context
* descriptor, since context descriptors have no skb associated with them.
* Starting from r71586 we write back only the descriptors with RS set,
* which is a little bit different from what the real hardware does in
* case there is a chain of data descriptors where some of them have RS set
* and others do not. It is a very uncommon scenario, imho.
* We need to check RPS as well since some legacy drivers use it instead of
* RS even with newer cards.
*/
{
{
#ifdef E1K_USE_TX_TIMERS
{
//if (pThis->fIntRaised)
//{
// /* Interrupt is already pending, no need for timers */
// ICR |= ICR_TXDW;
//}
//else {
/* Arm the timer to fire in TIDV usec (discard .024) */
# ifndef E1K_NO_TAD
/* If absolute timer delay is enabled and the timer is not running yet, arm it. */
E1kLog2(("%s Checking if TAD timer is running\n",
# endif /* E1K_NO_TAD */
}
else
{
E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
# ifndef E1K_NO_TAD
/* Cancel both timers if armed and fire immediately. */
# endif /* E1K_NO_TAD */
#endif /* E1K_USE_TX_TIMERS */
#ifdef E1K_USE_TX_TIMERS
}
#endif /* E1K_USE_TX_TIMERS */
}
}
else
{
}
}
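/*
 * Illustrative sketch (not part of the device code): the write-back policy
 * described in the comment above -- only descriptors that requested a report
 * (RS, or legacy RPS) are written back, and only the second half of the
 * descriptor (the part holding the status byte) is stored, with DD set. The
 * types, the assumed DD bit position and the physWrite helper are simplified
 * assumptions for this example.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct EXTXDESC
{
    uint64_t u64BufAddr;   /* first half: buffer address            */
    uint64_t u64Second;    /* second half: lengths, command, status */
} EXTXDESC;

#define EX_STA_DD   (1ull << 32)   /* assumed position of the DD status bit */

/* Hypothetical guest-physical memory writer (stubbed out for the sketch). */
static void physWrite(uint64_t GCPhys, const void *pv, size_t cb)
{
    (void)GCPhys; (void)pv; (void)cb;
}

static void exampleWriteBackDesc(uint64_t addrDesc, EXTXDESC *pDesc,
                                 bool fReportRequested /* RS or RPS */)
{
    if (!fReportRequested)
        return;                                  /* nothing to report     */
    pDesc->u64Second |= EX_STA_DD;               /* mark descriptor done  */
    /* Store only the second 8 bytes of the 16-byte descriptor. */
    physWrite(addrDesc + offsetof(EXTXDESC, u64Second),
              &pDesc->u64Second, sizeof(pDesc->u64Second));
}
#endif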
#ifndef E1K_WITH_TXD_CACHE
/**
* Process Transmit Descriptor.
*
* E1000 supports three types of transmit descriptors:
* - legacy: data descriptors of the older, context-less format.
* - data: same as legacy but providing new offloading capabilities.
* - context: sets up the context for the following data descriptors.
*
* @param pThis The device state structure.
* @param pDesc Pointer to descriptor union.
* @param addr Physical address of descriptor in guest memory.
* @param fOnWorkerThread Whether we're on a worker thread or an EMT.
* @thread E1000_TX
*/
{
int rc = VINF_SUCCESS;
#ifdef E1K_USE_TX_TIMERS
#endif /* E1K_USE_TX_TIMERS */
switch (e1kGetDescType(pDesc))
{
case E1K_DTYP_CONTEXT:
{
}
else
{
}
E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
break;
case E1K_DTYP_DATA:
{
{
/** @todo Same as legacy when !TSE. See below. */
break;
}
&pThis->StatTxDescData);
/*
* The last descriptor of a non-TSE packet must contain the VLE flag.
* TSE packets have the VLE flag in the first descriptor. The latter
* case is taken care of a bit later when cbVTag gets assigned.
*
*/
{
}
/*
* First fragment: Allocate new buffer and save the IXSM and TXSM
* packet options as these are only valid in the first fragment.
*/
if (pThis->u16TxPktLen == 0)
{
{
/* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
}
else
cbVTag = 4;
rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
true /*fExactSize*/, true /*fGso*/);
else
/**
* @todo: Perhaps it is not that simple for GSO packets! We may
* need to unwind some changes.
*/
if (RT_FAILURE(rc))
{
break;
}
/** @todo Is there any way to indicate errors other than collisions? Like
* VERR_NET_DOWN. */
}
/*
* Add the descriptor data to the frame. If the frame is complete,
* transmit it and reset the u16TxPktLen field.
*/
{
{
if ( fRc
&& pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
{
}
else
{
if (fRc)
}
pThis->u16TxPktLen = 0;
}
}
{
{
{
}
else
pThis->u16TxPktLen = 0;
}
}
else
{
}
break;
}
case E1K_DTYP_LEGACY:
{
break;
}
/* First fragment: allocate new buffer. */
if (pThis->u16TxPktLen == 0)
{
else
cbVTag = 4;
/** @todo reset status bits? */
rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
if (RT_FAILURE(rc))
{
break;
}
/** @todo Is there any way to indicate errors other than collisions? Like
* VERR_NET_DOWN. */
}
/* Add fragment to frame. */
{
/* Last fragment: Transmit and reset the packet storage counter. */
{
/** @todo Offload processing goes here. */
pThis->u16TxPktLen = 0;
}
}
/* Last fragment + failure: free the buffer and reset the storage counter. */
{
pThis->u16TxPktLen = 0;
}
break;
default:
E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
break;
}
return rc;
}
#else /* E1K_WITH_TXD_CACHE */
/**
* Process Transmit Descriptor.
*
* E1000 supports three types of transmit descriptors:
* - legacy: data descriptors of the older, context-less format.
* - data: same as legacy but providing new offloading capabilities.
* - context: sets up the context for the following data descriptors.
*
* @param pThis The device state structure.
* @param pDesc Pointer to descriptor union.
* @param addr Physical address of descriptor in guest memory.
* @param fOnWorkerThread Whether we're on a worker thread or an EMT.
* @param cbPacketSize Size of the packet as previously computed.
* @thread E1000_TX
*/
bool fOnWorkerThread)
{
int rc = VINF_SUCCESS;
#ifdef E1K_USE_TX_TIMERS
#endif /* E1K_USE_TX_TIMERS */
switch (e1kGetDescType(pDesc))
{
case E1K_DTYP_CONTEXT:
/* The caller has already updated the context. */
break;
case E1K_DTYP_DATA:
{
&pThis->StatTxDescData);
{
}
else
{
/*
* Add the descriptor data to the frame. If the frame is complete,
* transmit it and reset the u16TxPktLen field.
*/
{
{
if ( fRc
&& pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
{
}
else
{
if (fRc)
}
pThis->u16TxPktLen = 0;
}
}
{
{
{
}
else
pThis->u16TxPktLen = 0;
}
}
else
{
}
}
break;
}
case E1K_DTYP_LEGACY:
{
}
else
{
/* Add fragment to frame. */
{
/* Last fragment: Transmit and reset the packet storage counter. */
{
{
0);
}
pThis->u16TxPktLen = 0;
}
}
/* Last fragment + failure: free the buffer and reset the storage counter. */
{
pThis->u16TxPktLen = 0;
}
}
break;
default:
E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
break;
}
return rc;
}
{
{
}
else
{
}
E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
}
{
LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
/* Check if we have located the packet already. */
{
LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
return true;
}
bool fTSE = false;
{
switch (e1kGetDescType(pDesc))
{
case E1K_DTYP_CONTEXT:
continue;
case E1K_DTYP_LEGACY:
/* Skip empty descriptors. */
break;
break;
case E1K_DTYP_DATA:
/* Skip empty descriptors. */
break;
if (cbPacket == 0)
{
/*
* The first fragment: save IXSM and TXSM options
* as these are only valid in the first fragment.
*/
/*
* TSE descriptors have VLE bit properly set in
* the first fragment.
*/
if (fTSE)
{
}
}
break;
default:
AssertMsgFailed(("Impossible descriptor type!"));
}
{
/*
* Non-TSE descriptors have VLE bit properly set in
* the last fragment.
*/
if (!fTSE)
{
}
/*
* Compute the required buffer size. If we cannot do GSO but still
* have to do segmentation we allocate the first segment only.
*/
cbPacket :
LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
return true;
}
}
{
/* All descriptors were empty, we need to process them as a dummy packet */
LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
return true;
}
LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
return false;
}
{
int rc = VINF_SUCCESS;
LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
{
E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
if (RT_FAILURE(rc))
break;
TDH = 0;
{
E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
}
++pThis->iTxDCurrent;
break;
}
LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
return rc;
}
#endif /* E1K_WITH_TXD_CACHE */
#ifndef E1K_WITH_TXD_CACHE
/**
* Transmit pending descriptors.
*
* @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
*
* @param pThis The E1000 state.
* @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
*/
{
int rc = VINF_SUCCESS;
/* Check if transmitter is enabled. */
return VINF_SUCCESS;
/*
* Grab the xmit lock of the driver as well as the E1K device state.
*/
{
if (pDrv)
{
if (RT_FAILURE(rc))
{
return rc;
}
}
/*
* Process all pending descriptors.
* Note! Do not process descriptors in locked state
*/
{
E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
/* If we failed to transmit descriptor we will try it again later */
if (RT_FAILURE(rc))
break;
TDH = 0;
{
E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
}
}
/// @todo: uncomment: pThis->uStatIntTXQE++;
/// @todo: uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
/*
* Release the lock.
*/
if (pDrv)
}
return rc;
}
#else /* E1K_WITH_TXD_CACHE */
{
for (i = 0; i < cDescs; ++i)
{
if (i == tdh)
LogRel((">>> "));
}
LogRel(("-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
else
for (i = 0; i < pThis->nTxDFetched; ++i)
{
if (i == pThis->iTxDCurrent)
LogRel((">>> "));
LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
}
}
/**
* Transmit pending descriptors.
*
* @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
*
* @param pThis The E1000 state.
* @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
*/
{
int rc = VINF_SUCCESS;
/* Check if transmitter is enabled. */
return VINF_SUCCESS;
/*
* Grab the xmit lock of the driver as well as the E1K device state.
*/
if (pDrv)
{
if (RT_FAILURE(rc))
return rc;
}
/*
* Process all pending descriptors.
* Note! Do not process descriptors in locked state
*/
{
/*
* fIncomplete is set whenever we try to fetch additional descriptors
* for an incomplete packet. If we fail to locate a complete packet on
* the next iteration, we need to reset the cache or we risk getting
* stuck in this loop forever.
*/
bool fIncomplete = false;
{
while (e1kLocateTxPacket(pThis))
{
fIncomplete = false;
/* Found a complete packet, allocate it. */
/* If we're out of bandwidth we'll come back later. */
if (RT_FAILURE(rc))
goto out;
/* Copy the packet to allocated buffer and send it. */
/* If we're out of bandwidth we'll come back later. */
if (RT_FAILURE(rc))
goto out;
}
if (RT_UNLIKELY(fIncomplete))
{
static bool fTxDCacheDumped = false;
/*
* The descriptor cache is full, but we were unable to find
* a complete packet in it. Drop the cache and hope that
* the guest driver can recover from the network card error.
*/
LogRel(("%s No complete packets in%s TxD cache! "
"Fetched=%d, current=%d, TX len=%d.\n",
e1kGetTxLen(pThis)));
if (!fTxDCacheDumped)
{
fTxDCacheDumped = true;
}
/*
* Returning an error at this point means Guru in R0
* (see @bugref{6428}).
*/
# ifdef IN_RING3
# else /* !IN_RING3 */
# endif /* !IN_RING3 */
goto out;
}
if (u8Remain > 0)
{
Log4(("%s Incomplete packet at %d. Already fetched %d, "
"%d more are available\n",
/*
* A packet was partially fetched. Move the incomplete packet to
* the beginning of the cache buffer, then load more descriptors.
*/
pThis->iTxDCurrent = 0;
fIncomplete = true;
}
else
pThis->nTxDFetched = 0;
pThis->iTxDCurrent = 0;
}
{
E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
}
out:
/// @todo: uncomment: pThis->uStatIntTXQE++;
/// @todo: uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
}
/*
* Release the lock.
*/
if (pDrv)
return rc;
}
#endif /* E1K_WITH_TXD_CACHE */
#ifdef IN_RING3
/**
* @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
*/
{
/* Resume suspended transmission */
STATUS &= ~STATUS_TXOFF;
}
/**
* Callback for consuming from transmit queue. It gets called in R3 whenever
*
* @returns true
* @param pDevIns Pointer to device instance structure.
* @param pItem Pointer to the element being dequeued (not used).
* @thread ???
*/
{
return true;
}
/**
* Handler for the wakeup signaller queue.
*/
{
return true;
}
#endif /* IN_RING3 */
/**
* Write handler for Transmit Descriptor Tail register.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
/* All descriptors starting with head and not including tail belong to us. */
/* Process them. */
E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
/* Ignore TDT writes when the link is down. */
{
Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
/* Transmit pending packets if possible, defer it if we cannot do it
in the current context. */
#ifdef E1K_TX_DELAY
{
{
#ifdef E1K_INT_STATS
#endif
}
return rc;
}
/* We failed to enter the TX critical section -- transmit as usual. */
#endif /* E1K_TX_DELAY */
#ifndef IN_RING3
{
if (RT_UNLIKELY(pItem))
}
else
#endif
{
if (rc == VERR_TRY_AGAIN)
rc = VINF_SUCCESS;
else if (rc == VERR_SEM_BUSY)
}
}
return rc;
}
/**
* Write handler for Multicast Table Array registers.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @thread EMT
*/
{
return VINF_SUCCESS;
}
/**
* Read handler for Multicast Table Array registers.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @thread EMT
*/
{
return VINF_SUCCESS;
}
/**
* Write handler for Receive Address registers.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @thread EMT
*/
{
AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
return VINF_SUCCESS;
}
/**
* Read handler for Receive Address registers.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @thread EMT
*/
{
*pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
return VINF_SUCCESS;
}
/**
* Write handler for VLAN Filter Table Array registers.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @thread EMT
*/
{
return VINF_SUCCESS;
}
/**
* Read handler for VLAN Filter Table Array registers.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @thread EMT
*/
{
return VINF_SUCCESS;
}
/**
* Read handler for unimplemented registers.
*
* Merely reports reads from unimplemented registers.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @thread EMT
*/
static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
{
E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
*pu32Value = 0;
return VINF_SUCCESS;
}
/**
* Default register read handler with automatic clear operation.
*
* Retrieves the value of the register from the register array in the device
* state structure, then resets all its bits.
*
* @remarks The 'mask' parameter is simply ignored as masking and shifting are
* done in the caller.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @thread EMT
*/
static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
{
return rc;
}
/**
* Default register read handler.
*
* Retrieves the value of the register from the register array in the device
* state structure. Bits corresponding to 0s in the 'readable' mask will always
* read as 0s.
*
* @remarks The 'mask' parameter is simply ignored as masking and shifting are
* done in the caller.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @thread EMT
*/
{
return VINF_SUCCESS;
}
/**
* Write handler for unimplemented registers.
*
* Merely reports writes to unimplemented registers.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @thread EMT
*/
static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
{
E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
return VINF_SUCCESS;
}
/**
* Default register write handler.
*
* Stores the value in the register array in the device state structure. Only
* bits corresponding to 1s in both 'writable' and 'mask' will be stored.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offset Register offset in memory-mapped frame.
* @param index Register index in register array.
* @param value The value to store.
* @param mask Used to implement partial writes (8 and 16-bit).
* @thread EMT
*/
{
return VINF_SUCCESS;
}
/**
* Search register table for matching register.
*
* @returns Index in the register table or -1 if not found.
*
* @param pThis The device state structure.
* @param offReg Register offset in memory-mapped region.
* @thread EMT
*/
{
#if 0
int index;
{
if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
{
return index;
}
}
#else
int iStart = 0;
int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
for (;;)
{
{
if (i == iStart)
break;
iEnd = i;
}
{
i++;
if (i == iEnd)
break;
iStart = i;
}
else
return i;
}
return i;
# ifdef VBOX_STRICT
for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
# endif
#endif
return -1;
}
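/*
 * Illustrative sketch (not part of the device code): the binary search used
 * above to map an MMIO offset to a register table index, assuming the table is
 * sorted by offset and each entry covers [offset, offset + size). The table
 * type below is a simplified stand-in for g_aE1kRegMap entries.
 */
#if 0
#include <stdint.h>

typedef struct EXREGDESC
{
    uint32_t offset;   /* start of the register in the MMIO frame */
    uint32_t size;     /* size covered by this entry in bytes     */
} EXREGDESC;

static int exampleRegLookup(const EXREGDESC *paRegs, int cRegs, uint32_t offReg)
{
    int iStart = 0;
    int iEnd   = cRegs;
    while (iStart < iEnd)
    {
        int i = iStart + (iEnd - iStart) / 2;
        if (offReg < paRegs[i].offset)
            iEnd = i;                                  /* look in the lower half      */
        else if (offReg >= paRegs[i].offset + paRegs[i].size)
            iStart = i + 1;                            /* look in the upper half      */
        else
            return i;                                  /* offset falls inside entry i */
    }
    return -1;                                         /* not found */
}
#endif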
/**
* Handle unaligned register read operation.
*
* Looks up and calls appropriate handler.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offReg Register offset in memory-mapped frame.
* @param pv Where to store the result.
* @param cb Number of bytes to read.
* @thread EMT
* @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
* accesses we have to take care of that ourselves.
*/
{
int rc = VINF_SUCCESS;
#ifdef DEBUG
char buf[9];
#endif
/*
* From the spec:
* For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
* double word) are ignored. Partial reads return all 32 bits of data regardless of the byte enables.
*/
/*
* To be able to read bytes and short words we convert them to properly
* shifted 32-bit words and masks. The idea is to keep register-specific
* handlers simple. Most accesses will be 32-bit anyway.
*/
switch (cb)
{
default:
}
if (index != -1)
{
{
/* Make the mask correspond to the bits we are about to read. */
if (!mask)
return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
/*
* Read it. Pass the mask so the handler knows what has to be read.
* Mask out irrelevant bits.
*/
//rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
return rc;
//pThis->fDelayInts = false;
//pThis->iStatIntLost += pThis->iStatIntLostOne;
//pThis->iStatIntLostOne = 0;
//e1kCsLeave(pThis);
E1kLog2(("%s At %08X read %s from %s (%s)\n",
pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
/* Shift back the result. */
}
else
E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
if (IOM_SUCCESS(rc))
}
else
E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
return rc;
}
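/*
 * Illustrative sketch (not part of the device code): turning a 1/2/4-byte I/O
 * access at an arbitrary offset into the aligned 32-bit word, shift and mask
 * that the register handlers above expect. Purely arithmetic; names are
 * hypothetical, and the access is assumed not to cross a 32-bit boundary.
 */
#if 0
#include <stdint.h>

typedef struct EXACCESS
{
    uint32_t offAligned;   /* 4-byte aligned register offset     */
    unsigned cShift;       /* bit shift of the accessed bytes    */
    uint32_t u32Mask;      /* mask of the bits actually accessed */
} EXACCESS;

static EXACCESS exampleSplitAccess(uint32_t offReg, unsigned cb /* 1, 2 or 4 */)
{
    EXACCESS a;
    a.offAligned  = offReg & ~UINT32_C(3);
    a.cShift      = (offReg & 3) * 8;
    uint32_t mask = cb == 1 ? UINT32_C(0xFF)
                  : cb == 2 ? UINT32_C(0xFFFF)
                  :           UINT32_C(0xFFFFFFFF);
    a.u32Mask     = mask << a.cShift;
    return a;
}

/* A byte read at offset 0x05 becomes a 32-bit read of the register at 0x04,
 * masked with 0x0000FF00 and shifted right by 8 before returning the value. */
#endif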
/**
* Handle 4 byte aligned and sized read operation.
*
* Looks up and calls appropriate handler.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offReg Register offset in memory-mapped frame.
* @param pu32 Where to store the result.
* @thread EMT
*/
{
/*
* Lookup the register and check that it's readable.
*/
int rc = VINF_SUCCESS;
{
{
/*
* Read it. Pass the mask so the handler knows what has to be read.
* Mask out irrelevant bits.
*/
//rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
//if (RT_UNLIKELY(rc != VINF_SUCCESS))
// return rc;
//pThis->fDelayInts = false;
//pThis->iStatIntLost += pThis->iStatIntLostOne;
//pThis->iStatIntLostOne = 0;
//e1kCsLeave(pThis);
Log6(("%s At %08X read %08X from %s (%s)\n",
if (IOM_SUCCESS(rc))
}
else
}
else
return rc;
}
/**
* Handle 4 byte sized and aligned register write operation.
*
* Looks up and calls appropriate handler.
*
* @returns VBox status code.
*
* @param pThis The device state structure.
* @param offReg Register offset in memory-mapped frame.
* @param u32Value The value to write.
* @thread EMT
*/
{
int rc = VINF_SUCCESS;
{
{
/*
* Write it. Pass the mask so the handler knows what has to be written.
* Mask out irrelevant bits.
*/
Log6(("%s At %08X write %08X to %s (%s)\n",
//rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
//if (RT_UNLIKELY(rc != VINF_SUCCESS))
// return rc;
//pThis->fDelayInts = false;
//pThis->iStatIntLost += pThis->iStatIntLostOne;
//pThis->iStatIntLostOne = 0;
//e1kCsLeave(pThis);
}
else
E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
if (IOM_SUCCESS(rc))
}
else
E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
return rc;
}
/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
/**
* @callback_method_impl{FNIOMMMIOREAD}
*/
PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
{
return rc;
}
/**
* @callback_method_impl{FNIOMMMIOWRITE}
*/
PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
{
return rc;
}
/**
* @callback_method_impl{FNIOMIOPORTIN}
*/
PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
{
int rc;
switch (uPort)
{
case 0x00: /* IOADDR */
E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
rc = VINF_SUCCESS;
break;
case 0x04: /* IODATA */
else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
if (rc == VINF_IOM_R3_MMIO_READ)
E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
break;
default:
//rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
rc = VINF_SUCCESS;
}
else
{
rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
}
return rc;
}
/**
* @callback_method_impl{FNIOMIOPORTOUT}
*/
PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
{
int rc;
{
switch (uPort)
{
case 0x00: /* IOADDR */
E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
rc = VINF_SUCCESS;
break;
case 0x04: /* IODATA */
E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
{
if (rc == VINF_IOM_R3_MMIO_WRITE)
}
else
break;
default:
}
}
else
{
rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
}
return rc;
}
#ifdef IN_RING3
/**
* Dump complete device state to log.
*
* @param pThis Pointer to device state.
*/
{
for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
{
}
# ifdef E1K_INT_STATS
# endif /* E1K_INT_STATS */
}
/**
* @callback_method_impl{FNPCIIOREGIONMAP}
*/
static DECLCALLBACK(int) e1kMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType)
{
int rc;
switch (enmType)
{
case PCI_ADDRESS_SPACE_IO:
break;
case PCI_ADDRESS_SPACE_MEM:
/*
* From the spec:
* For registers that should be accessed as 32-bit double words,
* partial writes (less than a 32-bit double word) are ignored.
* Partial reads return all 32 bits of data regardless of the
* byte enables.
*/
"e1kMMIOWrite", "e1kMMIORead");
"e1kMMIOWrite", "e1kMMIORead");
break;
default:
/* We should never get here */
AssertMsgFailed(("Invalid PCI address space param in map callback"));
break;
}
return rc;
}
/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
/**
* Check if the device can receive data now.
* This must be called before the pfnReceive() method is called.
*
* @returns Number of bytes the device can receive.
* @param pInterface Pointer to the interface structure containing the called function pointer.
* @thread EMT
*/
{
#ifndef E1K_WITH_RXD_CACHE
return VERR_NET_NO_BUFFER_SPACE;
{
cb = 0;
else
}
else
{
cb = 0;
E1kLogRel(("E1000: OUT of RX descriptors!\n"));
}
E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
#else /* E1K_WITH_RXD_CACHE */
int rc = VINF_SUCCESS;
return VERR_NET_NO_BUFFER_SPACE;
{
}
{
/* Cache is empty, so is the RX ring. */
}
E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
return rc;
#endif /* E1K_WITH_RXD_CACHE */
}
/**
* @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
*/
static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
{
if (RT_SUCCESS(rc))
return VINF_SUCCESS;
if (RT_UNLIKELY(cMillies == 0))
return VERR_NET_NO_BUFFER_SPACE;
|| enmVMState == VMSTATE_RUNNING_LS))
{
if (RT_SUCCESS(rc2))
{
rc = VINF_SUCCESS;
break;
}
}
return rc;
}
/**
* Matches the packet addresses against Receive Address table. Looks for
* exact matches only.
*
* @returns true if address matches.
* @param pThis Pointer to the state structure.
* @param pvBuf The ethernet packet.
* @param cb Number of bytes available in the packet.
* @thread EMT
*/
{
{
/* Valid address? */
{
//unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
//E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
// pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
// ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
/*
* Address Select:
* 00b = Destination address
* 01b = Source address
* 10b = Reserved
* 11b = Reserved
* Since the ethernet header is (DA, SA, len) we can use the
* address select as an index.
*/
return true;
}
}
return false;
}
/**
* Matches the packet addresses against Multicast Table Array.
*
* @remarks This is an imperfect match since it matches not an exact address
* but a subset of addresses.
*
* @returns true if address matches.
* @param pThis Pointer to the state structure.
* @param pvBuf The ethernet packet.
* @param cb Number of bytes available in the packet.
* @thread EMT
*/
{
/* Get bits 32..47 of destination address */
/*
* offset means:
* 00b = bits 36..47
* 01b = bits 35..46
* 10b = bits 34..45
* 11b = bits 32..43
*/
if (offset < 3)
}
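/*
 * Illustrative sketch (not part of the device code): deriving the Multicast
 * Table Array bit from the destination MAC as described in the comment above.
 * Twelve of the upper 16 address bits (selected by the 2-bit 'offset') index
 * one bit of the 4096-bit table. The table layout below (128 x 32-bit words)
 * and the helper name are assumptions for this example.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool exampleMtaMatch(const uint32_t auMta[128],
                            const uint8_t abDstMac[6],
                            unsigned offsetSel /* 0..3 */)
{
    /* Bits 32..47 of the destination address (the last two bytes). */
    uint16_t u16 = (uint16_t)abDstMac[4] | (uint16_t)((uint16_t)abDstMac[5] << 8);
    /* 0 -> bits 36..47, 1 -> 35..46, 2 -> 34..45, 3 -> 32..43. */
    if (offsetSel < 3)
        u16 >>= 4 - offsetSel;
    u16 &= 0x0FFF;                                   /* keep 12 bits */
    return (auMta[u16 >> 5] & (UINT32_C(1) << (u16 & 0x1F))) != 0;
}
#endif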
/**
* Determines if the packet is to be delivered to upper layer.
*
* The following filters are supported:
* - Multicast
* - VLAN
*
* @returns true if packet is intended for this node.
* @param pThis Pointer to the state structure.
* @param pvBuf The ethernet packet.
* @param cb Number of bytes available in the packet.
* @param pStatus Bit field to store status bits.
* @thread EMT
*/
{
/* Assume that we fail to pass exact filter. */
/* Discard oversized packets */
if (cb > E1K_MAX_RX_PKT_SIZE)
{
E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
return false;
}
{
/* When long packet reception is disabled, packets over 1522 bytes are discarded */
E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
return false;
}
/* Compare TPID with VLAN Ether Type */
{
/* Is VLAN filtering enabled? */
{
/* It is 802.1q packet indeed, let's filter by VID */
if (RCTL & RCTL_CFIEN)
{
{
E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
return false;
}
}
else
{
E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
return false;
}
}
}
/* Broadcast filtering */
return true;
if (e1kIsMulticast(pvBuf))
{
/* Is multicast promiscuous enabled? */
return true;
/* Try perfect matches first */
{
return true;
}
return true;
}
else {
/* Is unicast promiscuous enabled? */
return true;
{
return true;
}
}
return false;
}
/**
* @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
*/
static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
{
int rc = VINF_SUCCESS;
/*
*/
if ( enmVMState != VMSTATE_RUNNING
&& enmVMState != VMSTATE_RUNNING_LS)
{
return VINF_SUCCESS;
}
/* Discard incoming packets in locked state */
{
return VINF_SUCCESS;
}
//if (!e1kCsEnter(pThis, RT_SRC_POS))
// return VERR_PERMISSION_DENIED;
/* Update stats */
{
}
if (fPassed)
{
}
//e1kCsLeave(pThis);
return rc;
}
/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
/**
* @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
*/
static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
{
int rc = VERR_PDM_LUN_NOT_FOUND;
if (iLUN == 0)
{
rc = VINF_SUCCESS;
}
return rc;
}
/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
/**
* @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
*/
{
return VINF_SUCCESS;
}
/**
* @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
*/
{
return PDMNETWORKLINKSTATE_UP;
return PDMNETWORKLINKSTATE_DOWN;
}
/**
* @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
*/
static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
{
* yet written by guest */
{
if (fNewUp)
{
E1kLog(("%s Link will be up in approximately %d secs\n",
pThis->fCableConnected = true;
/* Restore the link back in 5 seconds (by default). */
}
else
{
pThis->fCableConnected = false;
}
}
return VINF_SUCCESS;
}
/* -=-=-=-=- PDMIBASE -=-=-=-=- */
/**
* @interface_method_impl{PDMIBASE,pfnQueryInterface}
*/
{
return NULL;
}
/* -=-=-=-=- Saved State -=-=-=-=- */
/**
* Saves the configuration.
*
* @param pThis The E1K state.
* @param pSSM The handle to the saved state.
*/
{
}
/**
* @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
*/
{
return VINF_SSM_DONT_CALL_AGAIN;
}
/**
* @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
*/
{
return rc;
return VINF_SUCCESS;
#if 0
/* 1) Prevent all threads from modifying the state and memory */
//pThis->fLocked = true;
/* 2) Cancel all timers */
#ifdef E1K_TX_DELAY
#endif /* E1K_TX_DELAY */
#ifdef E1K_USE_TX_TIMERS
#ifndef E1K_NO_TAD
#endif /* E1K_NO_TAD */
#endif /* E1K_USE_TX_TIMERS */
#ifdef E1K_USE_RX_TIMERS
#endif /* E1K_USE_RX_TIMERS */
/* 3) Did I forget anything? */
return VINF_SUCCESS;
#endif
}
/**
* @callback_method_impl{FNSSMDEVSAVEEXEC}
*/
{
//SSMR3PutBool(pSSM, pThis->fDelayInts);
//SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
/** @todo State wrt to the TSE buffer is incomplete, so little point in
* saving this actually. */
#ifdef E1K_WITH_TXD_CACHE
#if 0
#else
/*
* There is no point in storing TX descriptor cache entries as we can simply
* fetch them again. Moreover, normally the cache is always empty when we
* save the state. Store zero entries for compatibility.
*/
SSMR3PutU8(pSSM, 0);
#endif
#endif /* E1K_WITH_TXD_CACHE */
/**@todo GSO requires some more state here. */
return VINF_SUCCESS;
}
#if 0
/**
* @callback_method_impl{FNSSMDEVSAVEDONE}
*/
{
/* If VM is being powered off unlocking will result in assertions in PGM */
else
return VINF_SUCCESS;
}
#endif
/**
* @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
*/
{
return rc;
return VINF_SUCCESS;
}
/**
* @callback_method_impl{FNSSMDEVLOADEXEC}
*/
static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
int rc;
if ( uVersion != E1K_SAVEDSTATE_VERSION
#ifdef E1K_WITH_TXD_CACHE
#endif /* E1K_WITH_TXD_CACHE */
|| uPass != SSM_PASS_FINAL)
{
/* config checks */
LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
}
if (uPass == SSM_PASS_FINAL)
{
{
}
/* the state */
/** @todo: PHY could be made a separate device with its own versioning */
//SSMR3GetBool(pSSM, pThis->fDelayInts);
//SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
{
}
else
{
pThis->u16VTagTCI = 0;
}
#ifdef E1K_WITH_TXD_CACHE
{
if (pThis->nTxDFetched)
}
else
pThis->nTxDFetched = 0;
/*
* @todo: Perhaps we should not store the TXD cache as the entries can be
* simply fetched again from the guest's memory. Or can't they?
*/
#endif /* E1K_WITH_TXD_CACHE */
#ifdef E1K_WITH_RXD_CACHE
/*
* There is no point in storing the RX descriptor cache in the saved
* state, we just need to make sure it is empty.
*/
#endif /* E1K_WITH_RXD_CACHE */
/* derived state */
}
return VINF_SUCCESS;
}
/**
* @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
*/
{
/* Update promiscuous mode */
    /*
     * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
     * passed to us. We go through all this stuff if the link was up and we
     * weren't teleported.
     */
    if (   (STATUS & STATUS_LU)
        && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
&& pThis->cMsLinkUpDelay)
{
/* Restore the link back in five seconds (default). */
}
return VINF_SUCCESS;
}
/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
/**
* @callback_method_impl{FNRTSTRFORMATTYPE}
*/
void *pvArgOutput,
const char *pszType,
void const *pvValue,
int cchWidth,
int cchPrecision,
unsigned fFlags,
void *pvUser)
{
if (!pDesc)
cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
return cbPrintf;
}
/**
* @callback_method_impl{FNRTSTRFORMATTYPE}
*/
void *pvArgOutput,
const char *pszType,
void const *pvValue,
int cchWidth,
int cchPrecision,
unsigned fFlags,
void *pvUser)
{
if (!pDesc)
switch (e1kGetDescType(pDesc))
{
case E1K_DTYP_CONTEXT:
" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
break;
case E1K_DTYP_DATA:
" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
break;
case E1K_DTYP_LEGACY:
" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
break;
default:
break;
}
return cbPrintf;
}
/** Initializes debug helpers (logging format types). */
static int e1kInitDebugHelpers(void)
{
int rc = VINF_SUCCESS;
static bool s_fHelpersRegistered = false;
if (!s_fHelpersRegistered)
{
s_fHelpersRegistered = true;
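        /* A hedged sketch of the registration that presumably happens here:
         * RTStrFormatTypeRegister is the IPRT API for custom %R[...] format
         * types; the type names and the callback identifiers (e1kFmtRxDesc,
         * e1kFmtTxDesc, i.e. the two FNRTSTRFORMATTYPE handlers above) are
         * assumptions. */
        rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
        if (RT_SUCCESS(rc))
            rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);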
}
return rc;
}
/**
* Status info callback.
*
* @param pDevIns The device instance.
* @param pHlp The output helpers.
* @param pszArgs The arguments.
*/
{
unsigned i;
// bool fRcvRing = false;
// bool fXmtRing = false;
/*
* Parse args.
if (pszArgs)
{
fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
}
*/
/*
* Show info.
*/
for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
{
{
const char *pcszTmp;
{
case 0: pcszTmp = "DST"; break;
default: pcszTmp = "reserved";
}
}
}
for (i = 0; i < cDescs; ++i)
{
if (i == rdh)
}
#ifdef E1K_WITH_RXD_CACHE
pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
else
for (i = 0; i < pThis->nRxDFetched; ++i)
{
if (i == pThis->iRxDCurrent)
&pThis->aRxDescriptors[i]);
}
#endif /* E1K_WITH_RXD_CACHE */
for (i = 0; i < cDescs; ++i)
{
if (i == tdh)
}
#ifdef E1K_WITH_TXD_CACHE
pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
else
for (i = 0; i < pThis->nTxDFetched; ++i)
{
if (i == pThis->iTxDCurrent)
&pThis->aTxDescriptors[i]);
}
#endif /* E1K_WITH_TXD_CACHE */
#ifdef E1K_INT_STATS
#endif /* E1K_INT_STATS */
}
/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
/**
* Detach notification.
*
* One port on the network card has been disconnected from the network.
*
* @param pDevIns The device instance.
* @param iLUN The logical unit which is being detached.
* @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
*/
{
AssertLogRelReturnVoid(iLUN == 0);
    /** @todo r=pritesh: still need to check whether I missed cleaning
     *        something up in this function.
     */
/*
* Zero some important members.
*/
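    /* A hedged sketch of the members that presumably get cleared here, based
     * on the driver pointers queried in the attach/construct code below
     * (pDrvBase, pDrvR3, pDrvR0, pDrvRC). */
    pThis->pDrvBase = NULL;
    pThis->pDrvR3   = NULL;
    pThis->pDrvR0   = NIL_RTR0PTR;
    pThis->pDrvRC   = NIL_RTRCPTR;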
}
/**
* Attach the Network attachment.
*
* One port on the network card has been connected to a network.
*
* @returns VBox status code.
* @param pDevIns The device instance.
* @param iLUN The logical unit which is being attached.
* @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
*
* @remarks This code path is not used during construction.
*/
{
/*
* Attach the driver.
*/
if (RT_SUCCESS(rc))
{
if (rc == VINF_NAT_DNS)
{
#ifdef RT_OS_LINUX
N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
#else
N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
#endif
}
if (RT_SUCCESS(rc))
{
}
}
    else if (   rc == VERR_PDM_NO_ATTACHED_DRIVER
             || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
{
/* This should never happen because this function is not called
* if there is no driver to attach! */
}
    /*
     * Temporarily set the link down if it was up, so that the guest will know
     * that we have changed the configuration of the network card.
     */
{
/* Restore the link back in 5 seconds (default). */
}
return rc;
}
/**
* @copydoc FNPDMDEVPOWEROFF
*/
{
/* Poke thread waiting for buffer space. */
}
/**
* @copydoc FNPDMDEVRESET
*/
{
#ifdef E1K_TX_DELAY
#endif /* E1K_TX_DELAY */
pThis->u16TxPktLen = 0;
pThis->fIntMaskUsed = false;
pThis->fDelayInts = false;
pThis->u64AckedAt = 0;
}
/**
* @copydoc FNPDMDEVSUSPEND
*/
{
/* Poke thread waiting for buffer space. */
}
/**
* Device relocation callback.
*
 * When this callback is called the device instance data, and if the device
 * has a GC component, is being relocated, or/and the selectors have been
 * changed. The device must use the chance to perform the necessary pointer
 * relocations and data updates.
 *
 * Before the GC code is executed the first time, this function will be
 * called with a 0 delta so GC pointer calculations can be done in one place.
*
* @param pDevIns Pointer to the device instance.
* @param offDelta The relocation delta relative to the old location.
*
* @remark A relocation CANNOT fail.
*/
{
#ifdef E1K_USE_RX_TIMERS
#endif /* E1K_USE_RX_TIMERS */
#ifdef E1K_USE_TX_TIMERS
# ifndef E1K_NO_TAD
# endif /* E1K_NO_TAD */
#endif /* E1K_USE_TX_TIMERS */
#ifdef E1K_TX_DELAY
#endif /* E1K_TX_DELAY */
}
/**
* Destruct a device instance.
*
* We need to free non-VM resources only.
*
* @returns VBox status.
* @param pDevIns The device instance data.
* @thread EMT
*/
{
{
{
}
#ifdef E1K_WITH_TX_CS
#endif /* E1K_WITH_TX_CS */
}
return VINF_SUCCESS;
}
/**
* Set PCI configuration space registers.
*
* @param pci Reference to PCI device structure.
* @thread EMT
*/
{
/* Configure PCI Device, assume 32-bit mode ******************************/
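    /* A hedged sketch of the identification setup this section performs:
     * PCIDevSetVendorId/PCIDevSetDeviceId are the VBox/pci.h helpers; the
     * device ID 0x100E (82540EM) is assumed here, while the real code picks
     * the ID matching the configured adapter type. */
    PCIDevSetVendorId(&pci, 0x8086);        /* Intel */
    PCIDevSetDeviceId(&pci, 0x100E);        /* 82540EM (assumption) */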
/* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
/* Stepping A2 */
/* Ethernet adapter */
/* normal single function Ethernet controller */
/* Memory Register Base Address */
/* Memory Flash Base Address */
/* IO Register Base Address */
/* Expansion ROM Base Address */
/* Capabilities Pointer */
/* Interrupt Pin: INTA# */
/* PCI Power Management Registers ****************************************/
/* Capability ID: PCI Power Management Registers */
/* Next Item Pointer: PCI-X */
/* Power Management Capabilities: PM disabled, DSI */
0x0002 | VBOX_PCI_PM_CAP_DSI);
/* Power Management Control / Status Register: PM disabled */
/* PMCSR_BSE Bridge Support Extensions: Not supported */
/* Data Register: PM disabled, always 0 */
/* PCI-X Configuration Registers *****************************************/
/* Capability ID: PCI-X Configuration Registers */
#ifdef E1K_WITH_MSI
#else
/* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
#endif
/* PCI-X Command: Enable Relaxed Ordering */
    /* PCI-X Status: 32-bit, 66 MHz */
    /** @todo Is this value really correct? fff8 doesn't look like an actual PCI address. */
}
/**
* @interface_method_impl{PDMDEVREG,pfnConstruct}
*/
{
int rc;
/*
* Initialize the instance data (state).
* Note! Caller has initialized it to ZERO already.
*/
pThis->u16TxPktLen = 0;
pThis->fIntMaskUsed = false;
pThis->fDelayInts = false;
pThis->u64AckedAt = 0;
/* Interfaces */
/*
* Internal validations.
*/
("%s@%#xLB%#x vs %s@%#xLB%#x\n",
/*
* Validate configuration.
*/
"LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
"EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
N_("Invalid configuration for E1000 device"));
/** @todo: LineSpeed unused! */
pThis->fR0Enabled = true;
pThis->fRCEnabled = true;
pThis->fEthernetCRC = true;
pThis->fGSOEnabled = true;
/* Get config params */
if (RT_FAILURE(rc))
N_("Configuration error: Failed to get MAC address"));
if (RT_FAILURE(rc))
N_("Configuration error: Failed to get the value of 'CableConnected'"));
if (RT_FAILURE(rc))
N_("Configuration error: Failed to get the value of 'AdapterType'"));
if (RT_FAILURE(rc))
N_("Configuration error: Failed to get the value of 'GCEnabled'"));
if (RT_FAILURE(rc))
N_("Configuration error: Failed to get the value of 'R0Enabled'"));
if (RT_FAILURE(rc))
N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
if (RT_FAILURE(rc))
N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
if (RT_FAILURE(rc))
N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
LogRel(("%s WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
else if (pThis->cMsLinkUpDelay == 0)
/* Initialize the EEPROM. */
/* Initialize internal PHY. */
Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
/* Initialize critical sections. We do our own locking. */
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return rc;
#ifdef E1K_WITH_TX_CS
if (RT_FAILURE(rc))
return rc;
#endif /* E1K_WITH_TX_CS */
/* Saved state registration. */
if (RT_FAILURE(rc))
return rc;
/* Set PCI config registers and register ourselves with the PCI bus. */
if (RT_FAILURE(rc))
return rc;
#ifdef E1K_WITH_MSI
#endif
/* Map our registers to memory space (region 0, see e1kConfigurePCI)*/
if (RT_FAILURE(rc))
return rc;
/* Map our registers to IO space (region 2, see e1kConfigurePCI) */
if (RT_FAILURE(rc))
return rc;
/* Create transmit queue */
if (RT_FAILURE(rc))
return rc;
/* Create the RX notifier signaller. */
if (RT_FAILURE(rc))
return rc;
#ifdef E1K_TX_DELAY
/* Create Transmit Delay Timer */
if (RT_FAILURE(rc))
return rc;
#endif /* E1K_TX_DELAY */
#ifdef E1K_USE_TX_TIMERS
/* Create Transmit Interrupt Delay Timer */
if (RT_FAILURE(rc))
return rc;
# ifndef E1K_NO_TAD
/* Create Transmit Absolute Delay Timer */
if (RT_FAILURE(rc))
return rc;
# endif /* E1K_NO_TAD */
#endif /* E1K_USE_TX_TIMERS */
#ifdef E1K_USE_RX_TIMERS
/* Create Receive Interrupt Delay Timer */
if (RT_FAILURE(rc))
return rc;
/* Create Receive Absolute Delay Timer */
if (RT_FAILURE(rc))
return rc;
#endif /* E1K_USE_RX_TIMERS */
/* Create Late Interrupt Timer */
if (RT_FAILURE(rc))
return rc;
/* Create Link Up Timer */
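    /* A hedged sketch of the creation call this comment refers to; the
     * callback (e1kLinkUpTimer) and the timer member (pLUTimerR3) are assumed
     * names, PDMDevHlpTMTimerCreate is the standard PDM helper. */
    rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
                                TMTIMER_FLAGS_NO_CRIT_SECT, "E1000 Link Up Timer",
                                &pThis->pLUTimerR3);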
if (RT_FAILURE(rc))
return rc;
/* Register the info item */
char szTmp[20];
/* Status driver */
if (RT_FAILURE(rc))
/* Network driver */
if (RT_SUCCESS(rc))
{
if (rc == VINF_NAT_DNS)
N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
}
    else if (   rc == VERR_PDM_NO_ATTACHED_DRIVER
             || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
{
/* No error! */
}
else
if (RT_FAILURE(rc))
return rc;
rc = e1kInitDebugHelpers();
if (RT_FAILURE(rc))
return rc;
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
#if defined(VBOX_WITH_STATISTICS)
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
{
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
}
#endif /* VBOX_WITH_STATISTICS */
#ifdef E1K_INT_STATS
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntDly", "/Devices/E1k%d/uStatIntDly", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDisDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDisDly", "/Devices/E1k%d/uStatDisDly", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
#endif /* E1K_INT_STATS */
return VINF_SUCCESS;
}
/**
* The device registration structure.
*/
const PDMDEVREG g_DeviceE1000 =
{
/* Structure version. PDM_DEVREG_VERSION defines the current version. */
/* Device name. */
"e1000",
    /* Name of the guest context module (no path).
     * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
"VBoxDDGC.gc",
    /* Name of the ring-0 module (no path).
     * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
"VBoxDDR0.r0",
/* The description of the device. The UTF-8 string pointed to shall, like this structure,
* remain unchanged from registration till VM destruction. */
"Intel PRO/1000 MT Desktop Ethernet.\n",
/* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
/* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
/* Maximum number of instances (per VM). */
~0U,
/* Size of the instance data. */
sizeof(E1KSTATE),
/* pfnConstruct */
/* pfnDestruct */
/* pfnRelocate */
/* pfnMemSetup */
NULL,
/* pfnPowerOn */
NULL,
/* pfnReset */
/* pfnSuspend */
/* pfnResume */
NULL,
/* pfnAttach */
    /* pfnDetach */
/* pfnQueryInterface */
NULL,
/* pfnInitComplete */
NULL,
/* pfnPowerOff */
/* pfnSoftReset */
NULL,
/* u32VersionEnd */
};
#endif /* IN_RING3 */
#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */