/* VBoxNetFlt-linux.c revision 15310cc04aae78936f50b7c2a860ed68ea83490b */
/* $Id$ */
/** @file
* VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
*/
/*
* Copyright (C) 2006-2008 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include "the-linux-kernel.h"
#include "version-generated.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/miscdevice.h>
#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
#include <iprt/spinlock.h>
#include <iprt/semaphore.h>
#include <iprt/initterm.h>
#define VBOXNETFLT_OS_SPECFIC 1
#include "../VBoxNetFltInternal.h"
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */
# define CHECKSUM_PARTIAL CHECKSUM_HW
# else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
# else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */
/* No features, very dumb device */
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
# define VBOX_SKB_IS_GSO(skb) false
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
#ifndef NET_IP_ALIGN
# define NET_IP_ALIGN 2
#endif
/* NOTE(review): the function signature is missing from this chunk.  Judging by
 * the body and the trailing #endif, this is a compatibility implementation of
 * dev_get_flags() for kernels < 2.6.12 — the flag-masking expression is only
 * partially present (its opening lines were stripped); confirm against the
 * full source before editing. */
{
unsigned flags;
IFF_RUNNING)) |
IFF_ALLMULTI));
/* Presumably forces IFF_RUNNING based on the device's operational state —
 * the preceding condition is missing from this chunk; TODO confirm. */
flags |= IFF_RUNNING;
return flags;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
static int VBoxNetFltLinuxInit(void);
static void VBoxNetFltLinuxUnload(void);
/*******************************************************************************
* Global Variables *
*******************************************************************************/
/**
* The (common) global data.
*/
#ifdef RT_ARCH_AMD64
/**
 * Memory for the executable memory heap (in IPRT).
 *
 * Declared via inline assembly so the 4 KiB block lands in a dedicated
 * "execmemory" section with the "awx" (allocatable/writable/executable)
 * flags — a plain C array could not request executable permissions.
 * NOTE(review): presumably donated to IPRT during module init (the donate
 * call is not visible in this chunk); confirm against the full source.
 */
__asm__(".section execmemory, \"awx\", @progbits\n\t"
".align 32\n\t"
".globl g_abExecMemory\n"
"g_abExecMemory:\n\t"
".zero 4096\n\t"
".type g_abExecMemory, @object\n\t"
".size g_abExecMemory, 4096\n\t"
".text\n\t");
#endif
static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
MODULE_AUTHOR("Sun Microsystems, Inc.");
MODULE_DESCRIPTION("VirtualBox Network Filter Driver");
MODULE_LICENSE("GPL");
#ifdef MODULE_VERSION
/* Stringification helper, presumably used when composing the MODULE_VERSION
 * string.  NOTE(review): no use of str() is visible in this chunk; confirm
 * against the full source before removing. */
# define str(s) #s
#endif
/* NOTE(review): a duplicate tentative definition of g_VBoxNetFltGlobals
 * (the common global data) was removed here — the object is already defined
 * earlier in this file, and keeping a single definition avoids relying on
 * C's tentative-definition merging. */
/**
* Initialize module.
*
* @returns appropriate status code.
*/
/**
 * Initialize module.
 *
 * @returns 0 on success, negative errno on failure.
 *
 * NOTE(review): this chunk is missing lines — rc is never assigned before the
 * RT_SUCCESS(rc) checks (the RTR0Init and globals-init calls appear to have
 * been stripped).  Comments below describe only the visible control flow.
 */
static int __init VBoxNetFltLinuxInit(void)
{
int rc;
/*
 * Initialize IPRT.
 */
if (RT_SUCCESS(rc))
{
#ifdef RT_ARCH_AMD64
/* Donate the static exec-memory block to IPRT (call missing from this chunk);
 * failure is non-fatal but disables logging.  TODO confirm. */
if (RT_FAILURE(rc))
{
printk("VBoxNetFlt: failed to donate exec memory, no logging will be available.\n");
}
#endif
Log(("VBoxNetFltLinuxInit\n"));
/*
 * Initialize the globals and connect to the support driver.
 *
 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
 * for establishing the connect to the support driver.
 */
if (RT_SUCCESS(rc))
{
LogRel(("VBoxNetFlt: Successfully started.\n"));
return 0;
}
else
RTR0Term();
}
else
return -RTErrConvertToErrno(rc);
/* NOTE(review): the failure path after RTR0Term() falls off the end of a
 * non-void function — the final error-return statement appears to be missing
 * from this chunk. */
}
/**
* Unload the module.
*
* @todo We have to prevent this if we're busy!
*/
/**
 * Unload the module, tearing down in reverse order of init.
 *
 * NOTE(review): rc is declared but never used in this chunk — the teardown
 * call whose status it held appears to have been stripped; confirm against
 * the full source.
 */
static void __exit VBoxNetFltLinuxUnload(void)
{
int rc;
Log(("VBoxNetFltLinuxUnload\n"));
/*
 * Undo the work done during start (in reverse order).
 */
RTR0Term();
Log(("VBoxNetFltLinuxUnload - done\n"));
}
/**
* Reads and retains the host interface handle.
*
* @returns The handle, NULL if detached.
* @param pThis
*/
/* NOTE(review): the function signature is missing from this chunk; per the
 * doc comment above this is vboxNetFltLinuxRetainNetDev.  The entire body is
 * compiled out with #if 0, so the live code path is the (empty) #else branch
 * — the real implementation presumably lives elsewhere or was stripped. */
{
#if 0
Log(("vboxNetFltLinuxRetainNetDev\n"));
/*
 * Be careful here to avoid problems racing the detached callback.
 */
{
if (pDev)
{
Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
}
}
Log(("vboxNetFltLinuxRetainNetDev - done\n"));
return pDev;
#else
#endif
}
/**
* Release the host interface handle previously retained
* by vboxNetFltLinuxRetainNetDev.
*
* @param pThis The instance.
* @param pDev The vboxNetFltLinuxRetainNetDev
* return value, NULL is fine.
*/
/* NOTE(review): signature missing from this chunk; per the doc comment above
 * this is vboxNetFltLinuxReleaseNetDev.  The body shown here is entirely
 * inside #if 0 (dead code) and the #else/live branch is absent. */
{
#if 0
Log(("vboxNetFltLinuxReleaseNetDev\n"));
if (pDev)
{
Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
}
Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
#endif
}
/**
* Checks whether this is an mbuf created by vboxNetFltLinuxMBufFromSG,
* i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
*
* @returns true / false accordingly.
* @param pBuf The sk_buff.
*/
/* NOTE(review): signature missing from this chunk; per the doc comment above
 * this predicate identifies sk_buffs created by this driver so the filter
 * callbacks can ignore them.  The body (the actual test) has been stripped. */
{
}
/**
* Internal worker that create a linux sk_buff for a
* (scatter/)gather list.
*
* @returns Pointer to the sk_buff.
* @param pThis The instance.
* @param pSG The (scatter/)gather list.
*/
/* NOTE(review): several statements are missing from this chunk — the
 * conditions guarding the two early-return blocks, and the alloc_skb/copy
 * sequence that defines pPkt.  Comments describe the visible skeleton only. */
static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
{
struct net_device *pDev;
/*
 * Because we're lazy, we will ASSUME that all SGs coming from INTNET
 * will only contain one single segment.
 */
{
LogRel(("VBoxNetFlt: Dropped multi-segment(%d) packet coming from internal network.\n", pSG->cSegsUsed));
return NULL;
}
{
LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
return NULL;
}
/*
 * Allocate a packet and copy over the data.
 *
 */
if (pPkt)
{
/* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
/* Set protocol and packet_type fields. */
if (fDstWire)
{
/* Restore ethernet header back. */
}
return pPkt;
}
else
/* Allocation failed: drop the frame. */
return NULL;
}
/**
* Initializes a SG list from an sk_buff.
*
* @returns Number of segments.
* @param pThis The instance.
* @param pBuf The sk_buff.
* @param pSG The SG.
* @param pvFrame The frame pointer, optional.
* @param cSegs The number of segments allocated for the SG.
* This should match the number in the mbuf exactly!
* @param fSrc The source of the frame.
*/
/* NOTE(review): the segment-filling statements are missing from this chunk,
 * and the final Log4 statement is cut off mid-expression (its argument list
 * and closing parentheses were stripped). */
DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
{
int i;
if (fSrc & INTNETTRUNKDIR_WIRE)
{
/*
 * The packet came from wire, ethernet header was removed by device driver.
 * Restore it.
 */
}
#ifdef VBOXNETFLT_SG_SUPPORT
{
}
#else
#endif
#ifdef PADD_RUNT_FRAMES_FROM_HOST
/*
 * Add a trailer if the frame is too small.
 *
 * Since we're getting to the packet before it is framed, it has not
 * yet been padded. The current solution is to add a segment pointing
 * to a buffer containing all zeros and pray that works for all frames...
 */
{
AssertReturnVoid(i < cSegs);
}
#endif
Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
}
/**
* Packet handler,
*
* @returns 0 or EJUSTRETURN.
* @param pThis The instance.
* @param pMBuf The mbuf.
* @param pvFrame The start of the frame, optional.
* @param fSrc Where the packet (allegedly) comes from, one INTNETTRUNKDIR_* value.
* @param eProtocol The protocol.
*/
/* NOTE(review): the function-name/return-type line and the #if that selects
 * between the two parameter lists are missing from this chunk; per the doc
 * comment above this is vboxNetFltLinuxPacketHandler, the packet_type hook
 * registered with the kernel.  Several conditions and the copy/queue calls
 * have also been stripped (pBuf and pCopy are used but never defined here). */
struct net_device *pSkbDev,
struct packet_type *pPacketType,
struct net_device *pOrigDev)
#else
struct net_device *pSkbDev,
struct packet_type *pPacketType)
#endif
{
struct net_device *pDev;
LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
#else
Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
#endif
/*
 * Drop it immediately?
 */
if (!pBuf)
return 0;
{
return 0;
}
{
Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
return 0;
}
#ifndef VBOXNETFLT_SG_SUPPORT
{
/*
 * Get rid of fragmented packets, they cause too much trouble.
 */
if (!pCopy)
{
LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
return 0;
}
Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
#else
Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
#endif
}
#endif
/* Add the packet to transmit queue and schedule the bottom half. */
Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
/* It does not really matter what we return, it is ignored by the kernel. */
return 0;
}
/* NOTE(review): signature missing from this chunk.  This appears to compute
 * the number of SG segments needed for an sk_buff (cf. the cSegs parameter of
 * vboxNetFltLinuxSkBufToSG).  The SG-support branch and the runt-frame size
 * check guarding cSegs++ have been stripped. */
{
#ifdef VBOXNETFLT_SG_SUPPORT
#else
unsigned cSegs = 1;
#endif
#ifdef PADD_RUNT_FRAMES_FROM_HOST
/*
 * Add a trailer if the frame is too small.
 */
cSegs++;
#endif
return cSegs;
}
/* WARNING! This function should only be called after vboxNetFltLinuxSkBufToSG()! */
/* NOTE(review): signature missing from this chunk; presumably releases the
 * sk_buff backing an SG list.  The loop header and body statements inside the
 * VBOXNETFLT_SG_SUPPORT branch have been stripped. */
{
#ifdef VBOXNETFLT_SG_SUPPORT
int i;
{
}
#endif
}
#ifndef LOG_ENABLED
/* Logging disabled: compile the packet dumper away entirely. */
#define vboxNetFltDumpPacket(a, b, c, d)
#else
/* NOTE(review): signature missing from this chunk (per the macro above this
 * is vboxNetFltDumpPacket, taking four arguments incl. fEgress/iIncrement).
 * The direction-specific statements and most of the Log arguments have been
 * stripped — the trailing Log(( call is cut mid-expression. */
{
static int iPacketNo = 1;
iPacketNo += iIncrement;
if (fEgress)
{
}
else
{
}
Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
" %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
}
#endif
/* NOTE(review): signature missing from this chunk.  Appears to allocate an SG
 * list for an sk_buff (bounded by MAX_SKB_FRAGS) and hand it to the internal
 * network; the allocation statement defining pSG and the forwarding call have
 * been stripped. */
{
if (cSegs < MAX_SKB_FRAGS)
{
if (!pSG)
{
Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
return VERR_NO_MEMORY;
}
/* Too many fragments for our SG capacity: drop rather than truncate. */
Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
}
return VINF_SUCCESS;
}
/* NOTE(review): signature missing from this chunk (log strings name it
 * vboxNetFltLinuxForwardToIntNet).  The GSO segmentation call, the segment
 * loop header, and the final forwarding code have been stripped; the two
 * orphan #endif lines below correspond to #if blocks whose opening lines are
 * not visible here. */
{
if (VBOX_SKB_IS_GSO(pBuf))
{
/* Need to segment the packet */
Log3(("vboxNetFltLinuxForwardToIntNet: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
#endif
{
return;
}
{
Log3(("vboxNetFltLinuxForwardToIntNet: segment len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
pSegment->len, pSegment->data_len, pSegment->truesize, pSegment->next, skb_shinfo(pSegment)->nr_frags, skb_shinfo(pSegment)->gso_size, skb_shinfo(pSegment)->gso_segs, skb_shinfo(pSegment)->gso_type, skb_shinfo(pSegment)->frag_list, pSegment->pkt_type));
#endif
}
}
else
{
/* Non-GSO path: complete any pending partial checksum before forwarding. */
if (VBOX_SKB_CHECKSUM_HELP(pBuf))
{
LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
return;
}
}
/*
 * Create a (scatter/)gather list for the sk_buff and feed it to the internal network.
 */
}
/* NOTE(review): the #if branch with the work_struct-based signature is
 * missing from this chunk; only the pre-2.6.20-style (void *pWork) variant is
 * visible.  The statements assigning fActive and the busy-counter/retain
 * logic between the two if statements have been stripped — as shown,
 * `if (fActive)` has the next `if` as its (surely unintended) body. */
#else
static void vboxNetFltLinuxXmitTask(void *pWork)
#endif
{
bool fActive;
/*
 * Active? Retain the instance and increment the busy counter.
 */
if (fActive)
if (!fActive)
return;
}
/**
* Internal worker for vboxNetFltOsInitInstance and vboxNetFltOsMaybeRediscovered.
*
* @returns VBox status code.
* @param pThis The instance.
* @param fRediscovery If set we're doing a rediscovery attempt, so, don't
* flood the release log.
*/
/* NOTE(review): signature missing from this chunk (log strings name it
 * vboxNetFltLinuxAttachToInterface).  The dev_get_by_name-style lookup that
 * defines pDev, the packet-handler registration using pt, and the failure
 * bookkeeping have been stripped; as shown, the "release on failure" branch
 * would run unconditionally whenever pDev is non-NULL. */
{
struct packet_type *pt;
if (!pDev)
{
return VERR_INTNET_FLT_IF_NOT_FOUND;
}
Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
/*
 * Get the mac address while we still have a valid ifnet reference.
 */
if (pDev)
{
}
/* Release the interface on failure. */
if (pDev)
{
Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
}
LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.Mac), &pThis->u.s.Mac));
return VINF_SUCCESS;
}
/* NOTE(review): signature missing from this chunk (log strings name it
 * vboxNetFltLinuxUnregisterDevice, a NETDEV_UNREGISTER handler).  The actual
 * dev_remove_pack/queue-purge/release statements have been stripped. */
{
Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
return NOTIFY_OK;
}
/* NOTE(review): signature missing from this chunk (log strings name it
 * vboxNetFltLinuxDeviceIsUp, a NETDEV_UP handler).  The suspend/promiscuity
 * condition and the dev_set_promiscuity-style call have been stripped. */
{
/* Check if we are not suspended and promiscuous mode has not been set. */
{
/* Note that there is no need for locking as the kernel got hold of the lock already. */
Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
}
else
Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
return NOTIFY_OK;
}
/* NOTE(review): signature missing from this chunk (log strings name it
 * vboxNetFltLinuxDeviceGoingDown, a NETDEV_GOING_DOWN handler).  The
 * fPromiscuousSet condition and the promiscuity-decrement call have been
 * stripped. */
{
/* Undo promiscuous mode if we has set it. */
{
/* Note that there is no need for locking as the kernel got hold of the lock already. */
Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
}
else
Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
return NOTIFY_OK;
}
/**
 * Netdevice notifier callback, dispatching NETDEV_* events to the handlers
 * above.
 *
 * NOTE(review): heavily truncated — pDev/pThis/rc are used without visible
 * definitions, the DEBUG switch has no cases, the name-match condition before
 * the first empty block is missing, and the bare `return NOTIFY_OK;` at the
 * top of the else branch makes the following switch unreachable as shown.
 * Confirm all of this against the full source.
 */
static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
{
#ifdef DEBUG
char *pszEvent = "<unknown>";
#endif
#ifdef DEBUG
switch (ulEventType)
{
}
pszEvent, ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
#endif
{
}
else
{
return NOTIFY_OK;
switch (ulEventType)
{
case NETDEV_UNREGISTER:
break;
case NETDEV_UP:
break;
case NETDEV_GOING_DOWN:
break;
case NETDEV_CHANGENAME:
break;
}
}
return rc;
}
/* NOTE(review): an anonymous empty body — both the signature and contents of
 * this definition were stripped from this chunk; identify it from the full
 * source before editing. */
{
}
/* NOTE(review): signature missing from this chunk; by structure this is the
 * OS transmit entry point (vboxNetFltPortOsXmit) taking an SG list and fDst
 * direction mask.  The SkBufFromSG calls defining pBuf, the dev_queue_xmit /
 * netif_rx-style sends assigning err, and the statements under each
 * `if (err)` have all been stripped — as shown each `if (err)` has no
 * statement before the closing brace. */
{
struct net_device * pDev;
int err;
int rc = VINF_SUCCESS;
if (pDev)
{
/*
 * Create a sk_buff for the gather list and push it onto the wire.
 */
if (fDst & INTNETTRUNKDIR_WIRE)
{
if (pBuf)
{
if (err)
}
else
rc = VERR_NO_MEMORY;
}
/*
 * Create a sk_buff for the gather list and push it onto the host stack.
 */
if (fDst & INTNETTRUNKDIR_HOST)
{
if (pBuf)
{
if (err)
}
else
rc = VERR_NO_MEMORY;
}
}
return rc;
}
/* NOTE(review): signature missing from this chunk (log string names it
 * vboxNetFltPortOsIsPromiscuous).  The pDev lookup, the promiscuity test
 * assigning fRc, and the LogFlow argument list have been stripped — the
 * LogFlow call is cut mid-expression. */
{
bool fRc = false;
if (pDev)
{
LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
}
return fRc;
}
/* NOTE(review): two anonymous stub bodies whose signatures were stripped from
 * this chunk; the second, per its comment, is presumably the MAC-address
 * getter (vboxNetFltPortOsGetMacAddress).  Identify both from the full
 * source. */
{
}
{
/* ASSUMES that the MAC address never changes. */
}
/* NOTE(review): signature missing from this chunk (log strings name it
 * vboxNetFltPortOsSetActive).  The pDev lookup, the dev_set_promiscuity
 * calls between the rtnl_lock()/rtnl_unlock() pairs, the LogFlow argument
 * tail, and the LOG_ENABLED bookkeeping have all been stripped. */
{
struct net_device * pDev;
LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
/* Honour the per-instance opt-out: never touch promiscuity when disabled. */
if (pThis->fDisablePromiscuous)
return;
if (pDev)
{
/*
 * This api is a bit weird, the best reference is the code.
 *
 * Also, we have a bit or race conditions wrt the maintance of
 * host the interface promiscuity for vboxNetFltPortOsIsPromiscuous.
 */
#ifdef LOG_ENABLED
#endif
if (fActive)
{
rtnl_lock();
rtnl_unlock();
pThis->u.s.fPromiscuousSet = true;
Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
}
else
{
if (pThis->u.s.fPromiscuousSet)
{
rtnl_lock();
rtnl_unlock();
Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
}
pThis->u.s.fPromiscuousSet = false;
#ifdef LOG_ENABLED
#endif
}
}
}
/* NOTE(review): signature missing from this chunk; a no-op OS hook that
 * always reports success.  Identify which hook from the full source. */
{
/* Nothing to do here. */
return VINF_SUCCESS;
}
/* NOTE(review): signature missing from this chunk; a no-op OS hook that
 * always reports success.  Identify which hook from the full source. */
{
/* Nothing to do here. */
return VINF_SUCCESS;
}
/* NOTE(review): signature missing from this chunk (log string names it
 * vboxNetFltOsDeleteInstance).  The statements that read pDev/fRegistered
 * and perform the actual unregister/release have been stripped. */
{
struct net_device *pDev;
bool fRegistered;
if (fRegistered)
{
Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
}
}
/* NOTE(review): signature missing from this chunk (by structure this is
 * vboxNetFltOsInitInstance).  The attach call assigning err has been
 * stripped; only the error-mapping skeleton remains.  The try_module_get
 * guard pins this module while the instance holds references into it. */
{
int err;
if (err)
return VERR_INTNET_FLT_IF_FAILED;
if (!pThis->u.s.fRegistered)
{
return VERR_INTNET_FLT_IF_NOT_FOUND;
}
if ( pThis->fDisconnectedFromHost
|| !try_module_get(THIS_MODULE))
return VERR_INTNET_FLT_IF_FAILED;
return VINF_SUCCESS;
}
/* NOTE(review): signature missing from this chunk (by structure this is
 * vboxNetFltOsPreInitInstance).  The orphan #else/#endif pair below belongs
 * to a version-conditional initialisation whose #if line and both branch
 * bodies were stripped. */
{
/*
 * Init the linux specific members.
 */
pThis->u.s.fRegistered = false;
pThis->u.s.fPromiscuousSet = false;
#else
#endif
return VINF_SUCCESS;
}