{
BUG(); return NULL; }
/* Stub: must never be reached; BUG()s, then returns 0 to satisfy the
 * compiler.  NOTE(review): appears to be the branch compiled when VLAN
 * support is disabled (see the #endif below) — confirm in full header. */
static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	BUG();
	return 0;
}
/* Stub: must never be reached; BUG()s, then returns a success code to
 * satisfy the compiler. */
static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
				    struct vlan_group *grp,
				    u16 vlan_tci, int polling)
{
	BUG();
	return NET_XMIT_SUCCESS;
}
/* Fallback receive hook: if the tag carries a non-zero VLAN ID, mark the
 * frame as destined for another host; always reports "not consumed". */
static inline bool vlan_hwaccel_do_receive(struct sk_buff **skb)
{
	if ((*skb)->vlan_tci & VLAN_VID_MASK)
		(*skb)->pkt_type = PACKET_OTHERHOST;

	return false;
}
/* Stub GRO receive path: unconditionally drops. */
static inline gro_result_t
vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
		 unsigned int vlan_tci, struct sk_buff *skb)
{
	return GRO_DROP;
}
static inline gro_result_t
vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, unsigned int vlan_tci) {
return GRO_DROP; } #endif /**
 * vlan_hwaccel_rx - netif_rx wrapper for VLAN RX acceleration
 * @skb: buffer
 * @grp: vlan group
 * @vlan_tci: VLAN TCI as received from the card
 */
static inline int vlan_hwaccel_rx(struct sk_buff *skb,
				  struct vlan_group *grp, u16 vlan_tci)
{
	/* polling == 0 selects the netif_rx() flavour of delivery */
	return __vlan_hwaccel_rx(skb, grp, vlan_tci, 0);
}

/**
 * vlan_hwaccel_receive_skb - netif_receive_skb wrapper for VLAN RX acceleration
 * @skb: buffer
 * @grp: vlan group
 * @vlan_tci: VLAN TCI as received from the card
 */
static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
					   struct vlan_group *grp,
					   u16 vlan_tci)
{
	/* polling == 1 selects the netif_receive_skb() flavour of delivery */
	return __vlan_hwaccel_rx(skb, grp, vlan_tci, 1);
}

/**
 * __vlan_put_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling
 * function doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
	struct vlan_ethhdr *vhdr;

	/* Make sure there is headroom for the tag; on failure the skb is
	 * consumed so the caller need not free it. */
	if (skb_cow_head(skb, VLAN_HLEN) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	vhdr = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);

	/* Move the mac addresses to the beginning of the new header. */
	memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN);
	skb->mac_header -= VLAN_HLEN;

	/* first, the ethernet type */
	vhdr->h_vlan_proto = htons(ETH_P_8021Q);

	/* now, the TCI */
	vhdr->h_vlan_TCI = htons(vlan_tci);

	skb->protocol = htons(ETH_P_8021Q);

	return skb;
}

/**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
						     u16 vlan_tci)
{
	/* Record the TCI out-of-band and flag it as present; no header
	 * bytes are touched. */
	skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
	return skb;
}

#define HAVE_VLAN_PUT_TAG

/**
 * vlan_put_tag - inserts VLAN tag according to device features
 * @skb: skbuff to tag
 * @vlan_tci: VLAN TCI to insert
 *
 * Assumes skb->dev is the target that will xmit this frame.
 * Returns a VLAN tagged skb.
 */
static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
	/* Prefer out-of-band tagging when the device offloads TX VLAN
	 * insertion; otherwise insert the tag into the payload. */
	if (skb->dev->features & NETIF_F_HW_VLAN_TX)
		return __vlan_hwaccel_put_tag(skb, vlan_tci);

	return __vlan_put_tag(skb, vlan_tci);
}

/**
 * __vlan_get_tag - get the VLAN ID that is part of the payload
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not of VLAN type
 */
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	const struct vlan_ethhdr *vhdr = (const struct vlan_ethhdr *)skb->data;

	/* Only frames whose ethertype says 802.1Q carry an in-band tag. */
	if (vhdr->h_vlan_proto != htons(ETH_P_8021Q))
		return -EINVAL;

	*vlan_tci = ntohs(vhdr->h_vlan_TCI);
	return 0;
}

/**