| /* |
| * NET3 Protocol independent device support routines. |
| * |
| * This program is free software; you can redistribute it and/or |
| * modify it under the terms of the GNU General Public License |
| * as published by the Free Software Foundation; either version |
| * 2 of the License, or (at your option) any later version. |
| * |
| * Derived from the non IP parts of dev.c 1.0.19 |
| * Authors: Ross Biro |
| * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
| * Mark Evans, <evansmp@uhura.aston.ac.uk> |
| * |
| * Additional Authors: |
| * Florian la Roche <rzsfl@rz.uni-sb.de> |
| * Alan Cox <gw4pts@gw4pts.ampr.org> |
| * David Hinds <dahinds@users.sourceforge.net> |
| * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> |
| * Adam Sulmicki <adam@cfar.umd.edu> |
| * Pekka Riikonen <priikone@poesidon.pspt.fi> |
| * |
| * Changes: |
| * D.J. Barrow : Fixed bug where dev->refcnt gets set |
| * to 2 if register_netdev gets called |
| * before net_dev_init & also removed a |
| * few lines of code in the process. |
| * Alan Cox : device private ioctl copies fields back. |
| * Alan Cox : Transmit queue code does relevant |
| * stunts to keep the queue safe. |
| * Alan Cox : Fixed double lock. |
| * Alan Cox : Fixed promisc NULL pointer trap |
| * ???????? : Support the full private ioctl range |
| * Alan Cox : Moved ioctl permission check into |
| * drivers |
| * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI |
| * Alan Cox : 100 backlog just doesn't cut it when |
| * you start doing multicast video 8) |
| * Alan Cox : Rewrote net_bh and list manager. |
| * Alan Cox : Fix ETH_P_ALL echoback lengths. |
| * Alan Cox : Took out transmit every packet pass |
| * Saved a few bytes in the ioctl handler |
| * Alan Cox : Network driver sets packet type before |
| * calling netif_rx. Saves a function |
| * call a packet. |
| * Alan Cox : Hashed net_bh() |
| * Richard Kooijman: Timestamp fixes. |
| * Alan Cox : Wrong field in SIOCGIFDSTADDR |
| * Alan Cox : Device lock protection. |
| * Alan Cox : Fixed nasty side effect of device close |
| * changes. |
| * Rudi Cilibrasi : Pass the right thing to |
| * set_mac_address() |
| * Dave Miller : 32bit quantity for the device lock to |
| * make it work out on a Sparc. |
| * Bjorn Ekwall : Added KERNELD hack. |
| * Alan Cox : Cleaned up the backlog initialise. |
| * Craig Metz : SIOCGIFCONF fix if space for under |
| * 1 device. |
| * Thomas Bogendoerfer : Return ENODEV for dev_open, if there |
| * is no device open function. |
| * Andi Kleen : Fix error reporting for SIOCGIFCONF |
| * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF |
| * Cyrus Durgin : Cleaned for KMOD |
| * Adam Sulmicki : Bug Fix : Network Device Unload |
| * A network device unload needs to purge |
| * the backlog queue. |
| * Paul Rusty Russell : SIOCSIFNAME |
| * Pekka Riikonen : Netdev boot-time settings code |
| * Andrew Morton : Make unregister_netdevice wait |
| * indefinitely on dev->refcnt |
| * J Hadi Salim : - Backlog queue sampling |
| * - netif_rx() feedback |
| */ |
| |
| #include <asm/uaccess.h> |
| #include <linux/bitops.h> |
| #include <linux/capability.h> |
| #include <linux/cpu.h> |
| #include <linux/types.h> |
| #include <linux/kernel.h> |
| #include <linux/hash.h> |
| #include <linux/slab.h> |
| #include <linux/sched.h> |
| #include <linux/mutex.h> |
| #include <linux/string.h> |
| #include <linux/mm.h> |
| #include <linux/socket.h> |
| #include <linux/sockios.h> |
| #include <linux/errno.h> |
| #include <linux/interrupt.h> |
| #include <linux/if_ether.h> |
| #include <linux/netdevice.h> |
| #include <linux/etherdevice.h> |
| #include <linux/ethtool.h> |
| #include <linux/notifier.h> |
| #include <linux/skbuff.h> |
| #include <linux/bpf.h> |
| #include <net/net_namespace.h> |
| #include <net/sock.h> |
| #include <net/busy_poll.h> |
| #include <linux/rtnetlink.h> |
| #include <linux/stat.h> |
| #include <net/dst.h> |
| #include <net/dst_metadata.h> |
| #include <net/pkt_sched.h> |
| #include <net/checksum.h> |
| #include <net/xfrm.h> |
| #include <linux/highmem.h> |
| #include <linux/init.h> |
| #include <linux/module.h> |
| #include <linux/netpoll.h> |
| #include <linux/rcupdate.h> |
| #include <linux/delay.h> |
| #include <net/iw_handler.h> |
| #include <asm/current.h> |
| #include <linux/audit.h> |
| #include <linux/dmaengine.h> |
| #include <linux/err.h> |
| #include <linux/ctype.h> |
| #include <linux/if_arp.h> |
| #include <linux/if_vlan.h> |
| #include <linux/ip.h> |
| #include <net/ip.h> |
| #include <net/mpls.h> |
| #include <linux/ipv6.h> |
| #include <linux/in.h> |
| #include <linux/jhash.h> |
| #include <linux/random.h> |
| #include <trace/events/napi.h> |
| #include <trace/events/net.h> |
| #include <trace/events/skb.h> |
| #include <linux/pci.h> |
| #include <linux/inetdevice.h> |
| #include <linux/cpu_rmap.h> |
| #include <linux/static_key.h> |
| #include <linux/hashtable.h> |
| #include <linux/vmalloc.h> |
| #include <linux/if_macvlan.h> |
| #include <linux/errqueue.h> |
| #include <linux/hrtimer.h> |
| #include <linux/netfilter_ingress.h> |
| #include <linux/sctp.h> |
| #include <linux/crash_dump.h> |
| |
| #include "net-sysfs.h" |
| |
| /* Instead of increasing this, you should create a hash table. */ |
| #define MAX_GRO_SKBS 8 |
| |
| /* This should be increased if a protocol with a bigger head is added. */ |
| #define GRO_MAX_HEAD (MAX_HEADER + 128) |
| |
| static DEFINE_SPINLOCK(ptype_lock); |
| static DEFINE_SPINLOCK(offload_lock); |
| struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; |
| struct list_head ptype_all __read_mostly; /* Taps */ |
| static struct list_head offload_base __read_mostly; |
| |
| static int netif_rx_internal(struct sk_buff *skb); |
| static int call_netdevice_notifiers_info(unsigned long val, |
| struct net_device *dev, |
| struct netdev_notifier_info *info); |
| |
| /* |
| * The @dev_base_head list is protected by @dev_base_lock and the rtnl |
| * semaphore. |
| * |
| * Pure readers hold dev_base_lock for reading, or rcu_read_lock() |
| * |
| * Writers must hold the rtnl semaphore while they loop through the |
| * dev_base_head list, and hold dev_base_lock for writing when they do the |
| * actual updates. This allows pure readers to access the list even |
| * while a writer is preparing to update it. |
| * |
| * To put it another way, dev_base_lock is held for writing only to |
| * protect against pure readers; the rtnl semaphore provides the |
| * protection against other writers. |
| * |
| * See, for example usages, register_netdevice() and |
| * unregister_netdevice(), which must be called with the rtnl |
| * semaphore held. |
| */ |
| DEFINE_RWLOCK(dev_base_lock); |
| EXPORT_SYMBOL(dev_base_lock); |
| |
| /* protects napi_hash addition/deletion and napi_gen_id */ |
| static DEFINE_SPINLOCK(napi_hash_lock); |
| |
| static unsigned int napi_gen_id = NR_CPUS; |
| static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); |
| |
| static seqcount_t devnet_rename_seq; |
| |
| static inline void dev_base_seq_inc(struct net *net) |
| { |
| while (++net->dev_base_seq == 0); |
| } |
| |
| static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) |
| { |
| unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ)); |
| |
| return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; |
| } |
| |
| static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) |
| { |
| return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; |
| } |
| |
/* Serialize access to this CPU's input packet queue when Receive Packet
 * Steering is enabled; compiles away to nothing otherwise.
 */
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}
| |
/* Counterpart of rps_lock(); a no-op unless CONFIG_RPS is set. */
static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
| |
| /* Device list insertion */ |
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	/* Writers of the device lists must hold the RTNL semaphore
	 * (see the dev_base_lock locking rules above).
	 */
	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	/* _rcu list primitives let lockless RCU readers walk these lists
	 * concurrently with this update.
	 */
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	/* Bump the generation counter so netlink dumps can detect the
	 * list change and restart if needed.
	 */
	dev_base_seq_inc(net);
}
| |
| /* Device list removal |
| * caller must respect a RCU grace period before freeing/reusing dev |
| */ |
static void unlist_netdevice(struct net_device *dev)
{
	/* Like list_netdevice(), removal requires the RTNL semaphore */
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	/* Signal the list change to sequence-checking readers.  Note the
	 * caller must still wait an RCU grace period before freeing @dev.
	 */
	dev_base_seq_inc(dev_net(dev));
}
| |
| /* |
| * Our notifier list |
| */ |
| |
| static RAW_NOTIFIER_HEAD(netdev_chain); |
| |
| /* |
| * Device drivers call our routines to queue packets here. We empty the |
| * queue in the local softnet handler. |
| */ |
| |
| DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
| EXPORT_PER_CPU_SYMBOL(softnet_data); |
| |
| #ifdef CONFIG_LOCKDEP |
| /* |
| * register_netdevice() inits txq->_xmit_lock and sets lockdep class |
| * according to dev->type |
| */ |
| static const unsigned short netdev_lock_type[] = |
| {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, |
| ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, |
| ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, |
| ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, |
| ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, |
| ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, |
| ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, |
| ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, |
| ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, |
| ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, |
| ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, |
| ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, |
| ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, |
| ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, |
| ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; |
| |
| static const char *const netdev_lock_name[] = |
| {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", |
| "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", |
| "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", |
| "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", |
| "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", |
| "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", |
| "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", |
| "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", |
| "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", |
| "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", |
| "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", |
| "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", |
| "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", |
| "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", |
| "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; |
| |
| static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; |
| static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; |
| |
| static inline unsigned short netdev_lock_pos(unsigned short dev_type) |
| { |
| int i; |
| |
| for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) |
| if (netdev_lock_type[i] == dev_type) |
| return i; |
| /* the last key is used by default */ |
| return ARRAY_SIZE(netdev_lock_type) - 1; |
| } |
| |
| static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, |
| unsigned short dev_type) |
| { |
| int i; |
| |
| i = netdev_lock_pos(dev_type); |
| lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], |
| netdev_lock_name[i]); |
| } |
| |
| static inline void netdev_set_addr_lockdep_class(struct net_device *dev) |
| { |
| int i; |
| |
| i = netdev_lock_pos(dev->type); |
| lockdep_set_class_and_name(&dev->addr_list_lock, |
| &netdev_addr_lock_key[i], |
| netdev_lock_name[i]); |
| } |
| #else |
| static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, |
| unsigned short dev_type) |
| { |
| } |
| static inline void netdev_set_addr_lockdep_class(struct net_device *dev) |
| { |
| } |
| #endif |
| |
| /******************************************************************************* |
| |
| Protocol management and registration routines |
| |
| *******************************************************************************/ |
| |
| /* |
| * Add a protocol ID to the list. Now that the input handler is |
| * smarter we can dispense with all the messy stuff that used to be |
| * here. |
| * |
| * BEWARE!!! Protocol handlers, mangling input packets, |
| * MUST BE last in hash buckets and checking protocol handlers |
| * MUST start from promiscuous ptype_all chain in net_bh. |
| * It is true now, do not change it. |
| * Explanation follows: if protocol handler, mangling packet, will |
| * be the first on list, it is not able to sense, that packet |
| * is cloned and should be copied-on-write, so that it will |
| * change it and subsequent readers will get broken packet. |
| * --ANK (980803) |
| */ |
| |
| static inline struct list_head *ptype_head(const struct packet_type *pt) |
| { |
| if (pt->type == htons(ETH_P_ALL)) |
| return pt->dev ? &pt->dev->ptype_all : &ptype_all; |
| else |
| return pt->dev ? &pt->dev->ptype_specific : |
| &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; |
| } |
| |
| /** |
| * dev_add_pack - add packet handler |
| * @pt: packet type declaration |
| * |
| * Add a protocol handler to the networking stack. The passed &packet_type |
| * is linked into kernel lists and may not be freed until it has been |
| * removed from the kernel lists. |
| * |
| * This call does not sleep therefore it can not |
| * guarantee all CPU's that are in middle of receiving packets |
| * will see the new packet type (until the next received packet). |
| */ |
| |
| void dev_add_pack(struct packet_type *pt) |
| { |
| struct list_head *head = ptype_head(pt); |
| |
| spin_lock(&ptype_lock); |
| list_add_rcu(&pt->list, head); |
| spin_unlock(&ptype_lock); |
| } |
| EXPORT_SYMBOL(dev_add_pack); |
| |
| /** |
| * __dev_remove_pack - remove packet handler |
| * @pt: packet type declaration |
| * |
| * Remove a protocol handler that was previously added to the kernel |
| * protocol handlers by dev_add_pack(). The passed &packet_type is removed |
| * from the kernel lists and can be freed or reused once this function |
| * returns. |
| * |
| * The packet type might still be in use by receivers |
| * and must not be freed until after all the CPU's have gone |
| * through a quiescent state. |
| */ |
| void __dev_remove_pack(struct packet_type *pt) |
| { |
| struct list_head *head = ptype_head(pt); |
| struct packet_type *pt1; |
| |
| spin_lock(&ptype_lock); |
| |
| list_for_each_entry(pt1, head, list) { |
| if (pt == pt1) { |
| list_del_rcu(&pt->list); |
| goto out; |
| } |
| } |
| |
| pr_warn("dev_remove_pack: %p not found\n", pt); |
| out: |
| spin_unlock(&ptype_lock); |
| } |
| EXPORT_SYMBOL(__dev_remove_pack); |
| |
| /** |
| * dev_remove_pack - remove packet handler |
| * @pt: packet type declaration |
| * |
| * Remove a protocol handler that was previously added to the kernel |
| * protocol handlers by dev_add_pack(). The passed &packet_type is removed |
| * from the kernel lists and can be freed or reused once this function |
| * returns. |
| * |
| * This call sleeps to guarantee that no CPU is looking at the packet |
| * type after return. |
| */ |
void dev_remove_pack(struct packet_type *pt)
{
	/* Unlink from the handler lists (warns if @pt was never added) */
	__dev_remove_pack(pt);

	/* Wait for all in-flight RCU readers to finish so the caller
	 * may free or reuse @pt immediately on return.
	 */
	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
| |
| |
| /** |
| * dev_add_offload - register offload handlers |
| * @po: protocol offload declaration |
| * |
| * Add protocol offload handlers to the networking stack. The passed |
| * &proto_offload is linked into kernel lists and may not be freed until |
| * it has been removed from the kernel lists. |
| * |
| * This call does not sleep therefore it can not |
| * guarantee all CPU's that are in middle of receiving packets |
| * will see the new offload handlers (until the next received packet). |
| */ |
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	/* offload_base is kept sorted by ascending priority; walk to the
	 * first entry whose priority value exceeds @po's.
	 */
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	/* Insert before @elem by adding after the node preceding it.  If
	 * the loop ran off the end, @elem aliases the list head and
	 * elem->list.prev is the current tail, so this appends; the same
	 * expression also handles an empty list.
	 */
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
| |
| /** |
| * __dev_remove_offload - remove offload handler |
| * @po: packet offload declaration |
| * |
| * Remove a protocol offload handler that was previously added to the |
| * kernel offload handlers by dev_add_offload(). The passed &offload_type |
| * is removed from the kernel lists and can be freed or reused once this |
| * function returns. |
| * |
| * The packet type might still be in use by receivers |
| * and must not be freed until after all the CPU's have gone |
| * through a quiescent state. |
| */ |
| static void __dev_remove_offload(struct packet_offload *po) |
| { |
| struct list_head *head = &offload_base; |
| struct packet_offload *po1; |
| |
| spin_lock(&offload_lock); |
| |
| list_for_each_entry(po1, head, list) { |
| if (po == po1) { |
| list_del_rcu(&po->list); |
| goto out; |
| } |
| } |
| |
| pr_warn("dev_remove_offload: %p not found\n", po); |
| out: |
| spin_unlock(&offload_lock); |
| } |
| |
| /** |
| * dev_remove_offload - remove packet offload handler |
| * @po: packet offload declaration |
| * |
| * Remove a packet offload handler that was previously added to the kernel |
| * offload handlers by dev_add_offload(). The passed &offload_type is |
| * removed from the kernel lists and can be freed or reused once this |
| * function returns. |
| * |
| * This call sleeps to guarantee that no CPU is looking at the packet |
| * type after return. |
| */ |
void dev_remove_offload(struct packet_offload *po)
{
	/* Unlink from offload_base (warns if @po was never added) */
	__dev_remove_offload(po);

	/* Wait out all RCU readers so the caller may free @po on return */
	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
| |
| /****************************************************************************** |
| |
| Device Boot-time Settings Routines |
| |
| *******************************************************************************/ |
| |
| /* Boot time configuration table */ |
| static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX]; |
| |
| /** |
| * netdev_boot_setup_add - add new setup entry |
| * @name: name of the device |
| * @map: configured settings for the device |
| * |
| * Adds new setup entry to the dev_boot_setup list. The function |
| * returns 0 on error and 1 on success. This is a generic routine to |
| * all netdevices. |
| */ |
| static int netdev_boot_setup_add(char *name, struct ifmap *map) |
| { |
| struct netdev_boot_setup *s; |
| int i; |
| |
| s = dev_boot_setup; |
| for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { |
| if (s[i].name[0] == '\0' || s[i].name[0] == ' ') { |
| memset(s[i].name, 0, sizeof(s[i].name)); |
| strlcpy(s[i].name, name, IFNAMSIZ); |
| memcpy(&s[i].map, map, sizeof(s[i].map)); |
| break; |
| } |
| } |
| |
| return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1; |
| } |
| |
| /** |
| * netdev_boot_setup_check - check boot time settings |
| * @dev: the netdevice |
| * |
| * Check boot time settings for the device. |
| * The found settings are set for the device to be used |
| * later in the device probing. |
| * Returns 0 if no settings found, 1 if they are. |
| */ |
| int netdev_boot_setup_check(struct net_device *dev) |
| { |
| struct netdev_boot_setup *s = dev_boot_setup; |
| int i; |
| |
| for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { |
| if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && |
| !strcmp(dev->name, s[i].name)) { |
| dev->irq = s[i].map.irq; |
| dev->base_addr = s[i].map.base_addr; |
| dev->mem_start = s[i].map.mem_start; |
| dev->mem_end = s[i].map.mem_end; |
| return 1; |
| } |
| } |
| return 0; |
| } |
| EXPORT_SYMBOL(netdev_boot_setup_check); |
| |
| |
| /** |
| * netdev_boot_base - get address from boot time settings |
| * @prefix: prefix for network device |
| * @unit: id for network device |
| * |
| * Check boot time settings for the base address of device. |
| * The found settings are set for the device to be used |
| * later in the device probing. |
| * Returns 0 if no settings found. |
| */ |
| unsigned long netdev_boot_base(const char *prefix, int unit) |
| { |
| const struct netdev_boot_setup *s = dev_boot_setup; |
| char name[IFNAMSIZ]; |
| int i; |
| |
| sprintf(name, "%s%d", prefix, unit); |
| |
| /* |
| * If device already registered then return base of 1 |
| * to indicate not to probe for this interface |
| */ |
| if (__dev_get_by_name(&init_net, name)) |
| return 1; |
| |
| for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) |
| if (!strcmp(name, s[i].name)) |
| return s[i].map.base_addr; |
| return 0; |
| } |
| |
| /* |
| * Saves at boot time configured settings for any netdevice. |
| */ |
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	/* Parse up to four comma-separated integers from the "netdev="
	 * option; ints[0] receives the count found, ints[1..4] the values.
	 * What remains of @str is the interface name.
	 */
	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}
| |
| __setup("netdev=", netdev_boot_setup); |
| |
| /******************************************************************************* |
| |
| Device Interface Subroutines |
| |
| *******************************************************************************/ |
| |
| /** |
| * dev_get_iflink - get 'iflink' value of a interface |
| * @dev: targeted interface |
| * |
| * Indicates the ifindex the interface is linked to. |
| * Physical interfaces have the same 'ifindex' and 'iflink' values. |
| */ |
| |
| int dev_get_iflink(const struct net_device *dev) |
| { |
| if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) |
| return dev->netdev_ops->ndo_get_iflink(dev); |
| |
| return dev->ifindex; |
| } |
| EXPORT_SYMBOL(dev_get_iflink); |
| |
| /** |
| * dev_fill_metadata_dst - Retrieve tunnel egress information. |
| * @dev: targeted interface |
| * @skb: The packet. |
| * |
| * For better visibility of tunnel traffic OVS needs to retrieve |
| * egress tunnel information for a packet. Following API allows |
| * user to get this info. |
| */ |
| int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) |
| { |
| struct ip_tunnel_info *info; |
| |
| if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) |
| return -EINVAL; |
| |
| info = skb_tunnel_info_unclone(skb); |
| if (!info) |
| return -ENOMEM; |
| if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX))) |
| return -EINVAL; |
| |
| return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); |
| } |
| EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); |
| |
| /** |
| * __dev_get_by_name - find a device by its name |
| * @net: the applicable net namespace |
| * @name: name to find |
| * |
| * Find an interface by name. Must be called under RTNL semaphore |
| * or @dev_base_lock. If the name is found a pointer to the device |
| * is returned. If the name is not found then %NULL is returned. The |
| * reference counters are not incremented so the caller must be |
| * careful with locks. |
| */ |
| |
| struct net_device *__dev_get_by_name(struct net *net, const char *name) |
| { |
| struct net_device *dev; |
| struct hlist_head *head = dev_name_hash(net, name); |
| |
| hlist_for_each_entry(dev, head, name_hlist) |
| if (!strncmp(dev->name, name, IFNAMSIZ)) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(__dev_get_by_name); |
| |
| /** |
| * dev_get_by_name_rcu - find a device by its name |
| * @net: the applicable net namespace |
| * @name: name to find |
| * |
| * Find an interface by name. |
| * If the name is found a pointer to the device is returned. |
| * If the name is not found then %NULL is returned. |
| * The reference counters are not incremented so the caller must be |
| * careful with locks. The caller must hold RCU lock. |
| */ |
| |
| struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) |
| { |
| struct net_device *dev; |
| struct hlist_head *head = dev_name_hash(net, name); |
| |
| hlist_for_each_entry_rcu(dev, head, name_hlist) |
| if (!strncmp(dev->name, name, IFNAMSIZ)) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(dev_get_by_name_rcu); |
| |
| /** |
| * dev_get_by_name - find a device by its name |
| * @net: the applicable net namespace |
| * @name: name to find |
| * |
| * Find an interface by name. This can be called from any |
| * context and does its own locking. The returned handle has |
| * the usage count incremented and the caller must use dev_put() to |
| * release it when it is no longer needed. %NULL is returned if no |
| * matching device is found. |
| */ |
| |
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);	/* take a reference before leaving RCU */
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
| |
| /** |
| * __dev_get_by_index - find a device by its ifindex |
| * @net: the applicable net namespace |
| * @ifindex: index of device |
| * |
| * Search for an interface by index. Returns %NULL if the device |
| * is not found or a pointer to the device. The device has not |
| * had its reference counter increased so the caller must be careful |
| * about locking. The caller must hold either the RTNL semaphore |
| * or @dev_base_lock. |
| */ |
| |
| struct net_device *__dev_get_by_index(struct net *net, int ifindex) |
| { |
| struct net_device *dev; |
| struct hlist_head *head = dev_index_hash(net, ifindex); |
| |
| hlist_for_each_entry(dev, head, index_hlist) |
| if (dev->ifindex == ifindex) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(__dev_get_by_index); |
| |
| /** |
| * dev_get_by_index_rcu - find a device by its ifindex |
| * @net: the applicable net namespace |
| * @ifindex: index of device |
| * |
| * Search for an interface by index. Returns %NULL if the device |
| * is not found or a pointer to the device. The device has not |
| * had its reference counter increased so the caller must be careful |
| * about locking. The caller must hold RCU lock. |
| */ |
| |
| struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) |
| { |
| struct net_device *dev; |
| struct hlist_head *head = dev_index_hash(net, ifindex); |
| |
| hlist_for_each_entry_rcu(dev, head, index_hlist) |
| if (dev->ifindex == ifindex) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(dev_get_by_index_rcu); |
| |
| |
| /** |
| * dev_get_by_index - find a device by its ifindex |
| * @net: the applicable net namespace |
| * @ifindex: index of device |
| * |
| * Search for an interface by index. Returns NULL if the device |
| * is not found or a pointer to the device. The device returned has |
| * had a reference added and the pointer is safe until the user calls |
| * dev_put to indicate they have finished with it. |
| */ |
| |
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);	/* take a reference before leaving RCU */
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
| |
| /** |
| * netdev_get_name - get a netdevice name, knowing its ifindex. |
| * @net: network namespace |
| * @name: a pointer to the buffer where the name will be stored. |
| * @ifindex: the ifindex of the interface to get the name from. |
| * |
| * The use of raw_seqcount_begin() and cond_resched() before |
| * retrying is required as we want to give the writers a chance |
| * to complete when CONFIG_PREEMPT is not set. |
| */ |
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	/* Snapshot the rename sequence; if dev_change_name() races with
	 * us, read_seqcount_retry() below fails and we start over.
	 */
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* dev->name is always NUL-terminated within IFNAMSIZ, so the
	 * unbounded strcpy() cannot overrun a properly-sized @name.
	 */
	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		/* Give the renaming writer a chance to finish when
		 * CONFIG_PREEMPT is not set, then re-read.
		 */
		cond_resched();
		goto retry;
	}

	return 0;
}
| |
| /** |
| * dev_getbyhwaddr_rcu - find a device by its hardware address |
| * @net: the applicable net namespace |
| * @type: media type of device |
| * @ha: hardware address |
| * |
| * Search for an interface by MAC address. Returns NULL if the device |
| * is not found or a pointer to the device. |
| * The caller must hold RCU or RTNL. |
| * The returned device has not had its ref count increased |
| * and the caller must therefore be careful about locking |
| * |
| */ |
| |
| struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, |
| const char *ha) |
| { |
| struct net_device *dev; |
| |
| for_each_netdev_rcu(net, dev) |
| if (dev->type == type && |
| !memcmp(dev->dev_addr, ha, dev->addr_len)) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(dev_getbyhwaddr_rcu); |
| |
/* Return the first device of hardware @type in @net, or NULL.  Caller
 * must hold RTNL; no reference is taken on the returned device.
 */
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);
| |
| struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) |
| { |
| struct net_device *dev, *ret = NULL; |
| |
| rcu_read_lock(); |
| for_each_netdev_rcu(net, dev) |
| if (dev->type == type) { |
| dev_hold(dev); |
| ret = dev; |
| break; |
| } |
| rcu_read_unlock(); |
| return ret; |
| } |
| EXPORT_SYMBOL(dev_getfirstbyhwtype); |
| |
| /** |
| * __dev_get_by_flags - find any device with given flags |
| * @net: the applicable net namespace |
| * @if_flags: IFF_* values |
| * @mask: bitmask of bits in if_flags to check |
| * |
| * Search for any interface with the given flags. Returns NULL if a device |
| * is not found or a pointer to the device. Must be called inside |
| * rtnl_lock(), and result refcount is unchanged. |
| */ |
| |
| struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, |
| unsigned short mask) |
| { |
| struct net_device *dev, *ret; |
| |
| ASSERT_RTNL(); |
| |
| ret = NULL; |
| for_each_netdev(net, dev) { |
| if (((dev->flags ^ if_flags) & mask) == 0) { |
| ret = dev; |
| break; |
| } |
| } |
| return ret; |
| } |
| EXPORT_SYMBOL(__dev_get_by_flags); |
| |
| /** |
| * dev_valid_name - check if name is okay for network device |
| * @name: name string |
| * |
| * Network device names need to be valid file names to |
| * to allow sysfs to work. We also disallow any kind of |
| * whitespace. |
| */ |
| bool dev_valid_name(const char *name) |
| { |
| if (*name == '\0') |
| return false; |
| if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) |
| return false; |
| if (!strcmp(name, ".") || !strcmp(name, "..")) |
| return false; |
| |
| while (*name) { |
| if (*name == '/' || *name == ':' || isspace(*name)) |
| return false; |
| name++; |
| } |
| return true; |
| } |
| EXPORT_SYMBOL(dev_valid_name); |
| |
| /** |
| * __dev_alloc_name - allocate a name for a device |
| * @net: network namespace to allocate the device name in |
| * @name: name format string |
| * @buf: scratch buffer and result name string |
| * |
| * Passed a format string - eg "lt%d" it will try and find a suitable |
| * id. It scans list of devices to build up a free map, then chooses |
| * the first empty slot. The caller must hold the dev_base or rtnl lock |
| * while allocating the name and adding the device in order to avoid |
| * duplicates. |
| * Limited to bits_per_byte * page size devices (ie 32K on most platforms). |
| * Returns the number of the unit assigned or a negative errno code. |
| */ |
| |
| static int __dev_alloc_name(struct net *net, const char *name, char *buf) |
| { |
| int i = 0; |
| const char *p; |
| const int max_netdevices = 8*PAGE_SIZE; |
| unsigned long *inuse; |
| struct net_device *d; |
| |
| p = strnchr(name, IFNAMSIZ-1, '%'); |
| if (p) { |
| /* |
| * Verify the string as this thing may have come from |
| * the user. There must be either one "%d" and no other "%" |
| * characters. |
| */ |
| if (p[1] != 'd' || strchr(p + 2, '%')) |
| return -EINVAL; |
| |
| /* Use one page as a bit array of possible slots */ |
| inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); |
| if (!inuse) |
| return -ENOMEM; |
| |
| for_each_netdev(net, d) { |
| if (!sscanf(d->name, name, &i)) |
| continue; |
| if (i < 0 || i >= max_netdevices) |
| continue; |
| |
| /* avoid cases where sscanf is not exact inverse of printf */ |
| snprintf(buf, IFNAMSIZ, name, i); |
| if (!strncmp(buf, d->name, IFNAMSIZ)) |
| set_bit(i, inuse); |
| } |
| |
| i = find_first_zero_bit(inuse, max_netdevices); |
| free_page((unsigned long) inuse); |
| } |
| |
| if (buf != name) |
| snprintf(buf, IFNAMSIZ, name, i); |
| if (!__dev_get_by_name(net, buf)) |
| return i; |
| |
| /* It is possible to run out of possible slots |
| * when the name is long and there isn't enough space left |
| * for the digits, or if all bits are used. |
| */ |
| return -ENFILE; |
| } |
| |
| /** |
| * dev_alloc_name - allocate a name for a device |
| * @dev: device |
| * @name: name format string |
| * |
| * Passed a format string - eg "lt%d" it will try and find a suitable |
| * id. It scans list of devices to build up a free map, then chooses |
| * the first empty slot. The caller must hold the dev_base or rtnl lock |
| * while allocating the name and adding the device in order to avoid |
| * duplicates. |
| * Limited to bits_per_byte * page size devices (ie 32K on most platforms). |
| * Returns the number of the unit assigned or a negative errno code. |
| */ |
| |
| int dev_alloc_name(struct net_device *dev, const char *name) |
| { |
| char buf[IFNAMSIZ]; |
| struct net *net; |
| int ret; |
| |
| BUG_ON(!dev_net(dev)); |
| net = dev_net(dev); |
| ret = __dev_alloc_name(net, name, buf); |
| if (ret >= 0) |
| strlcpy(dev->name, buf, IFNAMSIZ); |
| return ret; |
| } |
| EXPORT_SYMBOL(dev_alloc_name); |
| |
| static int dev_alloc_name_ns(struct net *net, |
| struct net_device *dev, |
| const char *name) |
| { |
| char buf[IFNAMSIZ]; |
| int ret; |
| |
| ret = __dev_alloc_name(net, name, buf); |
| if (ret >= 0) |
| strlcpy(dev->name, buf, IFNAMSIZ); |
| return ret; |
| } |
| |
| int dev_get_valid_name(struct net *net, struct net_device *dev, |
| const char *name) |
| { |
| BUG_ON(!net); |
| |
| if (!dev_valid_name(name)) |
| return -EINVAL; |
| |
| if (strchr(name, '%')) |
| return dev_alloc_name_ns(net, dev, name); |
| else if (__dev_get_by_name(net, name)) |
| return -EEXIST; |
| else if (dev->name != name) |
| strlcpy(dev->name, name, IFNAMSIZ); |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(dev_get_valid_name); |
| |
| /** |
| * dev_change_name - change name of a device |
| * @dev: device |
| * @newname: name (or format string) must be at least IFNAMSIZ |
| * |
| * Change name of a device, can pass format strings "eth%d". |
| * for wildcarding. |
| */ |
| int dev_change_name(struct net_device *dev, const char *newname) |
| { |
| unsigned char old_assign_type; |
| char oldname[IFNAMSIZ]; |
| int err = 0; |
| int ret; |
| struct net *net; |
| |
| ASSERT_RTNL(); |
| BUG_ON(!dev_net(dev)); |
| |
| net = dev_net(dev); |
| if (dev->flags & IFF_UP) |
| return -EBUSY; |
| |
| write_seqcount_begin(&devnet_rename_seq); |
| |
| if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { |
| write_seqcount_end(&devnet_rename_seq); |
| return 0; |
| } |
| |
| memcpy(oldname, dev->name, IFNAMSIZ); |
| |
| err = dev_get_valid_name(net, dev, newname); |
| if (err < 0) { |
| write_seqcount_end(&devnet_rename_seq); |
| return err; |
| } |
| |
| if (oldname[0] && !strchr(oldname, '%')) |
| netdev_info(dev, "renamed from %s\n", oldname); |
| |
| old_assign_type = dev->name_assign_type; |
| dev->name_assign_type = NET_NAME_RENAMED; |
| |
| rollback: |
| ret = device_rename(&dev->dev, dev->name); |
| if (ret) { |
| memcpy(dev->name, oldname, IFNAMSIZ); |
| dev->name_assign_type = old_assign_type; |
| write_seqcount_end(&devnet_rename_seq); |
| return ret; |
| } |
| |
| write_seqcount_end(&devnet_rename_seq); |
| |
| netdev_adjacent_rename_links(dev, oldname); |
| |
| write_lock_bh(&dev_base_lock); |
| hlist_del_rcu(&dev->name_hlist); |
| write_unlock_bh(&dev_base_lock); |
| |
| synchronize_rcu(); |
| |
| write_lock_bh(&dev_base_lock); |
| hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name)); |
| write_unlock_bh(&dev_base_lock); |
| |
| ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); |
| ret = notifier_to_errno(ret); |
| |
| if (ret) { |
| /* err >= 0 after dev_alloc_name() or stores the first errno */ |
| if (err >= 0) { |
| err = ret; |
| write_seqcount_begin(&devnet_rename_seq); |
| memcpy(dev->name, oldname, IFNAMSIZ); |
| memcpy(oldname, newname, IFNAMSIZ); |
| dev->name_assign_type = old_assign_type; |
| old_assign_type = NET_NAME_RENAMED; |
| goto rollback; |
| } else { |
| pr_err("%s: name change rollback failed: %d\n", |
| dev->name, ret); |
| } |
| } |
| |
| return err; |
| } |
| |
| /** |
| * dev_set_alias - change ifalias of a device |
| * @dev: device |
| * @alias: name up to IFALIASZ |
| * @len: limit of bytes to copy from info |
| * |
| * Set ifalias for a device, |
| */ |
| int dev_set_alias(struct net_device *dev, const char *alias, size_t len) |
| { |
| char *new_ifalias; |
| |
| ASSERT_RTNL(); |
| |
| if (len >= IFALIASZ) |
| return -EINVAL; |
| |
| if (!len) { |
| kfree(dev->ifalias); |
| dev->ifalias = NULL; |
| return 0; |
| } |
| |
| new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); |
| if (!new_ifalias) |
| return -ENOMEM; |
| dev->ifalias = new_ifalias; |
| memcpy(dev->ifalias, alias, len); |
| dev->ifalias[len] = 0; |
| |
| return len; |
| } |
| |
| |
| /** |
| * netdev_features_change - device changes features |
| * @dev: device to cause notification |
| * |
| * Called to indicate a device has changed features. |
| */ |
| void netdev_features_change(struct net_device *dev) |
| { |
| call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); |
| } |
| EXPORT_SYMBOL(netdev_features_change); |
| |
| /** |
| * netdev_state_change - device changes state |
| * @dev: device to cause notification |
| * |
| * Called to indicate a device has changed state. This function calls |
| * the notifier chains for netdev_chain and sends a NEWLINK message |
| * to the routing socket. |
| */ |
| void netdev_state_change(struct net_device *dev) |
| { |
| if (dev->flags & IFF_UP) { |
| struct netdev_notifier_change_info change_info; |
| |
| change_info.flags_changed = 0; |
| call_netdevice_notifiers_info(NETDEV_CHANGE, dev, |
| &change_info.info); |
| rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); |
| } |
| } |
| EXPORT_SYMBOL(netdev_state_change); |
| |
| /** |
| * netdev_notify_peers - notify network peers about existence of @dev |
| * @dev: network device |
| * |
| * Generate traffic such that interested network peers are aware of |
| * @dev, such as by generating a gratuitous ARP. This may be used when |
| * a device wants to inform the rest of the network about some sort of |
| * reconfiguration such as a failover event or virtual machine |
| * migration. |
| */ |
| void netdev_notify_peers(struct net_device *dev) |
| { |
| rtnl_lock(); |
| call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); |
| call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); |
| rtnl_unlock(); |
| } |
| EXPORT_SYMBOL(netdev_notify_peers); |
| |
/* Bring @dev up without sending notifications: validate the hardware
 * address, call the driver's open routine, and mark the device IFF_UP.
 * Caller holds the RTNL.  Returns 0 or a negative errno.
 */
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	/* give notifier chain users a chance to veto the bring-up */
	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		/* feed the (possibly new) MAC address into the random pool */
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
| |
| /** |
| * dev_open - prepare an interface for use. |
| * @dev: device to open |
| * |
| * Takes a device from down to up state. The device's private open |
| * function is invoked and then the multicast lists are loaded. Finally |
| * the device is moved into the up state and a %NETDEV_UP message is |
| * sent to the netdev notifier chain. |
| * |
| * Calling this function on an active interface is a nop. On a failure |
| * a negative errno code is returned. |
| */ |
| int dev_open(struct net_device *dev) |
| { |
| int ret; |
| |
| if (dev->flags & IFF_UP) |
| return 0; |
| |
| ret = __dev_open(dev); |
| if (ret < 0) |
| return ret; |
| |
| rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); |
| call_netdevice_notifiers(NETDEV_UP, dev); |
| |
| return ret; |
| } |
| EXPORT_SYMBOL(dev_open); |
| |
/* Take every device on @head (linked via close_list) down: notify,
 * clear the running bit, deactivate the qdiscs, then call each driver's
 * stop routine.  Caller holds the RTNL; may sleep.  Always returns 0.
 */
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device specific close. This cannot fail.
		 * Only if device is UP
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}
| |
| static int __dev_close(struct net_device *dev) |
| { |
| int retval; |
| LIST_HEAD(single); |
| |
| list_add(&dev->close_list, &single); |
| retval = __dev_close_many(&single); |
| list_del(&single); |
| |
| return retval; |
| } |
| |
| int dev_close_many(struct list_head *head, bool unlink) |
| { |
| struct net_device *dev, *tmp; |
| |
| /* Remove the devices that don't need to be closed */ |
| list_for_each_entry_safe(dev, tmp, head, close_list) |
| if (!(dev->flags & IFF_UP)) |
| list_del_init(&dev->close_list); |
| |
| __dev_close_many(head); |
| |
| list_for_each_entry_safe(dev, tmp, head, close_list) { |
| rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); |
| call_netdevice_notifiers(NETDEV_DOWN, dev); |
| if (unlink) |
| list_del_init(&dev->close_list); |
| } |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(dev_close_many); |
| |
| /** |
| * dev_close - shutdown an interface. |
| * @dev: device to shutdown |
| * |
| * This function moves an active device into down state. A |
| * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device |
| * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier |
| * chain. |
| */ |
| int dev_close(struct net_device *dev) |
| { |
| if (dev->flags & IFF_UP) { |
| LIST_HEAD(single); |
| |
| list_add(&dev->close_list, &single); |
| dev_close_many(&single, true); |
| list_del(&single); |
| } |
| return 0; |
| } |
| EXPORT_SYMBOL(dev_close); |
| |
| |
| /** |
| * dev_disable_lro - disable Large Receive Offload on a device |
| * @dev: device |
| * |
| * Disable Large Receive Offload (LRO) on a net device. Must be |
| * called under RTNL. This is needed if received packets may be |
| * forwarded to another interface. |
| */ |
| void dev_disable_lro(struct net_device *dev) |
| { |
| struct net_device *lower_dev; |
| struct list_head *iter; |
| |
| dev->wanted_features &= ~NETIF_F_LRO; |
| netdev_update_features(dev); |
| |
| if (unlikely(dev->features & NETIF_F_LRO)) |
| netdev_WARN(dev, "failed to disable LRO!\n"); |
| |
| netdev_for_each_lower_dev(dev, lower_dev, iter) |
| dev_disable_lro(lower_dev); |
| } |
| EXPORT_SYMBOL(dev_disable_lro); |
| |
/* Invoke a single notifier block for @dev, wrapping the device in the
 * netdev_notifier_info structure the callback expects.
 */
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}
| |
/* Nonzero during early boot; while set, register_netdevice_notifier()
 * skips replaying REGISTER/UP events.  Presumably cleared once core
 * network init completes — the clearing site is outside this chunk.
 */
static int dev_boot_phase = 1;
| |
| /** |
| * register_netdevice_notifier - register a network notifier block |
| * @nb: notifier |
| * |
| * Register a notifier to be called when network device events occur. |
| * The notifier passed is linked into the kernel structures and must |
| * not be reused until it has been unregistered. A negative errno code |
| * is returned on a failure. |
| * |
| * When registered all registration and up events are replayed |
| * to the new notifier to allow device to have a race free |
| * view of the network device list. |
| */ |
| |
| int register_netdevice_notifier(struct notifier_block *nb) |
| { |
| struct net_device *dev; |
| struct net_device *last; |
| struct net *net; |
| int err; |
| |
| rtnl_lock(); |
| err = raw_notifier_chain_register(&netdev_chain, nb); |
| if (err) |
| goto unlock; |
| if (dev_boot_phase) |
| goto unlock; |
| for_each_net(net) { |
| for_each_netdev(net, dev) { |
| err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); |
| err = notifier_to_errno(err); |
| if (err) |
| goto rollback; |
| |
| if (!(dev->flags & IFF_UP)) |
| continue; |
| |
| call_netdevice_notifier(nb, NETDEV_UP, dev); |
| } |
| } |
| |
| unlock: |
| rtnl_unlock(); |
| return err; |
| |
| rollback: |
| last = dev; |
| for_each_net(net) { |
| for_each_netdev(net, dev) { |
| if (dev == last) |
| goto outroll; |
| |
| if (dev->flags & IFF_UP) { |
| call_netdevice_notifier(nb, NETDEV_GOING_DOWN, |
| dev); |
| call_netdevice_notifier(nb, NETDEV_DOWN, dev); |
| } |
| call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); |
| } |
| } |
| |
| outroll: |
| raw_notifier_chain_unregister(&netdev_chain, nb); |
| goto unlock; |
| } |
| EXPORT_SYMBOL(register_netdevice_notifier); |
| |
| /** |
| * unregister_netdevice_notifier - unregister a network notifier block |
| * @nb: notifier |
| * |
| * Unregister a notifier previously registered by |
| * register_netdevice_notifier(). The notifier is unlinked into the |
| * kernel structures and may then be reused. A negative errno code |
| * is returned on a failure. |
| * |
| * After unregistering unregister and down device events are synthesized |
| * for all devices on the device list to the removed notifier to remove |
| * the need for special case cleanup code. |
| */ |
| |
| int unregister_netdevice_notifier(struct notifier_block *nb) |
| { |
| struct net_device *dev; |
| struct net *net; |
| int err; |
| |
| rtnl_lock(); |
| err = raw_notifier_chain_unregister(&netdev_chain, nb); |
| if (err) |
| goto unlock; |
| |
| for_each_net(net) { |
| for_each_netdev(net, dev) { |
| if (dev->flags & IFF_UP) { |
| call_netdevice_notifier(nb, NETDEV_GOING_DOWN, |
| dev); |
| call_netdevice_notifier(nb, NETDEV_DOWN, dev); |
| } |
| call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); |
| } |
| } |
| unlock: |
| rtnl_unlock(); |
| return err; |
| } |
| EXPORT_SYMBOL(unregister_netdevice_notifier); |
| |
| /** |
| * call_netdevice_notifiers_info - call all network notifier blocks |
| * @val: value passed unmodified to notifier function |
| * @dev: net_device pointer passed unmodified to notifier function |
| * @info: notifier information data |
| * |
| * Call all network notifier blocks. Parameters and return value |
| * are as for raw_notifier_call_chain(). |
| */ |
| |
| static int call_netdevice_notifiers_info(unsigned long val, |
| struct net_device *dev, |
| struct netdev_notifier_info *info) |
| { |
| ASSERT_RTNL(); |
| netdev_notifier_info_init(info, dev); |
| return raw_notifier_call_chain(&netdev_chain, val, info); |
| } |
| |
| /** |
| * call_netdevice_notifiers - call all network notifier blocks |
| * @val: value passed unmodified to notifier function |
| * @dev: net_device pointer passed unmodified to notifier function |
| * |
| * Call all network notifier blocks. Parameters and return value |
| * are as for raw_notifier_call_chain(). |
| */ |
| |
| int call_netdevice_notifiers(unsigned long val, struct net_device *dev) |
| { |
| struct netdev_notifier_info info; |
| |
| return call_netdevice_notifiers_info(val, dev, &info); |
| } |
| EXPORT_SYMBOL(call_netdevice_notifiers); |
| |
| /** |
| * call_netdevice_notifiers_mtu - call all network notifier blocks |
| * @val: value passed unmodified to notifier function |
| * @dev: net_device pointer passed unmodified to notifier function |
| * @arg: additional u32 argument passed to the notifier function |
| * |
| * Call all network notifier blocks. Parameters and return value |
| * are as for raw_notifier_call_chain(). |
| */ |
| static int call_netdevice_notifiers_mtu(unsigned long val, |
| struct net_device *dev, u32 arg) |
| { |
| struct netdev_notifier_info_ext info = { |
| .info.dev = dev, |
| .ext.mtu = arg, |
| }; |
| |
| BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); |
| |
| return call_netdevice_notifiers_info(val, dev, &info.info); |
| } |
| |
#ifdef CONFIG_NET_INGRESS
/* Reference count, implemented as a static key, taken by users of the
 * ingress hook so the RX hot path can skip it when unused.
 */
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif
| |
#ifdef CONFIG_NET_EGRESS
/* Same idea as ingress_needed, but for the egress hook */
static struct static_key egress_needed __read_mostly;

void net_inc_egress_queue(void)
{
	static_key_slow_inc(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_key_slow_dec(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
| |
/* When set, skbs get wall-clock timestamps on RX/TX (see net_timestamp_set) */
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* NOTE(review): the enable/disable fast paths below only touch atomics
 * and defer the actual static-key flip to this work item — presumably
 * because flipping the key is not safe in the callers' context; confirm.
 */
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	/* fold the deferred deltas into the wanted count, then set the
	 * key to match its sign
	 */
	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_key_enable(&netstamp_needed);
	else
		static_key_disable(&netstamp_needed);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif
| |
/* Take a reference on packet timestamping. */
void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	/* fast path: while the count is already positive, just cmpxchg
	 * it one higher — the key is already in the right state
	 */
	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 0)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
			return;
	}
	/* slow path: record a deferred +1 and let netstamp_work flip the key */
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_inc(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);
| |
/* Drop a reference on packet timestamping. */
void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int wanted;

	/* fast path: while more than one reference remains, just cmpxchg
	 * the count one lower — the key stays enabled either way
	 */
	while (1) {
		wanted = atomic_read(&netstamp_wanted);
		if (wanted <= 1)
			break;
		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
			return;
	}
	/* slow path: record a deferred -1 and let netstamp_work flip the key */
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_key_slow_dec(&netstamp_needed);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
| |
/* Clear any stale timestamp, then stamp @skb iff timestamping users exist */
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}
| |
/* Stamp SKB with the current time iff timestamping users exist, COND
 * holds, and the skb has not been stamped already.
 */
#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\

| |
| bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) |
| { |
| unsigned int len; |
| |
| if (!(dev->flags & IFF_UP)) |
| return false; |
| |
| len = dev->mtu + dev->hard_header_len + VLAN_HLEN; |
| if (skb->len <= len) |
| return true; |
| |
| /* if TSO is enabled, we don't care about the length as the packet |
| * could be forwarded without being segmented before |
| */ |
| if (skb_is_gso(skb)) |
| return true; |
| |
| return false; |
| } |
| EXPORT_SYMBOL_GPL(is_skb_forwardable); |
| |
/* Prepare @skb for reception on @dev via ____dev_forward_skb() (defined
 * elsewhere — presumably scrubs state and checks forwardability; confirm
 * there).  On success, re-derive the protocol and fix up the checksum
 * for the pulled Ethernet header.  Returns 0 or the helper's error.
 */
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
| |
| /** |
| * dev_forward_skb - loopback an skb to another netif |
| * |
| * @dev: destination network device |
| * @skb: buffer to forward |
| * |
| * return values: |
| * NET_RX_SUCCESS (no congestion) |
| * NET_RX_DROP (packet was dropped, but freed) |
| * |
| * dev_forward_skb can be used for injecting an skb from the |
| * start_xmit function of one device into the receive queue |
| * of another device. |
| * |
| * The receiving device may be in another namespace, so |
| * we have to clear all information in the skb that could |
| * impact namespace isolation. |
| */ |
| int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) |
| { |
| return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); |
| } |
| EXPORT_SYMBOL_GPL(dev_forward_skb); |
| |
/* Hand @skb to one protocol handler/tap, taking an extra reference so
 * the handler gets its own hold on the skb.  Returns -ENOMEM when
 * user-referenced fragments cannot be orphaned first.
 */
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
| |
| static inline void deliver_ptype_list_skb(struct sk_buff *skb, |
| struct packet_type **pt, |
| struct net_device *orig_dev, |
| __be16 type, |
| struct list_head *ptype_list) |
| { |
| struct packet_type *ptype, *pt_prev = *pt; |
| |
| list_for_each_entry_rcu(ptype, ptype_list, list) { |
| if (ptype->type != type) |
| continue; |
| if (pt_prev) |
| deliver_skb(skb, pt_prev, orig_dev); |
| pt_prev = ptype; |
| } |
| *pt = pt_prev; |
| } |
| |
| static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) |
| { |
| if (!ptype->af_packet_priv || !skb->sk) |
| return false; |
| |
| if (ptype->id_match) |
| return ptype->id_match(ptype, skb->sk); |
| else if ((struct sock *)ptype->af_packet_priv == skb->sk) |
| return true; |
| |
| return false; |
| } |
| |
| /* |
| * Support routine. Sends outgoing frames to any network |
| * taps currently in use. |
| */ |
| |
| void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) |
| { |
| struct packet_type *ptype; |
| struct sk_buff *skb2 = NULL; |
| struct packet_type *pt_prev = NULL; |
| struct list_head *ptype_list = &ptype_all; |
| |
| rcu_read_lock(); |
| again: |
| list_for_each_entry_rcu(ptype, ptype_list, list) { |
| /* Never send packets back to the socket |
| * they originated from - MvS (miquels@drinkel.ow.org) |
| */ |
| if (skb_loop_sk(ptype, skb)) |
| continue; |
| |
| if (pt_prev) { |
| deliver_skb(skb2, pt_prev, skb->dev); |
| pt_prev = ptype; |
| continue; |
| } |
| |
| /* need to clone skb, done only once */ |
| skb2 = skb_clone(skb, GFP_ATOMIC); |
| if (!skb2) |
| goto out_unlock; |
| |
| net_timestamp_set(skb2); |
| |
| /* skb->nh should be correctly |
| * set by sender, so that the second statement is |
| * just protection against buggy protocols. |
| */ |
| skb_reset_mac_header(skb2); |
| |
| if (skb_network_header(skb2) < skb2->data || |
| skb_network_header(skb2) > skb_tail_pointer(skb2)) { |
| net_crit_ratelimited("protocol %04x is buggy, dev %s\n", |
| ntohs(skb2->protocol), |
| dev->name); |
| skb_reset_network_header(skb2); |
| } |
| |
| skb2->transport_header = skb2->network_header; |
| skb2->pkt_type = PACKET_OUTGOING; |
| pt_prev = ptype; |
| } |
| |
| if (ptype_list == &ptype_all) { |
| ptype_list = &dev->ptype_all; |
| goto again; |
| } |
| out_unlock: |
| if (pt_prev) |
| pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); |
| rcu_read_unlock(); |
| } |
| EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); |
| |
| /** |
| * netif_setup_tc - Handle tc mappings on real_num_tx_queues change |
| * @dev: Network device |
| * @txq: number of queues available |
| * |
| * If real_num_tx_queues is changed the tc mappings may no longer be |
| * valid. To resolve this verify the tc mapping remains valid and if |
| * not NULL the mapping. With no priorities mapping to this |
| * offset/count pair it will no longer be used. In the worst case TC0 |
| * is invalid nothing can be done so disable priority mappings. If is |
| * expected that drivers will fix this mapping if they can before |
| * calling netif_set_real_num_tx_queues. |
| */ |
| static void netif_setup_tc(struct net_device *dev, unsigned int txq) |
| { |
| int i; |
| struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; |
| |
| /* If TC0 is invalidated disable TC mapping */ |
| if (tc->offset + tc->count > txq) { |
| pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); |
| dev->num_tc = 0; |
| return; |
| } |
| |
| /* Invalidated prio to tc mappings set to TC0 */ |
| for (i = 1; i < TC_BITMASK + 1; i++) { |
| int q = netdev_get_prio_tc_map(dev, i); |
| |
| tc = &dev->tc_to_txq[q]; |
| if (tc->offset + tc->count > txq) { |
| pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n", |
| i, q); |
| netdev_set_prio_tc_map(dev, i, 0); |
| } |
| } |
| } |
| |
#ifdef CONFIG_XPS
/* Serializes all updates to the per-device XPS maps */
static DEFINE_MUTEX(xps_map_mutex);
/* dereference an XPS RCU pointer, asserting the update mutex is held */
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
| |
/* Remove tx queue @index from the map of @cpu in @dev_maps.  Returns
 * the CPU's map if one remains afterwards (whether or not @index was
 * present), or NULL when the CPU had no map or its map became empty
 * and was freed via RCU.
 */
static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				/* swap-delete: move the last entry down */
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}
| |
/* Remove every tx queue >= @index from all CPUs' XPS maps, freeing the
 * whole map set when nothing remains, and reset the NUMA hint on the
 * affected tx queues.
 */
static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		/* the CPU's map survived every removal, so entries for
		 * queues below @index remain — keep the map set alive
		 */
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	/* forget the NUMA affinity of the queues we just unmapped */
	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}
| |
/* Return a map for @cpu that has room for queue @index: the map itself
 * when @index is already present or spare capacity exists, otherwise a
 * larger copy allocated on @cpu's NUMA node.  Returns NULL on allocation
 * failure.  The old map, if replaced, is NOT freed here.
 */
static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	/* nothing to do if the queue is already in the map */
	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		/* full: double the allocation */
		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	/* copy the existing entries (pos == old map's len here) */
	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
| |
/* Bind tx queue @index to the CPUs in @mask: build a new XPS map set,
 * publish it via RCU, free the replaced maps, and prune @index from
 * CPUs no longer in @mask.  Also updates the queue's NUMA hint.
 * Returns 0 or -ENOMEM.
 */
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		/* reuse, or grow, the CPU's existing map */
		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	/* @mask selected no online CPU: leave the old maps in place but
	 * still prune @index from them below
	 */
	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			/* track a common NUMA node for the selected CPUs:
			 * -2 = unset, -1 = CPUs span multiple nodes
			 */
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}

	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
| |
| #endif |
| /* |
| * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues |
| * greater then real_num_tx_queues stale skbs on the qdisc must be flushed. |
| */ |
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	bool disabling;
	int rc;

	/* Shrinking below the current count means some queues go out of
	 * service; they must be flushed after the new count is visible.
	 */
	disabling = txq < dev->real_num_tx_queues;

	/* txq must stay within the range the device was allocated with */
	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		/* Keep the per-queue sysfs objects in sync first. */
		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		dev->real_num_tx_queues = txq;

		if (disabling) {
			/* Wait for in-flight readers of the old count, then
			 * drop stale skbs queued on the now-disabled queues.
			 */
			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	} else {
		/* Device not visible to the stack yet: no flushing needed. */
		dev->real_num_tx_queues = txq;
	}

	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
| |
| #ifdef CONFIG_SYSFS |
| /** |
| * netif_set_real_num_rx_queues - set actual number of RX queues used |
| * @dev: Network device |
| * @rxq: Actual number of RX queues |
| * |
| * This must be called either with the rtnl_lock held or before |
| * registration of the net device. Returns 0 on success, or a |
| * negative error code. If called before registration, it always |
| * succeeds. |
| */ |
| int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) |
| { |
| int rc; |
| |
| if (rxq < 1 || rxq > dev->num_rx_queues) |
| return -EINVAL; |
| |
| if (dev->reg_state == NETREG_REGISTERED) { |
| ASSERT_RTNL(); |
| |
| rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, |
| rxq); |
| if (rc) |
| return rc; |
| } |
| |
| dev->real_num_rx_queues = rxq; |
| return 0; |
| } |
| EXPORT_SYMBOL(netif_set_real_num_rx_queues); |
| #endif |
| |
| /** |
| * netif_get_num_default_rss_queues - default number of RSS queues |
| * |
| * This routine should set an upper limit on the number of RSS queues |
| * used by default by multiqueue devices. |
| */ |
| int netif_get_num_default_rss_queues(void) |
| { |
| return is_kdump_kernel() ? |
| 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus()); |
| } |
| EXPORT_SYMBOL(netif_get_num_default_rss_queues); |
| |
/* Append @q to this CPU's output queue list and kick the TX softirq.
 * Caller must have claimed __QDISC_STATE_SCHED (see __netif_schedule()).
 */
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	/* The per-CPU softnet output queue may only be touched with
	 * local interrupts disabled.
	 */
	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	/* append at the tail of the singly linked list */
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
| |
| void __netif_schedule(struct Qdisc *q) |
| { |
| if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) |
| __netif_reschedule(q); |
| } |
| EXPORT_SYMBOL(__netif_schedule); |
| |
/* Overlay for skb->cb used to stash the free reason while the skb
 * sits on a per-CPU completion queue (see __dev_kfree_skb_irq()).
 */
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};
| |
/* View an skb's control block as a dev_kfree_skb_cb. */
static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}
| |
| void netif_schedule_queue(struct netdev_queue *txq) |
| { |
| rcu_read_lock(); |
| if (!(txq->state & QUEUE_STATE_ANY_XOFF)) { |
| struct Qdisc *q = rcu_dereference(txq->qdisc); |
| |
| __netif_schedule(q); |
| } |
| rcu_read_unlock(); |
| } |
| EXPORT_SYMBOL(netif_schedule_queue); |
| |
| /** |
| * netif_wake_subqueue - allow sending packets on subqueue |
| * @dev: network device |
| * @queue_index: sub queue index |
| * |
| * Resume individual transmit queue of a device with multiple transmit queues. |
| */ |
| void netif_wake_subqueue(struct net_device *dev, u16 queue_index) |
| { |
| struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
| |
| if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) { |
| struct Qdisc *q; |
| |
| rcu_read_lock(); |
| q = rcu_dereference(txq->qdisc); |
| __netif_schedule(q); |
| rcu_read_unlock(); |
| } |
| } |
| EXPORT_SYMBOL(netif_wake_subqueue); |
| |
/* Clear the driver XOFF bit on @dev_queue and, if it was set,
 * reschedule the attached qdisc so pending packets get transmitted.
 */
void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
| |
/* Free @skb from contexts that cannot free directly (hard irq, irqs
 * disabled) by deferring the free to the TX softirq.
 */
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(atomic_read(&skb->users) == 1)) {
		/* Sole owner: order reads before dropping the refcount
		 * non-atomically (cheaper than atomic_dec_and_test).
		 */
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		/* Other references remain; nothing to free yet. */
		return;
	}
	/* Record why, then push onto this CPU's completion queue with
	 * irqs off and let NET_TX_SOFTIRQ do the actual free.
	 */
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
| |
| void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) |
| { |
| if (in_irq() || irqs_disabled()) |
| __dev_kfree_skb_irq(skb, reason); |
| else |
| dev_kfree_skb(skb); |
| } |
| EXPORT_SYMBOL(__dev_kfree_skb_any); |
| |
| |
| /** |
| * netif_device_detach - mark device as removed |
| * @dev: network device |
| * |
| * Mark device as removed from system and therefore no longer available. |
| */ |
| void netif_device_detach(struct net_device *dev) |
| { |
| if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && |
| netif_running(dev)) { |
| netif_tx_stop_all_queues(dev); |
| } |
| } |
| EXPORT_SYMBOL(netif_device_detach); |
| |
| /** |
| * netif_device_attach - mark device as attached |
| * @dev: network device |
| * |
| * Mark device as attached from system and restart if needed. |
| */ |
| void netif_device_attach(struct net_device *dev) |
| { |
| if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && |
| netif_running(dev)) { |
| netif_tx_wake_all_queues(dev); |
| __netdev_watchdog_up(dev); |
| } |
| } |
| EXPORT_SYMBOL(netif_device_attach); |
| |
| /* |
| * Returns a Tx hash based on the given packet descriptor a Tx queues' number |
| * to be used as a distribution range. |
| */ |
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		/* Reuse the recorded RX queue, folded into range by
		 * repeated subtraction (avoids a division).
		 */
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		/* Restrict the range to the queues belonging to this
		 * packet's traffic class.
		 */
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	/* Scale the flow hash into [qoffset, qoffset + qcount) */
	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
| |
/* Rate-limited diagnostic for packets whose offload state (checksum /
 * GSO fields) is inconsistent with what the stack or device expects.
 */
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		/* Prefer the driver name; fall back to the netdev name. */
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}
| |
| /* |
| * Invalidate hardware checksum when packet is to be mangled, and |
| * complete checksum manually on outgoing path. |
| */ |
| int skb_checksum_help(struct sk_buff *skb) |
| { |
| __wsum csum; |
| int ret = 0, offset; |
| |
| if (skb->ip_summed == CHECKSUM_COMPLETE) |
| goto out_set_summed; |
| |
| if (unlikely(skb_shinfo(skb)->gso_size)) { |
| skb_warn_bad_offload(skb); |
| return -EINVAL; |
| } |
| |
| /* Before computing a checksum, we should make sure no frag could |
| * be modified by an external entity : checksum could be wrong. |
| */ |
| if (skb_has_shared_frag(skb)) { |
| ret = __skb_linearize(skb); |
| if (ret) |
| goto out; |
| } |
| |
| offset = skb_checksum_start_offset(skb); |
| BUG_ON(offset >= skb_headlen(skb)); |
| csum = skb_checksum(skb, offset, skb->len - offset, 0); |
| |
| offset += skb->csum_offset; |
| BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb)); |
| |
| if (skb_cloned(skb) && |
| !skb_clone_writable(skb, offset + sizeof(__sum16))) { |
| ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
| if (ret) |
| goto out; |
| } |
| |
| *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; |
| out_set_summed: |
| skb->ip_summed = CHECKSUM_NONE; |
| out: |
| return ret; |
| } |
| EXPORT_SYMBOL(skb_checksum_help); |
| |
| /* skb_csum_offload_check - Driver helper function to determine if a device |
| * with limited checksum offload capabilities is able to offload the checksum |
| * for a given packet. |
| * |
| * Arguments: |
| * skb - sk_buff for the packet in question |
| * spec - contains the description of what device can offload |
 * csum_encapped - returns true if the checksum being offloaded is
 *	encapsulated. That is, it is the checksum for the transport header
 *	in the inner headers.
| * checksum_help - when set indicates that helper function should |
| * call skb_checksum_help if offload checks fail |
| * |
| * Returns: |
| * true: Packet has passed the checksum checks and should be offloadable to |
| * the device (a driver may still need to check for additional |
| * restrictions of its device) |
| * false: Checksum is not offloadable. If checksum_help was set then |
| * skb_checksum_help was called to resolve checksum for non-GSO |
| * packets and when IP protocol is not SCTP |
| */ |
bool __skb_csum_offload_chk(struct sk_buff *skb,
			    const struct skb_csum_offl_spec *spec,
			    bool *csum_encapped,
			    bool csum_help)
{
	struct iphdr *iph;
	struct ipv6hdr *ipv6;
	void *nhdr;
	int protocol;
	u8 ip_proto;

	/* VLAN-tagged packets are only offloadable if the spec allows it */
	if (skb->protocol == htons(ETH_P_8021Q) ||
	    skb->protocol == htons(ETH_P_8021AD)) {
		if (!spec->vlan_okay)
			goto need_help;
	}

	/* We check whether the checksum refers to a transport layer checksum in
	 * the outermost header or an encapsulated transport layer checksum that
	 * corresponds to the inner headers of the skb. If the checksum is for
	 * something else in the packet we need help.
	 */
	if (skb_checksum_start_offset(skb) == skb_transport_offset(skb)) {
		/* Non-encapsulated checksum */
		protocol = eproto_to_ipproto(vlan_get_protocol(skb));
		nhdr = skb_network_header(skb);
		*csum_encapped = false;
		if (spec->no_not_encapped)
			goto need_help;
	} else if (skb->encapsulation && spec->encap_okay &&
		   skb_checksum_start_offset(skb) ==
		   skb_inner_transport_offset(skb)) {
		/* Encapsulated checksum */
		*csum_encapped = true;
		/* NOTE(review): no default case here — if inner_protocol_type
		 * ever held another value, 'protocol' would be read
		 * uninitialized below; presumably only these two values can
		 * occur — confirm against the enum definition.
		 */
		switch (skb->inner_protocol_type) {
		case ENCAP_TYPE_ETHER:
			protocol = eproto_to_ipproto(skb->inner_protocol);
			break;
		case ENCAP_TYPE_IPPROTO:
			protocol = skb->inner_protocol;
			break;
		}
		nhdr = skb_inner_network_header(skb);
	} else {
		goto need_help;
	}

	/* Validate the network-layer protocol against the spec */
	switch (protocol) {
	case IPPROTO_IP:
		if (!spec->ipv4_okay)
			goto need_help;
		iph = nhdr;
		ip_proto = iph->protocol;
		/* ihl != 5 means IPv4 options are present */
		if (iph->ihl != 5 && !spec->ip_options_okay)
			goto need_help;
		break;
	case IPPROTO_IPV6:
		if (!spec->ipv6_okay)
			goto need_help;
		if (spec->no_encapped_ipv6 && *csum_encapped)
			goto need_help;
		ipv6 = nhdr;
		nhdr += sizeof(*ipv6);
		ip_proto = ipv6->nexthdr;
		break;
	default:
		goto need_help;
	}

ip_proto_again:
	/* Validate the transport protocol; IPv6 extension headers are
	 * walked by looping back to ip_proto_again.
	 */
	switch (ip_proto) {
	case IPPROTO_TCP:
		if (!spec->tcp_okay ||
		    skb->csum_offset != offsetof(struct tcphdr, check))
			goto need_help;
		break;
	case IPPROTO_UDP:
		if (!spec->udp_okay ||
		    skb->csum_offset != offsetof(struct udphdr, check))
			goto need_help;
		break;
	case IPPROTO_SCTP:
		/* skb_checksum_help() cannot produce SCTP's checksum, so
		 * there is no software fallback: cant_help, not need_help.
		 */
		if (!spec->sctp_okay ||
		    skb->csum_offset != offsetof(struct sctphdr, checksum))
			goto cant_help;
		break;
	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 *opthdr = nhdr;

		/* Extension headers are only valid for IPv6 */
		if (protocol != IPPROTO_IPV6 || !spec->ext_hdrs_okay)
			goto need_help;

		/* opthdr[0] = next header, opthdr[1] = length in 8-octet
		 * units minus one.
		 */
		ip_proto = opthdr[0];
		nhdr += (opthdr[1] + 1) << 3;

		goto ip_proto_again;
	}
	default:
		goto need_help;
	}

	/* Passed the tests for offloading checksum */
	return true;

need_help:
	if (csum_help && !skb_shinfo(skb)->gso_size)
		skb_checksum_help(skb);
cant_help:
	return false;
}
EXPORT_SYMBOL(__skb_csum_offload_chk);
| |
| __be16 skb_network_protocol(struct sk_buff *skb, int *depth) |
| { |
| __be16 type = skb->protocol; |
| |
| /* Tunnel gso handlers can set protocol to ethernet. */ |
| if (type == htons(ETH_P_TEB)) { |
| struct ethhdr *eth; |
| |
| if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) |
| return 0; |
| |
| eth = (struct ethhdr *)skb->data; |
| type = eth->h_proto; |
| } |
| |
| return __vlan_get_protocol(skb, type, depth); |
| } |
| |
| /** |
| * skb_mac_gso_segment - mac layer segmentation handler. |
| * @skb: buffer to segment |
| * @features: features for the output path (see dev->features) |
| */ |
| struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, |
| netdev_features_t features) |
| { |
| struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); |
| struct packet_offload *ptype; |
| int vlan_depth = skb->mac_len; |
| __be16 type = skb_network_protocol(skb, &vlan_depth); |
| |
| if (unlikely(!type)) |
| return ERR_PTR(-EINVAL); |
| |
| __skb_pull(skb, vlan_depth); |
| |
| rcu_read_lock(); |
| list_for_each_entry_rcu(ptype, &offload_base, list) { |
| if (ptype->type == type && ptype->callbacks.gso_segment) { |
| segs = ptype->callbacks.gso_segment(skb, features); |
| break; |
| } |
| } |
| rcu_read_unlock(); |
| |
| __skb_push(skb, skb->data - skb_mac_header(skb)); |
| |
| return segs; |
| } |
| EXPORT_SYMBOL(skb_mac_gso_segment); |
| |
| |
| /* openvswitch calls this on rx path, so we need a different check. |
| */ |
| static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) |
| { |
| if (tx_path) |
| return skb->ip_summed != CHECKSUM_PARTIAL && |
| skb->ip_summed != CHECKSUM_UNNECESSARY; |
| |
| return skb->ip_summed == CHECKSUM_NONE; |
| } |
| |
| /** |
| * __skb_gso_segment - Perform segmentation on skb. |
| * @skb: buffer to segment |
| * @features: features for the output path (see dev->features) |
| * @tx_path: whether it is called in TX path |
| * |
| * This function segments the given skb and returns a list of segments. |
| * |
| * It may return NULL if the skb requires no segmentation. This is |
| * only possible when GSO is used for verifying header integrity. |
| * |
| * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb. |
| */ |
| struct sk_buff *__skb_gso_segment(struct sk_buff *skb, |
| netdev_features_t features, bool tx_path) |
| { |
| struct sk_buff *segs; |
| |
| if (unlikely(skb_needs_check(skb, tx_path))) { |
| int err; |
| |
| /* We're going to init ->check field in TCP or UDP header */ |
| err = skb_cow_head(skb, 0); |
| if (err < 0) |
| return ERR_PTR(err); |
| } |
| |
| /* Only report GSO partial support if it will enable us to |
| * support segmentation on this frame without needing additional |
| * work. |
| */ |
| if (features & NETIF_F_GSO_PARTIAL) { |
| netdev_features_t partial_features = NETIF_F_GSO_ROBUST; |
| struct net_device *dev = skb->dev; |
| |
| partial_features |= dev->features & dev->gso_partial_features; |
| if (!skb_gso_ok(skb, features | partial_features)) |
| features &= ~NETIF_F_GSO_PARTIAL; |
| } |
| |
| BUILD_BUG_ON(SKB_SGO_CB_OFFSET + |
| sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); |
| |
| SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); |
| SKB_GSO_CB(skb)->encap_level = 0; |
| |
| skb_reset_mac_header(skb); |
| skb_reset_mac_len(skb); |
| |
| segs = skb_mac_gso_segment(skb, features); |
| |
| if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) |
| skb_warn_bad_offload(skb); |
| |
| return segs; |
| } |
| EXPORT_SYMBOL(__skb_gso_segment); |
| |
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (!net_ratelimit())
		return;

	pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
	dump_stack();
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
| |
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

/* Return 1 if @skb has fragment pages the device cannot DMA from:
 * highmem pages without NETIF_F_HIGHDMA, or pages beyond the parent
 * device's DMA mask on physically addressed buses.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
			/* Check the last byte of the page against the mask */
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}
| |
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	/* For MPLS frames, narrow to the device's MPLS feature set. */
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
/* MPLS GSO not compiled in: nothing to restrict. */
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif
| |
| static netdev_features_t harmonize_features(struct sk_buff *skb, |
| netdev_features_t features) |
| { |
| int tmp; |
| __be16 type; |
| |
| type = skb_network_protocol(skb, &tmp); |
| features = net_mpls_features(skb, features, type); |
| |
| if (skb->ip_summed != CHECKSUM_NONE && |
| !can_checksum_protocol(features, type)) { |
| features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
| } |
| if (illegal_highdma(skb->dev, skb)) |
| features &= ~NETIF_F_SG; |
| |
| return features; |
| } |
| |
/* ndo_features_check helper for devices that impose no per-packet
 * restrictions: every requested feature passes through unchanged.
 */
netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);
| |
/* Default check used when the driver supplies no ndo_features_check:
 * apply only the generic VLAN restrictions.
 */
static netdev_features_t dflt_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}
| |
/* Adjust @features for a GSO skb: drop GSO entirely when the segment
 * count exceeds the device limit, strip partial-GSO features unless
 * requested, and forbid IPv4 ID mangling on fragmentable headers.
 */
static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	/* Too many segments for this device: fall back to software GSO */
	if (gso_segs > dev->gso_max_segs)
		return features & ~NETIF_F_GSO_MASK;

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}
| |
/* Compute the feature set usable for transmitting @skb on its device:
 * start from dev->features, then narrow by GSO limits, encapsulation,
 * VLAN tagging, the driver's ndo_features_check, and finally
 * protocol/DMA constraints via harmonize_features().
 */
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	/* Give the driver a chance to veto features for this packet. */
	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
| |
/* Hand one skb to the driver on @txq; @more hints that further skbs
 * follow immediately (lets the driver batch doorbell writes).
 */
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	/* Deliver a copy to any taps (e.g. packet sockets) first. */
	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	/* Cache len: the skb may be gone by the time we trace the result */
	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}
| |
/* Transmit a (possibly linked) list of skbs starting at @first on @txq.
 * Returns the first untransmitted skb (NULL when all were consumed);
 * *@ret holds the last driver return code.
 */
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		/* Unlink before handing to the driver; 'more' is set when
		 * another skb follows.
		 */
		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			/* Driver did not consume it: relink and stop. */
			skb->next = next;
			goto out;
		}

		skb = next;
		/* Stop early if the driver stopped the queue mid-list. */
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
| |
| static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, |
| netdev_features_t features) |
| { |
| if (skb_vlan_tag_present(skb) && |
| !vlan_hw_offload_capable(features, skb->vlan_proto)) |
| skb = __vlan_hwaccel_push_inside(skb); |
| return skb; |
| } |
| |
/* Prepare one skb for transmission on @dev: push the VLAN tag in
 * software when needed, segment GSO packets the device cannot handle,
 * linearize, and resolve checksums the device cannot offload.
 * Returns the ready skb (or segment list), or NULL on failure
 * (accounted in dev->tx_dropped).
 */
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			/* Original skb is replaced by its segment list. */
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
					skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
					skb_checksum_start_offset(skb));
			if (!(features & NETIF_F_CSUM_MASK) &&
			    skb_checksum_help(skb))
				goto out_kfree_skb;
		}
	}

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	atomic_long_inc(&dev->tx_dropped);
	return NULL;
}
| |
/* Run validate_xmit_skb() over every skb on the singly linked list,
 * dropping failures and splicing the (possibly segmented) survivors
 * into one list. Returns the new head, or NULL if nothing survived.
 */
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
| |
/* Initialize qdisc_skb_cb(skb)->pkt_len, inflating it for GSO packets
 * so queue byte accounting approximates what will hit the wire.
 */
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			const struct tcphdr *th;
			struct tcphdr _tcphdr;

			/* skb_header_pointer() guards against a transport
			 * header not yet in the linear area.
			 */
			th = skb_header_pointer(skb, skb_transport_offset(skb),
						sizeof(_tcphdr), &_tcphdr);
			if (likely(th))
				hdr_len += __tcp_hdrlen(th);
		} else {
			struct udphdr _udphdr;

			if (skb_header_pointer(skb, skb_transport_offset(skb),
					       sizeof(_udphdr), &_udphdr))
				hdr_len += sizeof(struct udphdr);
		}

		/* Untrusted (DODGY) sources may lie about gso_segs:
		 * recompute it from len and gso_size.
		 */
		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		/* each additional segment carries its own header copy */
		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
| |
/* Enqueue @skb on qdisc @q — or transmit it directly when the qdisc is
 * bypassable and idle — then run the qdisc. Returns a NET_XMIT_* code.
 */
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		/* Qdisc is being torn down: drop (freed after unlock). */
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			/* Drop busylock before the potentially long
			 * dequeue loop so enqueuers can make progress.
			 */
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	/* Free dropped skbs outside the qdisc locks. */
	if (unlikely(to_free))
		kfree_skb_list(to_free);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
| |
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
/* Set skb->priority from the originating socket's net_prio cgroup map
 * unless a priority was already assigned. Uses rcu_dereference_bh, so
 * the caller is presumably in a BH-safe RCU read section — confirm.
 */
static void skb_update_prio(struct sk_buff *skb)
{
	const struct netprio_map *map;
	const struct sock *sk;
	unsigned int prioidx;

	/* An explicitly set priority wins over the cgroup map. */
	if (skb->priority)
		return;
	map = rcu_dereference_bh(skb->dev->priomap);
	if (!map)
		return;
	sk = skb_to_full_sk(skb);
	if (!sk)
		return;

	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);

	if (prioidx < map->priomap_len)
		skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
#endif
| |
/* Per-CPU transmit recursion depth — presumably read by dev_queue_xmit()
 * callers (outside this chunk) to bound nested transmits; confirm usage.
 */
DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);
| |
| /** |
| * dev_loopback_xmit - loop back @skb |
| * @net: network namespace this loopback is happening in |
| * @sk: sk needed to be a netfilter okfn |
| * @skb: buffer to transmit |
| */ |
| int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) |
| { |
| skb_reset_mac_header(skb); |
| __skb_pull(skb, skb_network_offset(skb)); |
| skb->pkt_type = PACKET_LOOPBACK; |
| skb->ip_summed = CHECKSUM_UNNECESSARY; |
| WARN_ON(!skb_dst(skb)); |
| skb_dst_force(skb); |
| netif_rx_ni(skb); |
| return 0; |
| } |
| EXPORT_SYMBOL(dev_loopback_xmit); |
| |
#ifdef CONFIG_NET_EGRESS
/* Run the egress TC classifier chain on @skb. Returns the skb to keep
 * transmitting, or NULL when it was dropped, consumed or redirected
 * (with *@ret set accordingly).
 */
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
	struct tcf_result cl_res;

	/* No egress classifiers installed: fast path out. */
	if (!cl)
		return skb;

	/* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set
	 * earlier by the caller.
	 */
	qdisc_bstats_cpu_update(cl->q, skb);

	switch (tc_classify(skb, cl, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		/* Drop: account it and report NET_XMIT_DROP. */
		qdisc_qstats_cpu_drop(cl->q);
		*ret = NET_XMIT_DROP;
		kfree_skb(skb);
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		/* Action took ownership; report success to the caller. */
		*ret = NET_XMIT_SUCCESS;
		consume_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	default:
		break;
	}

	return skb;
}
#endif /* CONFIG_NET_EGRESS */
| |
| static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) |
| { |
| #ifdef CONFIG_XPS |
| struct xps_dev_maps *dev_maps; |
| struct xps_map *map; |
| int queue_index = -1; |
| |
| rcu_read_lock(); |
| dev_maps = rcu_dereference(dev->xps_maps); |
| if (dev_maps) { |
| map = rcu_dereference( |
| dev_maps->cpu_map[skb->sender_cpu - 1]); |
| if (map) { |
| if (map->len == 1) |
| queue_index |