Merge android-4.14 (4.14.165) into android-msm-floral-4.14-lts
Merge 4.14.165 into android-4.14
cuttlefish - enable CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
ANDROID: Enable HID_STEAM as y
Linux 4.14.165
drm/i915/gen9: Clear residual context state on context switch
netfilter: ipset: avoid null deref when IPSET_ATTR_LINENO is present
* netfilter: arp_tables: init netns pointer in xt_tgchk_param struct
net/ipv4/netfilter/arp_tables.c
phy: cpcap-usb: Fix flakey host idling and enumerating of devices
phy: cpcap-usb: Fix error path when no host driver is loaded
* USB: Fix: Don't skip endpoint descriptors with maxpacket=0
drivers/usb/core/config.c
* HID: hiddev: fix mess in hiddev_open()
drivers/hid/usbhid/hiddev.c
* arm64: cpufeature: Avoid warnings due to unused symbols
arch/arm64/kernel/cpufeature.c
ath10k: fix memory leak
rtl8xxxu: prevent leaking urb
scsi: bfa: release allocated memory in case of error
mwifiex: pcie: Fix memory leak in mwifiex_pcie_alloc_cmdrsp_buf
mwifiex: fix possible heap overflow in mwifiex_process_country_ie()
* tty: always relink the port
drivers/tty/tty_port.c
* tty: link tty and port before configuring it as console
drivers/tty/serial/serial_core.c
drivers/tty/tty_port.c
staging: rtl8188eu: Add device code for TP-Link TL-WN727N v5.21
* drm/dp_mst: correct the shifting in DP_REMOTE_I2C_READ
drivers/gpu/drm/drm_dp_mst_topology.c
drm/fb-helper: Round up bits_per_pixel if possible
* Input: add safety guards to input_set_keycode()
drivers/input/input.c
* HID: hid-input: clear unmapped usages
drivers/hid/hid-input.c
staging: comedi: adv_pci1710: fix AI channels 16-31 for PCI-1713
usb: musb: dma: Correct parameter passed to IRQ handler
usb: musb: Disable pullup at init
usb: musb: fix idling for suspend after disconnect interrupt
USB: serial: option: add ZLP support for 0x1bc7/0x9010
staging: vt6656: set usb_set_intfdata on driver fail.
gpiolib: acpi: Add honor_wakeup module-option + quirk mechanism
gpiolib: acpi: Turn dmi_system_id table into a generic quirk table
can: can_dropped_invalid_skb(): ensure an initialized headroom in outgoing CAN sk_buffs
can: mscan: mscan_rx_poll(): fix rx path lockup when returning from polling to irq mode
can: gs_usb: gs_usb_probe(): use descriptors of current altsetting
* HID: uhid: Fix returning EPOLLOUT from uhid_char_poll
drivers/hid/uhid.c
* HID: Fix slab-out-of-bounds read in hid_field_extract
drivers/hid/hid-core.c
tracing: Have stack tracer compile when MCOUNT_INSN_SIZE is not defined
kernel/trace: Fix do not unregister tracepoints when register sched_migrate_task fail
ALSA: hda/realtek - Set EAPD control to default for ALC222
ALSA: hda/realtek - Add new codec supported for ALCS1200A
* ALSA: usb-audio: Apply the sample rate quirk for Bose Companion 5
sound/usb/quirks.c
usb: chipidea: host: Disable port power only if previously enabled
* chardev: Avoid potential use-after-free in 'chrdev_open()'
fs/char_dev.c
Merge remote-tracking branch 'origin/upstream-f2fs-stable-linux-4.14.y' into android-4.14
* UPSTREAM: kcov: fix struct layout for kcov_remote_arg
include/uapi/linux/kcov.h
UPSTREAM: vhost, kcov: collect coverage from vhost_worker
* UPSTREAM: usb, kcov: collect coverage from hub_event
drivers/usb/core/hub.c
* BACKPORT: kcov: remote coverage support
include/linux/kcov.h
include/linux/sched.h
include/uapi/linux/kcov.h
* UPSTREAM: kcov: improve CONFIG_ARCH_HAS_KCOV help text
lib/Kconfig.debug
UPSTREAM: kcov: convert kcov.refcount to refcount_t
UPSTREAM: kcov: no need to check return value of debugfs_create functions
UPSTREAM: kernel/kcov.c: mark write_comp_data() as notrace
UPSTREAM: kernel/kcov.c: mark funcs in __sanitizer_cov_trace_pc() as notrace
* BACKPORT: sched/core / kcov: avoid kcov_area during task switch
include/linux/kcov.h
include/linux/sched.h
kernel/sched/core.c
UPSTREAM: kcov: prefault the kcov_area
* BACKPORT: kcov: test compiler capability in Kconfig and correct dependency
lib/Kconfig.debug
scripts/Makefile.gcc-plugins
scripts/Makefile.kcov
* UPSTREAM: gcc-plugins: fix build condition of SANCOV plugin
scripts/Makefile.gcc-plugins
UPSTREAM: kcov: fix comparison callback signature
UPSTREAM: kcov: update documentation
* BACKPORT: Makefile: support flag -fsanitizer-coverage=trace-cmp
Makefile
lib/Kconfig.debug
scripts/Makefile.kcov
* BACKPORT: kcov: support comparison operands collection
include/linux/kcov.h
include/uapi/linux/kcov.h
UPSTREAM: kcov: remove pointless current != NULL check
Merge 4.14.164 into android-4.14
Linux 4.14.164
vlan: fix memory leak in vlan_dev_set_egress_priority
* net: sch_prio: When ungrafting, replace with FIFO
net/sched/sch_prio.c
vlan: vlan_changelink() should propagate errors
vxlan: fix tos value before xmit
* tcp: fix "old stuff" D-SACK causing SACK to be treated as D-SACK
net/ipv4/tcp_input.c
sctp: free cmd->obj.chunk for the unprocessed SCTP_CMD_REPLY
USB: serial: option: add Telit ME910G1 0x110a composition
* USB: core: fix check for duplicate endpoints
drivers/usb/core/config.c
pkt_sched: fq: do not accept silly TCA_FQ_QUANTUM
net: usb: lan78xx: fix possible skb leak
net: stmmac: dwmac-sunxi: Allow all RGMII modes
net: stmmac: dwmac-sun8i: Allow all RGMII modes
net: dsa: mv88e6xxx: Preserve priority when setting CPU port.
* macvlan: do not assume mac_header is set in macvlan_broadcast()
include/linux/if_ether.h
gtp: fix bad unlock balance in gtp_encap_enable_socket
mmc: block: propagate correct returned value in mmc_rpmb_ioctl
mmc: core: Prevent bus reference leak in mmc_blk_init()
mmc: block: Fix bug when removing RPMB chardev
mmc: block: Delete mmc_access_rpmb()
mmc: block: Convert RPMB to a character device
PCI/switchtec: Read all 64 bits of part_event_bitmap
* bpf: Fix passing modified ctx to ld/abs/ind instruction
kernel/bpf/verifier.c
* bpf: reject passing modified ctx to helper functions
kernel/bpf/verifier.c
hv_netvsc: Fix unwanted rx_table reset
llc2: Fix return statement of llc_stat_ev_rx_null_dsap_xid_c (and _test_c)
parisc: Fix compiler warnings in debug_core.c
* block: fix memleak when __blk_rq_map_user_iov() is failed
block/blk-map.c
s390/dasd: fix memleak in path handling error case
s390/dasd/cio: Interpret ccw_device_get_mdc return value correctly
net: stmmac: RX buffer size must be 16 byte aligned
net: stmmac: Do not accept invalid MTU values
* fs: avoid softlockups in s_inodes iterators
fs/drop_caches.c
fs/inode.c
fs/notify/fsnotify.c
fs/quota/dquot.c
perf/x86/intel: Fix PT PMI handling
* kconfig: don't crash on NULL expressions in expr_eq()
scripts/kconfig/expr.c
regulator: rn5t618: fix module aliases
ASoC: wm8962: fix lambda value
* rfkill: Fix incorrect check to avoid NULL pointer dereference
net/rfkill/core.c
net: usb: lan78xx: Fix error message format specifier
bnx2x: Fix logic to get total no. of PFs per engine
bnx2x: Do not handle requests from VFs after parity
powerpc: Ensure that swiotlb buffer is allocated from low memory
samples: bpf: fix syscall_tp due to unused syscall
samples: bpf: Replace symbol compare of trace_event
ARM: dts: am437x-gp/epos-evm: fix panel compatible
bpf, mips: Limit to 33 tail calls
ARM: dts: bcm283x: Fix critical trip point
ASoC: topology: Check return value for soc_tplg_pcm_create()
spi: spi-cavium-thunderx: Add missing pci_release_regions()
ARM: dts: Cygnus: Fix MDIO node address/size cells
netfilter: nf_tables: validate NFT_SET_ELEM_INTERVAL_END
netfilter: uapi: Avoid undefined left-shift in xt_sctp.h
ARM: vexpress: Set-up shared OPP table instead of individual for each CPU
efi/gop: Fix memory leak in __gop_query32/64()
efi/gop: Return EFI_SUCCESS if a usable GOP was found
efi/gop: Return EFI_NOT_FOUND if there are no usable GOPs
x86/efi: Update e820 with reserved EFI boot services data to fix kexec breakage
libtraceevent: Fix lib installation with O=
mwifiex: Fix heap overflow in mwifiex_process_tdls_action_frame()
* netfilter: ctnetlink: netns exit must wait for callbacks
net/netfilter/nf_conntrack_netlink.c
locking/spinlock/debug: Fix various data races
USB: dummy-hcd: increase max number of devices to 32
USB: dummy-hcd: use usb_urb_dir_in instead of usb_pipein
UPSTREAM: USB: dummy-hcd: use usb_urb_dir_in instead of usb_pipein
UPSTREAM: USB: dummy-hcd: increase max number of devices to 32
UPSTREAM: USB: dummy-hcd: Fix failure to give back unlinked URBs
UPSTREAM: USB: dummy-hcd: bandwidth limits for non-bulk transfers
* BACKPORT: perf_event: Add support for LSM and SELinux checks
include/linux/lsm_hooks.h
include/linux/perf_event.h
include/linux/security.h
kernel/events/core.c
kernel/trace/trace_event_perf.c
security/security.c
security/selinux/hooks.c
security/selinux/include/classmap.h
security/selinux/include/objsec.h
ANDROID: cuttlefish_defconfig: remove 80211_HWSIM
docs: fs-verity: mention statx() support
* f2fs: support STATX_ATTR_VERITY
fs/f2fs/file.c
* ext4: support STATX_ATTR_VERITY
fs/ext4/inode.c
* statx: define STATX_ATTR_VERITY
include/linux/stat.h
include/uapi/linux/stat.h
docs: fs-verity: document first supported kernel version
* f2fs: add support for IV_INO_LBLK_64 encryption policies
fs/f2fs/super.c
* ext4: add support for IV_INO_LBLK_64 encryption policies
fs/ext4/ext4.h
fs/ext4/super.c
* fscrypt: add support for IV_INO_LBLK_64 policies
fs/crypto/crypto.c
fs/crypto/fscrypt_private.h
fs/crypto/keyring.c
fs/crypto/keysetup.c
fs/crypto/policy.c
include/linux/fscrypt.h
include/uapi/linux/fscrypt.h
* fscrypt: avoid data race on fscrypt_mode::logged_impl_name
fs/crypto/fscrypt_private.h
fs/crypto/keysetup.c
* fscrypt: zeroize fscrypt_info before freeing
fs/crypto/keysetup.c
* fscrypt: remove struct fscrypt_ctx
fs/crypto/bio.c
fs/crypto/crypto.c
fs/crypto/fscrypt_private.h
include/linux/fscrypt.h
* fscrypt: invoke crypto API for ESSIV handling
fs/crypto/crypto.c
fs/crypto/fscrypt_private.h
fs/crypto/keysetup.c
fs/crypto/keysetup_v1.c
Merge 4.14.163 into android-4.14
* Revert "BACKPORT: perf_event: Add support for LSM and SELinux checks"
include/linux/lsm_hooks.h
include/linux/perf_event.h
include/linux/security.h
kernel/events/core.c
kernel/trace/trace_event_perf.c
security/security.c
security/selinux/hooks.c
security/selinux/include/classmap.h
security/selinux/include/objsec.h
Linux 4.14.163
perf/x86/intel/bts: Fix the use of page_private()
xen/blkback: Avoid unmapping unmapped grant pages
s390/smp: fix physical to logical CPU map for SMT
* net: add annotations on hh->hh_len lockless accesses
include/net/neighbour.h
net/core/neighbour.c
net/ethernet/eth.c
arm64: dts: meson: odroid-c2: Disable usb_otg bus to avoid power failed warning
ath9k_htc: Discard undersized packets
ath9k_htc: Modify byte order for an error message
rxrpc: Fix possible NULL pointer access in ICMP handling
selftests: rtnetlink: add addresses with fixed life time
powerpc/pseries/hvconsole: Fix stack overread via udbg
* drm/mst: Fix MST sideband up-reply failure handling
drivers/gpu/drm/drm_dp_mst_topology.c
scsi: qedf: Do not retry ELS request if qedf_alloc_cmd fails
* fix compat handling of FICLONERANGE, FIDEDUPERANGE and FS_IOC_FIEMAP
fs/compat_ioctl.c
tty: serial: msm_serial: Fix lockup for sysrq and oops
dt-bindings: clock: renesas: rcar-usb2-clock-sel: Fix typo in example
media: usb: fix memory leak in af9005_identify_state
regulator: ab8500: Remove AB8505 USB regulator
media: flexcop-usb: ensure -EIO is returned on error condition
* Bluetooth: Fix memory leak in hci_connect_le_scan
net/bluetooth/hci_conn.c
* Bluetooth: delete a stray unlock
net/bluetooth/l2cap_core.c
Bluetooth: btusb: fix PM leak in error case of setup
platform/x86: pmc_atom: Add Siemens CONNECT X300 to critclk_systems DMI table
xfs: don't check for AG deadlock for realtime files in bunmapi
scsi: qla2xxx: Drop superfluous INIT_WORK of del_work
nfsd4: fix up replay_matches_cache()
* PM / devfreq: Check NULL governor in available_governors_show
drivers/devfreq/devfreq.c
* arm64: Revert support for execute-only user mappings
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/pgtable.h
arch/arm64/mm/fault.c
mm/mmap.c
ftrace: Avoid potential division by zero in function profiler
* exit: panic before exit_mm() on global init exit
kernel/exit.c
ALSA: firewire-motu: Correct a typo in the clock proc string
ALSA: cs4236: fix error return comparison of an unsigned integer
tracing: Have the histogram compare functions convert to u64 first
* tracing: Fix lock inversion in trace_event_enable_tgid_record()
kernel/trace/trace.c
kernel/trace/trace_events.c
* gpiolib: fix up emulated open drain outputs
drivers/gpio/gpiolib.c
ata: ahci_brcm: Fix AHCI resources management
ata: ahci_brcm: Allow optional reset controller to be used
ata: libahci_platform: Export again ahci_platform_<en/dis>able_phys()
* compat_ioctl: block: handle BLKREPORTZONE/BLKRESETZONE
block/compat_ioctl.c
* compat_ioctl: block: handle Persistent Reservations
block/compat_ioctl.c
* dmaengine: Fix access to uninitialized dma_slave_caps
include/linux/dmaengine.h
* locks: print unsigned ino in /proc/locks
fs/locks.c
* pstore/ram: Write new dumps to start of recycled zones
fs/pstore/ram.c
* memcg: account security cred as well to kmemcg
kernel/cred.c
* mm/zsmalloc.c: fix the migrated zspage statistics.
mm/zsmalloc.c
media: cec: avoid decrementing transmit_queue_sz if it is 0
media: cec: CEC 2.0-only bcast messages were ignored
media: pulse8-cec: fix lost cec_transmit_attempt_done() call
MIPS: Avoid VDSO ABI breakage due to global register variable
drm/sun4i: hdmi: Remove duplicate cleanup calls
ALSA: ice1724: Fix sleep-in-atomic in Infrasonic Quartet support code
* drm: limit to INT_MAX in create_blob ioctl
drivers/gpu/drm/drm_property.c
* taskstats: fix data-race
kernel/taskstats.c
xfs: fix mount failure crash on invalid iclog memory access
PM / hibernate: memory_bm_find_bit(): Tighten node optimisation
xen/balloon: fix ballooned page accounting without hotplug enabled
xen-blkback: prevent premature module unload
IB/mlx4: Follow mirror sequence of device add during device removal
s390/cpum_sf: Avoid SBD overflow condition in irq handler
s390/cpum_sf: Adjust sampling interval to avoid hitting sample limits
md: raid1: check rdev before reference in raid1_sync_request func
* net: make socket read/write_iter() honor IOCB_NOWAIT
net/socket.c
usb: gadget: fix wrong endpoint desc
drm/nouveau: Move the declaration of struct nouveau_conn_atom up a bit
scsi: libsas: stop discovering if oob mode is disconnected
scsi: iscsi: qla4xxx: fix double free in probe
scsi: qla2xxx: Don't call qlt_async_event twice
scsi: lpfc: Fix memory leak on lpfc_bsg_write_ebuf_set func
rxe: correctly calculate iCRC for unaligned payloads
RDMA/cma: add missed unregister_pernet_subsys in init failure
* PM / devfreq: Don't fail devfreq_dev_release if not in list
drivers/devfreq/devfreq.c
iio: adc: max9611: Fix too short conversion time delay
nvme_fc: add module to ops template to allow module references
* UPSTREAM: selinux: sidtab reverse lookup hash table
security/selinux/Kconfig
security/selinux/include/security.h
security/selinux/selinuxfs.c
security/selinux/ss/context.h
security/selinux/ss/policydb.c
security/selinux/ss/services.c
security/selinux/ss/services.h
security/selinux/ss/sidtab.c
security/selinux/ss/sidtab.h
* UPSTREAM: selinux: avoid atomic_t usage in sidtab
security/selinux/ss/sidtab.c
security/selinux/ss/sidtab.h
* UPSTREAM: selinux: check sidtab limit before adding a new entry
security/selinux/ss/sidtab.c
* UPSTREAM: selinux: fix context string corruption in convert_context()
security/selinux/ss/services.c
* BACKPORT: selinux: overhaul sidtab to fix bug and improve performance
security/selinux/ss/mls.c
security/selinux/ss/mls.h
security/selinux/ss/services.c
security/selinux/ss/sidtab.c
security/selinux/ss/sidtab.h
* UPSTREAM: selinux: refactor mls_context_to_sid() and make it stricter
security/selinux/ss/mls.c
security/selinux/ss/mls.h
security/selinux/ss/services.c
* UPSTREAM: selinux: Cleanup printk logging in services
security/selinux/ss/services.c
* UPSTREAM: scsi: ilog2: create truly constant version for sparse
include/linux/log2.h
* BACKPORT: selinux: use separate table for initial SID lookup
security/selinux/ss/policydb.c
security/selinux/ss/services.c
security/selinux/ss/services.h
security/selinux/ss/sidtab.c
security/selinux/ss/sidtab.h
* UPSTREAM: selinux: make "selinux_policycap_names[]" const char *
security/selinux/include/security.h
security/selinux/ss/services.c
* UPSTREAM: selinux: refactor sidtab conversion
security/selinux/ss/services.c
security/selinux/ss/sidtab.c
security/selinux/ss/sidtab.h
* BACKPORT: selinux: wrap AVC state
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/include/avc.h
security/selinux/include/avc_ss.h
security/selinux/include/security.h
security/selinux/netlabel.c
security/selinux/selinuxfs.c
security/selinux/ss/services.c
* UPSTREAM: selinux: wrap selinuxfs state
security/selinux/selinuxfs.c
* UPSTREAM: selinux: rename the {is,set}_enforcing() functions
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/include/security.h
security/selinux/selinuxfs.c
security/selinux/ss/services.c
security/selinux/ss/status.c
* BACKPORT: selinux: wrap global selinux state
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/ibpkey.c
security/selinux/include/avc.h
security/selinux/include/avc_ss.h
security/selinux/include/conditional.h
security/selinux/include/objsec.h
security/selinux/include/security.h
security/selinux/netif.c
security/selinux/netlabel.c
security/selinux/netnode.c
security/selinux/netport.c
security/selinux/selinuxfs.c
security/selinux/ss/avtab.c
security/selinux/ss/avtab.h
security/selinux/ss/ebitmap.c
security/selinux/ss/ebitmap.h
security/selinux/ss/hashtab.c
security/selinux/ss/hashtab.h
security/selinux/ss/mls.c
security/selinux/ss/mls.h
security/selinux/ss/services.c
security/selinux/ss/services.h
security/selinux/ss/status.c
* UPSTREAM: selinux: Use kmem_cache for hashtab_node
security/selinux/ss/hashtab.c
security/selinux/ss/hashtab.h
security/selinux/ss/services.c
* BACKPORT: perf_event: Add support for LSM and SELinux checks
include/linux/lsm_hooks.h
include/linux/perf_event.h
include/linux/security.h
kernel/events/core.c
kernel/trace/trace_event_perf.c
security/security.c
security/selinux/hooks.c
security/selinux/include/classmap.h
security/selinux/include/objsec.h
* UPSTREAM: binder: Add binder_proc logging to binderfs
drivers/android/binder.c
drivers/android/binder_internal.h
* UPSTREAM: binder: Make transaction_log available in binderfs
drivers/android/binder.c
drivers/android/binder_internal.h
* UPSTREAM: binder: Add stats, state and transactions files
drivers/android/binder.c
drivers/android/binder_internal.h
UPSTREAM: binder: add a mount option to show global stats
UPSTREAM: binder: Validate the default binderfs device names.
* UPSTREAM: binder: Add default binder devices through binderfs when configured
drivers/android/binder.c
drivers/android/binder_internal.h
* UPSTREAM: binder: fix CONFIG_ANDROID_BINDER_DEVICES
drivers/android/binder.c
* UPSTREAM: android: binder: use kstrdup instead of open-coding it
drivers/android/binder.c
* UPSTREAM: binderfs: remove separate device_initcall()
drivers/android/binder.c
drivers/android/binder_internal.h
BACKPORT: binderfs: respect limit on binder control creation
UPSTREAM: binderfs: switch from d_add() to d_instantiate()
UPSTREAM: binderfs: drop lock in binderfs_binder_ctl_create
UPSTREAM: binderfs: kill_litter_super() before cleanup
UPSTREAM: binderfs: rework binderfs_binder_device_create()
UPSTREAM: binderfs: rework binderfs_fill_super()
UPSTREAM: binderfs: prevent renaming the control dentry
UPSTREAM: binderfs: remove outdated comment
UPSTREAM: binderfs: fix error return code in binderfs_fill_super()
UPSTREAM: binderfs: handle !CONFIG_IPC_NS builds
BACKPORT: binderfs: reserve devices for initial mount
UPSTREAM: binderfs: rename header to binderfs.h
BACKPORT: binderfs: implement "max" mount option
UPSTREAM: binderfs: make each binderfs mount a new instance
UPSTREAM: binderfs: remove wrong kern_mount() call
* BACKPORT: binder: implement binderfs
drivers/android/Kconfig
drivers/android/Makefile
drivers/android/binder.c
drivers/android/binder_internal.h
include/uapi/linux/magic.h
* UPSTREAM: binder: remove BINDER_DEBUG_ENTRY()
drivers/android/binder.c
* UPSTREAM: seq_file: Introduce DEFINE_SHOW_ATTRIBUTE() helper macro
include/linux/seq_file.h
* UPSTREAM: exit: panic before exit_mm() on global init exit
kernel/exit.c
Merge 4.14.162 into android-4.14
Linux 4.14.162
spi: fsl: use platform_get_irq() instead of of_irq_to_resource()
gtp: avoid zero size hashtable
gtp: fix an use-after-free in ipv4_pdp_find()
gtp: fix wrong condition in gtp_genl_dump_pdp()
* tcp: do not send empty skb from tcp_write_xmit()
net/ipv4/tcp_output.c
* tcp/dccp: fix possible race __inet_lookup_established()
include/linux/rculist_nulls.h
include/net/inet_hashtables.h
include/net/sock.h
net/ipv4/inet_diag.c
net/ipv4/inet_hashtables.c
net/ipv4/tcp_ipv4.c
net/ipv6/inet6_hashtables.c
gtp: do not allow adding duplicate tid and ms_addr pdp context
* sit: do not confirm neighbor when do pmtu update
net/ipv6/sit.c
* vti: do not confirm neighbor when do pmtu update
net/ipv4/ip_vti.c
net/ipv6/ip6_vti.c
* tunnel: do not confirm neighbor when do pmtu update
net/ipv4/ip_tunnel.c
net/ipv6/ip6_tunnel.c
* net/dst: add new function skb_dst_update_pmtu_no_confirm
include/net/dst.h
gtp: do not confirm neighbor when do pmtu update
ip6_gre: do not confirm neighbor when do pmtu update
* net: add bool confirm_neigh parameter for dst_ops.update_pmtu
include/net/dst.h
include/net/dst_ops.h
net/ipv4/inet_connection_sock.c
net/ipv4/route.c
net/ipv4/xfrm4_policy.c
net/ipv6/inet6_connection_sock.c
net/ipv6/route.c
net/ipv6/xfrm6_policy.c
vhost/vsock: accept only packets with the right dst_cid
* udp: fix integer overflow while computing available space in sk_rcvbuf
net/ipv4/udp.c
* ptp: fix the race between the release of ptp_clock and cdev
include/linux/posix-clock.h
kernel/time/posix-clock.c
net/mlxfw: Fix out-of-memory error in mfa2 flash burning
net: ena: fix napi handler misbehavior when the napi budget is zero
pinctrl: baytrail: Really serialize all register accesses
tty/serial: atmel: fix out of range clock divider handling
spi: fsl: don't map irq during probe
* hrtimer: Annotate lockless access to timer->state
include/linux/hrtimer.h
kernel/time/hrtimer.c
* net: icmp: fix data-race in cmp_global_allow()
net/ipv4/icmp.c
* net: add a READ_ONCE() in skb_peek_tail()
include/linux/skbuff.h
* inetpeer: fix data-race in inet_putpeer / inet_putpeer
net/ipv4/inetpeer.c
netfilter: bridge: make sure to pull arp header in br_nf_forward_arp()
6pack,mkiss: fix possible deadlock
* netfilter: ebtables: compat: reject all padding in matches/watchers
net/bridge/netfilter/ebtables.c
* filldir[64]: remove WARN_ON_ONCE() for bad directory entries
fs/readdir.c
* Make filldir[64]() verify the directory entry filename is valid
fs/readdir.c
perf strbuf: Remove redundant va_end() in strbuf_addv()
* bonding: fix active-backup transition after link failure
drivers/net/bonding/bond_main.c
ALSA: hda - Downgrade error message for single-cmd fallback
* netfilter: nf_queue: enqueue skbs with NULL dst
net/netfilter/nf_queue.c
* net, sysctl: Fix compiler warning when only cBPF is present
net/core/sysctl_net_core.c
x86/mce: Fix possibly incorrect severity calculation on AMD
userfaultfd: require CAP_SYS_PTRACE for UFFD_FEATURE_EVENT_FORK
* kernel: sysctl: make drop_caches write-only
kernel/sysctl.c
ocfs2: fix passing zero to 'PTR_ERR' warning
s390/cpum_sf: Check for SDBT and SDB consistency
* libfdt: define INT32_MAX and UINT32_MAX in libfdt_env.h
include/linux/libfdt_env.h
s390/zcrypt: handle new reply code FILTERED_BY_HYPERVISOR
perf regs: Make perf_reg_name() return "unknown" instead of NULL
perf script: Fix brstackinsn for AUXTRACE
cdrom: respect device capabilities during opening action
* scripts/kallsyms: fix definitely-lost memory leak
scripts/kallsyms.c
apparmor: fix unsigned len comparison with less than zero
gpio: mpc8xxx: Don't overwrite default irq_set_type callback
scsi: target: iscsi: Wait for all commands to finish before freeing a session
scsi: iscsi: Don't send data to unbound connection
scsi: NCR5380: Add disconnect_mask module parameter
scsi: scsi_debug: num_tgts must be >= 0
* scsi: ufs: Fix error handing during hibern8 enter
drivers/scsi/ufs/ufshcd.c
scsi: pm80xx: Fix for SATA device discovery
* HID: Improve Windows Precision Touchpad detection.
drivers/hid/hid-core.c
libnvdimm/btt: fix variable 'rc' set but not used
HID: logitech-hidpp: Silence intermittent get_battery_capacity errors
bcache: at least try to shrink 1 node in bch_mca_scan()
clk: pxa: fix one of the pxa RTC clocks
scsi: atari_scsi: sun3_scsi: Set sg_tablesize to 1 instead of SG_NONE
powerpc/security: Fix wrong message when RFI Flush is disable
powerpc/pseries/cmm: Implement release() function for sysfs device
* scsi: ufs: fix potential bug which ends in system hang
drivers/scsi/ufs/ufshcd.c
scsi: lpfc: fix: Coverity: lpfc_cmpl_els_rsp(): Null pointer dereferences
* fs/quota: handle overflows of sysctl fs.quota.* and report as unsigned long
fs/quota/dquot.c
include/linux/quota.h
irqchip: ingenic: Error out if IRQ domain creation failed
irqchip/irq-bcm7038-l1: Enable parent IRQ if necessary
* clk: qcom: Allow constant ratio freq tables for rcg
drivers/clk/qcom/clk-rcg2.c
drivers/clk/qcom/common.c
* f2fs: fix to update dir's i_pino during cross_rename
fs/f2fs/namei.c
scsi: lpfc: Fix duplicate unreg_rpi error in port offline flow
* scsi: tracing: Fix handling of TRANSFER LENGTH == 0 for READ(6) and WRITE(6)
drivers/scsi/scsi_trace.c
* jbd2: Fix statistics for the number of logged blocks
fs/jbd2/commit.c
* ext4: update direct I/O read lock pattern for IOCB_NOWAIT
fs/ext4/inode.c
powerpc/book3s64/hash: Add cond_resched to avoid soft lockup warning
powerpc/security/book3s64: Report L1TF status in sysfs
clocksource/drivers/asm9260: Add a check for of_clk_get
dma-debug: add a schedule point in debug_dma_dump_mappings()
powerpc/tools: Don't quote $objdump in scripts
powerpc/pseries: Don't fail hash page table insert for bolted mapping
powerpc/pseries: Mark accumulate_stolen_time() as notrace
scsi: csiostor: Don't enable IRQs too early
scsi: lpfc: Fix SLI3 hba in loop mode not discovering devices
scsi: target: compare full CHAP_A Algorithm strings
iommu/tegra-smmu: Fix page tables in > 4 GiB memory
Input: atmel_mxt_ts - disable IRQ across suspend
scsi: lpfc: Fix locking on mailbox command completion
scsi: mpt3sas: Fix clear pending bit in ioctl status
scsi: lpfc: Fix discovery failures when target device connectivity bounces
* ANDROID: serdev: Fix platform device support
drivers/tty/serdev/core.c
Merge 4.14.161 into android-4.14
Linux 4.14.161
perf probe: Fix to show function entry line as probe-able
nbd: fix shutdown and recv work deadlock v2
mmc: sdhci-of-esdhc: fix P2020 errata handling
mmc: sdhci: Update the tuning failed messages to pr_debug level
mmc: sdhci-of-esdhc: Revert "mmc: sdhci-of-esdhc: add erratum A-009204 support"
powerpc/irq: fix stack overflow verification
x86/MCE/AMD: Allow Reserved types to be overwritten in smca_banks[]
x86/MCE/AMD: Do not use rdmsr_safe_on_cpu() in smca_configure()
KVM: arm64: Ensure 'params' is initialised when looking up sys register
* ext4: unlock on error in ext4_expand_extra_isize()
fs/ext4/inode.c
* ext4: check for directory entries too close to block end
fs/ext4/dir.c
* ext4: fix ext4_empty_dir() for directories with holes
fs/ext4/namei.c
staging: comedi: gsc_hpdi: check dma_alloc_coherent() return value
platform/x86: hp-wmi: Make buffer for HPWMI_FEATURE2_QUERY 128 bytes
intel_th: pci: Add Elkhart Lake SOC support
intel_th: pci: Add Comet Lake PCH-V support
USB: EHCI: Do not return -EPIPE when hub is disconnected
usbip: Fix error path of vhci_recv_ret_submit()
usbip: Fix receive error in vhci-hcd when using scatter-gather
btrfs: abort transaction after failed inode updates in create_subvol
btrfs: return error pointer from alloc_test_extent_buffer
s390/ftrace: fix endless recursion in function_graph tracer
* usb: xhci: Fix build warning seen with CONFIG_PM=n
drivers/usb/host/xhci-pci.c
mmc: mediatek: fix CMD_TA to 2 for MT8173 HS200/HS400 mode
Revert "mmc: sdhci: Fix incorrect switch to HS mode"
btrfs: don't prematurely free work in scrub_missing_raid56_worker()
btrfs: don't prematurely free work in reada_start_machine_worker()
* net: phy: initialise phydev speed and duplex sanely
drivers/net/phy/phy_device.c
mips: fix build when "48 bits virtual memory" is enabled
libtraceevent: Fix memory leakage in copy_filter_type
crypto: vmx - Avoid weird build failures
mac80211: consider QoS Null frames for STA_NULLFUNC_ACKED
crypto: sun4i-ss - Fix 64-bit size_t warnings on sun4i-ss-hash.c
crypto: sun4i-ss - Fix 64-bit size_t warnings
fbtft: Make sure string is NULL terminated
iwlwifi: check kasprintf() return value
x86/insn: Add some Intel instructions to the opcode map
spi: st-ssc4: add missed pm_runtime_disable
btrfs: don't prematurely free work in run_ordered_work()
btrfs: don't prematurely free work in end_workqueue_fn()
mmc: tmio: Add MMC_CAP_ERASE to allow erase/discard/trim requests
crypto: virtio - deal with unsupported input sizes
spi: tegra20-slink: add missed clk_unprepare
iwlwifi: mvm: fix unaligned read of rx_pkt_status
x86/crash: Add a forward declaration of struct kimage
* cpufreq: Register drivers only after CPU devices have been registered
drivers/cpufreq/cpufreq.c
parport: load lowlevel driver if ports not found
s390/disassembler: don't hide instruction addresses
ASoC: Intel: kbl_rt5663_rt5514_max98927: Add dmic format constraint
ASoC: rt5677: Mark reg RT5677_PWR_ANLG2 as volatile
spi: pxa2xx: Add missed security checks
EDAC/ghes: Fix grain calculation
media: si470x-i2c: add missed operations in remove
media: pvrusb2: Fix oops on tear-down when radio support is not present
fsi: core: Fix small accesses and unaligned offsets via sysfs
ath10k: fix get invalid tx rate for Mesh metric
perf probe: Filter out instances except for inlined subroutine and subprogram
perf probe: Skip end-of-sequence and non statement lines
perf probe: Fix to show calling lines of inlined functions
perf probe: Return a better scope DIE if there is no best scope
perf probe: Skip overlapped location on searching variables
perf parse: If pmu configuration fails free terms
drm/amdgpu: fix potential double drop fence reference
perf probe: Fix to probe a function which has no entry pc
libsubcmd: Use -O0 with DEBUG=1
perf probe: Fix to show inlined function callsite without entry_pc
perf probe: Fix to show ranges of variables in functions without entry_pc
perf probe: Fix to probe an inline function which has no entry pc
perf probe: Walk function lines in lexical blocks
perf probe: Fix to list probe event with correct line number
perf probe: Fix to find range-only function instance
rtlwifi: fix memory leak in rtl92c_set_fw_rsvdpagepkt()
* ALSA: timer: Limit max amount of slave instances
sound/core/timer.c
spi: img-spfi: fix potential double release
bnx2x: Fix PF-VF communication over multi-cos queues.
* rfkill: allocate static minor
include/linux/miscdevice.h
net/rfkill/core.c
* media: v4l2-core: fix touch support in v4l_g_fmt
drivers/media/v4l2-core/v4l2-ioctl.c
media: rcar_drif: fix a memory disclosure
ixgbe: protect TX timestamping from API misuse
pinctrl: amd: fix __iomem annotation in amd_gpio_irq_handler()
* Bluetooth: Fix advertising duplicated flags
net/bluetooth/hci_request.c
iio: dln2-adc: fix iio_triggered_buffer_postenable() position
pinctrl: sh-pfc: sh7734: Fix duplicate TCLK1_B
* loop: fix no-unmap write-zeroes request behavior
drivers/block/loop.c
libata: Ensure ata_port probe has completed before detach
s390/mm: add mm_pxd_folded() checks to pxd_free()
s390/time: ensure get_clock_monotonic() returns monotonic values
phy: qcom-usb-hs: Fix extcon double register after power cycle
* net: dsa: LAN9303: select REGMAP when LAN9303 enable
drivers/net/dsa/Kconfig
gpu: host1x: Allocate gather copy for host1x
RDMA/qedr: Fix memory leak in user qp and mr
net: phy: dp83867: enable robust auto-mdix
* arm64: psci: Reduce the waiting time for cpu_psci_cpu_kill()
arch/arm64/kernel/psci.c
x86/ioapic: Prevent inconsistent state when moving an interrupt
rtl8xxxu: fix RTL8723BU connection failure issue after warm reboot
drm/gma500: fix memory disclosures due to uninitialized bytes
x86/mce: Lower throttling MCE messages' priority to warning
* Bluetooth: hci_core: fix init for HCI_USER_CHANNEL
net/bluetooth/hci_core.c
* Bluetooth: missed cpu_to_le16 conversion in hci_init4_req
net/bluetooth/hci_core.c
iio: adc: max1027: Reset the device at probe time
* usb: usbfs: Suppress problematic bind and unbind uevents.
drivers/usb/core/devio.c
perf report: Add warning when libunwind not compiled in
perf test: Report failure for mmap events
drm/bridge: dw-hdmi: Restore audio when setting a mode
x86/mm: Use the correct function type for native_set_fixmap()
extcon: sm5502: Reset registers during initialization
media: ti-vpe: vpe: fix a v4l2-compliance failure about invalid sizeimage
media: ti-vpe: vpe: ensure buffers are cleaned up properly in abort cases
media: ti-vpe: vpe: fix a v4l2-compliance failure causing a kernel panic
media: ti-vpe: vpe: Make sure YUYV is set as default format
media: ti-vpe: vpe: fix a v4l2-compliance failure about frame sequence number
media: ti-vpe: vpe: fix a v4l2-compliance warning about invalid pixel format
media: ti-vpe: vpe: Fix Motion Vector vpdma stride
media: cx88: Fix some error handling path in 'cx8800_initdev()'
mwifiex: pcie: Fix memory leak in mwifiex_pcie_init_evt_ring
block: Fix writeback throttling W=1 compiler warnings
samples: pktgen: fix proc_cmd command result check logic
drm/bridge: dw-hdmi: Refuse DDC/CI transfers on the internal I2C controller
media: cec-funcs.h: add status_req checks
media: flexcop-usb: fix NULL-ptr deref in flexcop_usb_transfer_init()
regulator: max8907: Fix the usage of uninitialized variable in max8907_regulator_probe()
hwrng: omap3-rom - Call clk_disable_unprepare() on exit only if not idled
usb: renesas_usbhs: add suspend event support in gadget mode
selftests/bpf: Correct path to include msg + path
* pinctrl: devicetree: Avoid taking direct reference to device name string
drivers/pinctrl/devicetree.c
ath10k: fix offchannel tx failure when no ath10k_mac_tx_frm_has_freq
media: venus: core: Fix msm8996 frequency table
tools/power/cpupower: Fix initializer override in hsw_ext_cstates
media: ov6650: Fix stored crop rectangle not in sync with hardware
media: ov6650: Fix stored frame format not in sync with hardware
media: i2c: ov2659: Fix missing 720p register config
media: ov6650: Fix crop rectangle alignment not passed back
media: i2c: ov2659: fix s_stream return value
media: am437x-vpfe: Setting STD to current value is not an error
IB/iser: bound protection_sg size by data_sg size
libertas: fix a potential NULL pointer dereference
rtlwifi: prevent memory leak in rtl_usb_probe
staging: rtl8188eu: fix possible null dereference
staging: rtl8192u: fix multiple memory leaks on error path
* spi: Add call to spi_slave_abort() function when spidev driver is released
drivers/spi/spidev.c
iio: light: bh1750: Resolve compiler warning and make code more readable
drm/bridge: analogix-anx78xx: silence -EPROBE_DEFER warnings
* drm: mst: Fix query_payload ack reply struct
include/drm/drm_dp_mst_helper.h
ALSA: hda/ca0132 - Avoid endless loop
ALSA: hda/ca0132 - Keep power on during processing DSP response
* ALSA: pcm: Avoid possible info leaks from PCM stream buffers
sound/core/pcm_native.c
Btrfs: fix removal logic of the tree mod log that leads to use-after-free issues
btrfs: handle ENOENT in btrfs_uuid_tree_iterate
btrfs: do not leak reloc root if we fail to read the fs root
btrfs: skip log replay on orphaned roots
btrfs: do not call synchronize_srcu() in inode_tree_del
btrfs: don't double lock the subvol_sem for rename exchange
sctp: fully initialize v4 addr in some functions
qede: Fix multicast mac configuration
net: usb: lan78xx: Fix suspend/resume PHY register access error
net: qlogic: Fix error paths in ql_alloc_large_buffers()
net: nfc: nci: fix a possible sleep-in-atomic-context bug in nci_uart_tty_receive()
net: hisilicon: Fix a BUG triggered by wrong bytes_compl
* net: dst: Force 4-byte alignment of dst_metrics
include/net/dst.h
* mod_devicetable: fix PHY module format
include/linux/mod_devicetable.h
fjes: fix missed check in fjes_acpi_add
af_packet: set default value for tmo
net/packet/af_packet.c
ANDROID: cuttlefish_defconfig: Disable TRANSPARENT_HUGEPAGE
Change-Id: If5fe1dee627f430a68a27040c29f1015fd7c5ed1
Signed-off-by: lucaswei <lucaswei@google.com>
diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst
index 44886c9..f254173b 100644
--- a/Documentation/dev-tools/kcov.rst
+++ b/Documentation/dev-tools/kcov.rst
@@ -12,19 +12,31 @@
and instrumentation of some inherently non-deterministic parts of kernel is
disabled (e.g. scheduler, locking).
-Usage
------
+kcov is also able to collect comparison operands from the instrumented code
+(this feature currently requires that the kernel is compiled with clang).
+
+Prerequisites
+-------------
Configure the kernel with::
CONFIG_KCOV=y
CONFIG_KCOV requires gcc built on revision 231296 or later.
+
+If the comparison operands need to be collected, set::
+
+ CONFIG_KCOV_ENABLE_COMPARISONS=y
+
Profiling data will only become accessible once debugfs has been mounted::
mount -t debugfs none /sys/kernel/debug
-The following program demonstrates kcov usage from within a test program:
+Coverage collection
+-------------------
+
+The following program demonstrates coverage collection from within a test
+program using kcov:
.. code-block:: c
@@ -44,6 +56,9 @@
#define KCOV_DISABLE _IO('c', 101)
#define COVER_SIZE (64<<10)
+ #define KCOV_TRACE_PC 0
+ #define KCOV_TRACE_CMP 1
+
int main(int argc, char **argv)
{
int fd;
@@ -64,7 +79,7 @@
if ((void*)cover == MAP_FAILED)
perror("mmap"), exit(1);
/* Enable coverage collection on the current thread. */
- if (ioctl(fd, KCOV_ENABLE, 0))
+ if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
perror("ioctl"), exit(1);
/* Reset coverage from the tail of the ioctl() call. */
__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
@@ -111,3 +126,208 @@
That is, a parent process opens /sys/kernel/debug/kcov, enables trace mode,
mmaps coverage buffer and then forks child processes in a loop. Child processes
only need to enable coverage (disable happens automatically on thread end).
+
+Comparison operands collection
+------------------------------
+
+Comparison operands collection is similar to coverage collection:
+
+.. code-block:: c
+
+ /* Same includes and defines as above. */
+
+ /* Number of 64-bit words per record. */
+ #define KCOV_WORDS_PER_CMP 4
+
+ /*
+ * The format for the types of collected comparisons.
+ *
+ * Bit 0 shows whether one of the arguments is a compile-time constant.
+ * Bits 1 & 2 contain log2 of the argument size, up to 8 bytes.
+ */
+
+ #define KCOV_CMP_CONST (1 << 0)
+ #define KCOV_CMP_SIZE(n) ((n) << 1)
+ #define KCOV_CMP_MASK KCOV_CMP_SIZE(3)
+
+ int main(int argc, char **argv)
+ {
+ int fd;
+ uint64_t *cover, type, arg1, arg2, is_const, size, ip;
+ unsigned long n, i;
+
+ fd = open("/sys/kernel/debug/kcov", O_RDWR);
+ if (fd == -1)
+ perror("open"), exit(1);
+ if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
+ perror("ioctl"), exit(1);
+ /*
+ * Note that the buffer pointer is of type uint64_t*, because all
+ * the comparison operands are promoted to uint64_t.
+ */
+ cover = (uint64_t *)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if ((void*)cover == MAP_FAILED)
+ perror("mmap"), exit(1);
+ /* Note KCOV_TRACE_CMP instead of KCOV_TRACE_PC. */
+ if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP))
+ perror("ioctl"), exit(1);
+ __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
+ read(-1, NULL, 0);
+ /* Read number of comparisons collected. */
+ n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
+ for (i = 0; i < n; i++) {
+ type = cover[i * KCOV_WORDS_PER_CMP + 1];
+ /* arg1 and arg2 - operands of the comparison. */
+ arg1 = cover[i * KCOV_WORDS_PER_CMP + 2];
+ arg2 = cover[i * KCOV_WORDS_PER_CMP + 3];
+ /* ip - caller address. */
+ ip = cover[i * KCOV_WORDS_PER_CMP + 4];
+ /* size of the operands. */
+ size = 1 << ((type & KCOV_CMP_MASK) >> 1);
+ /* is_const - true if either operand is a compile-time constant.*/
+ is_const = type & KCOV_CMP_CONST;
+ printf("ip: 0x%lx type: 0x%lx, arg1: 0x%lx, arg2: 0x%lx, "
+ "size: %lu, %s\n",
+ ip, type, arg1, arg2, size,
+ is_const ? "const" : "non-const");
+ }
+ if (ioctl(fd, KCOV_DISABLE, 0))
+ perror("ioctl"), exit(1);
+ /* Free resources. */
+ if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
+ perror("munmap"), exit(1);
+ if (close(fd))
+ perror("close"), exit(1);
+ return 0;
+ }
+
+Note that the kcov modes (coverage collection or comparison operands) are
+mutually exclusive.
+
+Remote coverage collection
+--------------------------
+
+With KCOV_ENABLE coverage is collected only for syscalls that are issued
+from the current process. With KCOV_REMOTE_ENABLE it's possible to collect
+coverage for arbitrary parts of the kernel code, provided that those parts
+are annotated with kcov_remote_start()/kcov_remote_stop().
+
+This makes it possible to collect coverage from two types of kernel
+background threads: the global ones, which are spawned during kernel boot
+in a limited number of instances (e.g. one USB hub_event() worker thread
+is spawned per USB HCD); and the local ones, which are spawned when a user
+interacts with some kernel interface (e.g. vhost workers).
+
+To enable collecting coverage from a global background thread, a unique
+global handle must be assigned and passed to the corresponding
+kcov_remote_start() call. Then a userspace process can pass a list of such
+handles to the KCOV_REMOTE_ENABLE ioctl in the handles array field of the
+kcov_remote_arg struct. This will attach the used kcov device to the code
+sections that are referenced by those handles.
+
+Since there might be many local background threads spawned from different
+userspace processes, we can't use a single global handle per annotation.
+Instead, the userspace process passes a non-zero handle through the
+common_handle field of the kcov_remote_arg struct. This common handle gets
+saved to the kcov_handle field in the current task_struct and needs to be
+passed to the newly spawned threads via custom annotations. Those threads
+should in turn be annotated with kcov_remote_start()/kcov_remote_stop().
+
+Internally kcov stores handles as u64 integers. The top byte of a handle
+is used to denote the id of a subsystem that this handle belongs to, and
+the lower 4 bytes are used to denote the id of a thread instance within
+that subsystem. A reserved value 0 is used as a subsystem id for common
+handles as they don't belong to a particular subsystem. The bytes 4-7 are
+currently reserved and must be zero. In the future the number of bytes
+used for the subsystem or handle ids might be increased.
+
+When a particular userspace process collects coverage via a common
+handle, kcov will collect coverage for each code section that is annotated
+to use the common handle obtained as kcov_handle from the current
+task_struct. However, non-common handles allow coverage to be collected
+selectively from different subsystems.
+
+.. code-block:: c
+
+ struct kcov_remote_arg {
+ __u32 trace_mode;
+ __u32 area_size;
+ __u32 num_handles;
+ __aligned_u64 common_handle;
+ __aligned_u64 handles[0];
+ };
+
+ #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
+ #define KCOV_DISABLE _IO('c', 101)
+ #define KCOV_REMOTE_ENABLE _IOW('c', 102, struct kcov_remote_arg)
+
+ #define COVER_SIZE (64 << 10)
+
+ #define KCOV_TRACE_PC 0
+
+ #define KCOV_SUBSYSTEM_COMMON (0x00ull << 56)
+ #define KCOV_SUBSYSTEM_USB (0x01ull << 56)
+
+ #define KCOV_SUBSYSTEM_MASK (0xffull << 56)
+ #define KCOV_INSTANCE_MASK (0xffffffffull)
+
+ static inline __u64 kcov_remote_handle(__u64 subsys, __u64 inst)
+ {
+ if (subsys & ~KCOV_SUBSYSTEM_MASK || inst & ~KCOV_INSTANCE_MASK)
+ return 0;
+ return subsys | inst;
+ }
+
+ #define KCOV_COMMON_ID 0x42
+ #define KCOV_USB_BUS_NUM 1
+
+ int main(int argc, char **argv)
+ {
+ int fd;
+ unsigned long *cover, n, i;
+ struct kcov_remote_arg *arg;
+
+ fd = open("/sys/kernel/debug/kcov", O_RDWR);
+ if (fd == -1)
+ perror("open"), exit(1);
+ if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
+ perror("ioctl"), exit(1);
+ cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if ((void*)cover == MAP_FAILED)
+ perror("mmap"), exit(1);
+
+ /* Enable coverage collection via common handle and from USB bus #1. */
+ arg = calloc(1, sizeof(*arg) + sizeof(uint64_t));
+ if (!arg)
+ perror("calloc"), exit(1);
+ arg->trace_mode = KCOV_TRACE_PC;
+ arg->area_size = COVER_SIZE;
+ arg->num_handles = 1;
+ arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON,
+ KCOV_COMMON_ID);
+ arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB,
+ KCOV_USB_BUS_NUM);
+ if (ioctl(fd, KCOV_REMOTE_ENABLE, arg))
+ perror("ioctl"), free(arg), exit(1);
+ free(arg);
+
+ /*
+ * Here the user needs to trigger execution of a kernel code section
+ * that is either annotated with the common handle, or to trigger some
+ * activity on USB bus #1.
+ */
+ sleep(2);
+
+ n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
+ for (i = 0; i < n; i++)
+ printf("0x%lx\n", cover[i + 1]);
+ if (ioctl(fd, KCOV_DISABLE, 0))
+ perror("ioctl"), exit(1);
+ if (munmap(cover, COVER_SIZE * sizeof(unsigned long)))
+ perror("munmap"), exit(1);
+ if (close(fd))
+ perror("close"), exit(1);
+ return 0;
+ }
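For reference, the remote-coverage text above describes the handle layout only in prose (subsystem id in the top byte, instance id in the low 4 bytes, bytes 4-7 reserved). Below is a minimal user-space sketch of decoding such a handle, reusing the mask definitions from the example program; the helper names are illustrative and not part of the kcov UAPI.

.. code-block:: c

    #include <stdint.h>

    #define KCOV_SUBSYSTEM_MASK (0xffull << 56)
    #define KCOV_INSTANCE_MASK  (0xffffffffull)

    /* Top byte of the handle: the subsystem id. */
    static inline unsigned int kcov_handle_subsys(uint64_t handle)
    {
        return (unsigned int)((handle & KCOV_SUBSYSTEM_MASK) >> 56);
    }

    /* Low 4 bytes of the handle: the instance id within that subsystem. */
    static inline uint32_t kcov_handle_inst(uint64_t handle)
    {
        return (uint32_t)(handle & KCOV_INSTANCE_MASK);
    }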
diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
index e96e085..83f6c6a 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
@@ -46,7 +46,7 @@
Example (R-Car H3):
usb2_clksel: clock-controller@e6590630 {
- compatible = "renesas,r8a77950-rcar-usb2-clock-sel",
+ compatible = "renesas,r8a7795-rcar-usb2-clock-sel",
"renesas,rcar-gen3-usb2-clock-sel";
reg = <0 0xe6590630 0 0x02>;
clocks = <&cpg CPG_MOD 703>, <&usb_extal>, <&usb_xtal>;
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 8a0700a..471a511 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -256,13 +256,8 @@
the master keys may be wrapped in userspace, e.g. as is done by the
`fscrypt <https://github.com/google/fscrypt>`_ tool.
-Including the inode number in the IVs was considered. However, it was
-rejected as it would have prevented ext4 filesystems from being
-resized, and by itself still wouldn't have been sufficient to prevent
-the same key from being directly reused for both XTS and CTS-CBC.
-
-DIRECT_KEY and per-mode keys
-----------------------------
+DIRECT_KEY policies
+-------------------
The Adiantum encryption mode (see `Encryption modes and usage`_) is
suitable for both contents and filenames encryption, and it accepts
@@ -285,6 +280,21 @@
key derived using the KDF. Users may use the same master key for
other v2 encryption policies.
+IV_INO_LBLK_64 policies
+-----------------------
+
+When FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 is set in the fscrypt policy,
+the encryption keys are derived from the master key, encryption mode
+number, and filesystem UUID. This normally results in all files
+protected by the same master key sharing a single contents encryption
+key and a single filenames encryption key. To still encrypt different
+files' data differently, inode numbers are included in the IVs.
+Consequently, shrinking the filesystem may not be allowed.
+
+This format is optimized for use with inline encryption hardware
+compliant with the UFS or eMMC standards, which support only 64 IV
+bits per I/O request and may have only a small number of keyslots.
+
Key identifiers
---------------
@@ -308,8 +318,9 @@
AES-128-CBC was added only for low-powered embedded devices with
crypto accelerators such as CAAM or CESA that do not support XTS. To
-use AES-128-CBC, CONFIG_CRYPTO_SHA256 (or another SHA-256
-implementation) must be enabled so that ESSIV can be used.
+use AES-128-CBC, CONFIG_CRYPTO_ESSIV and CONFIG_CRYPTO_SHA256 (or
+another SHA-256 implementation) must be enabled so that ESSIV can be
+used.
Adiantum is a (primarily) stream cipher-based mode that is fast even
on CPUs without dedicated crypto instructions. It's also a true
@@ -341,10 +352,16 @@
is encrypted with AES-256 where the AES-256 key is the SHA-256 hash
of the file's data encryption key.
-- In the "direct key" configuration (FSCRYPT_POLICY_FLAG_DIRECT_KEY
- set in the fscrypt_policy), the file's nonce is also appended to the
- IV. Currently this is only allowed with the Adiantum encryption
- mode.
+- With `DIRECT_KEY policies`_, the file's nonce is appended to the IV.
+ Currently this is only allowed with the Adiantum encryption mode.
+
+- With `IV_INO_LBLK_64 policies`_, the logical block number is limited
+ to 32 bits and is placed in bits 0-31 of the IV. The inode number
+ (which is also limited to 32 bits) is placed in bits 32-63.
+
+Note that because file logical block numbers are included in the IVs,
+filesystems must enforce that blocks are never shifted around within
+encrypted files, e.g. via "collapse range" or "insert range".
Filenames encryption
--------------------
@@ -354,10 +371,10 @@
filenames of up to 255 bytes, the same IV is used for every filename
in a directory.
-However, each encrypted directory still uses a unique key; or
-alternatively (for the "direct key" configuration) has the file's
-nonce included in the IVs. Thus, IV reuse is limited to within a
-single directory.
+However, each encrypted directory still uses a unique key, or
+alternatively has the file's nonce (for `DIRECT_KEY policies`_) or
+inode number (for `IV_INO_LBLK_64 policies`_) included in the IVs.
+Thus, IV reuse is limited to within a single directory.
With CTS-CBC, the IV reuse means that when the plaintext filenames
share a common prefix at least as long as the cipher block size (16
@@ -431,12 +448,15 @@
(1) for ``contents_encryption_mode`` and FSCRYPT_MODE_AES_256_CTS
(4) for ``filenames_encryption_mode``.
-- ``flags`` must contain a value from ``<linux/fscrypt.h>`` which
- identifies the amount of NUL-padding to use when encrypting
- filenames. If unsure, use FSCRYPT_POLICY_FLAGS_PAD_32 (0x3).
- Additionally, if the encryption modes are both
- FSCRYPT_MODE_ADIANTUM, this can contain
- FSCRYPT_POLICY_FLAG_DIRECT_KEY; see `DIRECT_KEY and per-mode keys`_.
+- ``flags`` contains optional flags from ``<linux/fscrypt.h>``:
+
+ - FSCRYPT_POLICY_FLAGS_PAD_*: The amount of NUL padding to use when
+ encrypting filenames. If unsure, use FSCRYPT_POLICY_FLAGS_PAD_32
+ (0x3).
+ - FSCRYPT_POLICY_FLAG_DIRECT_KEY: See `DIRECT_KEY policies`_.
+ - FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64: See `IV_INO_LBLK_64
+ policies`_. This is mutually exclusive with DIRECT_KEY and is not
+ supported on v1 policies.
- For v2 encryption policies, ``__reserved`` must be zeroed.
@@ -1089,7 +1109,7 @@
context structs also contain a nonce. The nonce is randomly generated
by the kernel and is used as KDF input or as a tweak to cause
different files to be encrypted differently; see `Per-file keys`_ and
-`DIRECT_KEY and per-mode keys`_.
+`DIRECT_KEY policies`_.
Data path changes
-----------------
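For reference, a minimal sketch of the 64-bit IV layout that the IV_INO_LBLK_64 text above describes (logical block number in bits 0-31, inode number in bits 32-63). The helper name is illustrative, and both values are assumed to already fit in 32 bits, as the policy requires.

.. code-block:: c

    #include <stdint.h>

    /*
     * Pack the per-block IV for an IV_INO_LBLK_64 policy: inode number in
     * the high half, logical block number in the low half.
     */
    static inline uint64_t iv_ino_lblk_64(uint32_t inode_number,
                                          uint32_t logical_block)
    {
        return ((uint64_t)inode_number << 32) | logical_block;
    }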
diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst
index 42a0b6d..a95536b 100644
--- a/Documentation/filesystems/fsverity.rst
+++ b/Documentation/filesystems/fsverity.rst
@@ -226,6 +226,14 @@
The verity flag is not settable via FS_IOC_SETFLAGS. You must use
FS_IOC_ENABLE_VERITY instead, since parameters must be provided.
+statx
+-----
+
+Since Linux v5.5, the statx() system call sets STATX_ATTR_VERITY if
+the file has fs-verity enabled. This can perform better than
+FS_IOC_GETFLAGS and FS_IOC_MEASURE_VERITY because it doesn't require
+opening the file, and opening verity files can be expensive.
+
Accessing verity files
======================
@@ -398,7 +406,7 @@
ext4
----
-ext4 supports fs-verity since Linux TODO and e2fsprogs v1.45.2.
+ext4 supports fs-verity since Linux v5.4 and e2fsprogs v1.45.2.
To create verity files on an ext4 filesystem, the filesystem must have
been formatted with ``-O verity`` or had ``tune2fs -O verity`` run on
@@ -434,7 +442,7 @@
f2fs
----
-f2fs supports fs-verity since Linux TODO and f2fs-tools v1.11.0.
+f2fs supports fs-verity since Linux v5.4 and f2fs-tools v1.11.0.
To create verity files on an f2fs filesystem, the filesystem must have
been formatted with ``-O verity``.
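For reference, a minimal user-space sketch of the statx() check described above. It assumes a glibc with the statx() wrapper (2.28 or later); STATX_ATTR_VERITY is defined locally in case the installed UAPI headers predate this series, using the upstream value.

.. code-block:: c

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>

    #ifndef STATX_ATTR_VERITY
    #define STATX_ATTR_VERITY 0x00100000
    #endif

    int main(int argc, char **argv)
    {
        struct statx stx;

        if (argc != 2)
            return 1;
        /* stx_attributes is reported without having to open the file. */
        if (statx(AT_FDCWD, argv[1], 0, STATX_BASIC_STATS, &stx) != 0) {
            perror("statx");
            return 1;
        }
        printf("%s: fs-verity %s\n", argv[1],
               (stx.stx_attributes & STATX_ATTR_VERITY) ? "enabled" : "disabled");
        return 0;
    }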
diff --git a/Makefile b/Makefile
index 336801b..e2a4f3f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 160
+SUBLEVEL = 165
EXTRAVERSION =
NAME = Petit Gorille
@@ -659,8 +659,7 @@
CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \
$(call cc-option,-fno-tree-loop-im) \
$(call cc-disable-warning,maybe-uninitialized,)
-CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
-export CFLAGS_GCOV CFLAGS_KCOV
+export CFLAGS_GCOV
# Make toolchain changes before including arch/$(SRCARCH)/Makefile to ensure
# ar/cc/ld-* macros return correct values.
@@ -719,6 +718,7 @@
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif
+include scripts/Makefile.kcov
include scripts/Makefile.gcc-plugins
ifdef CONFIG_READABLE_ASM
diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h
index b36c028..6a0f1f5 100644
--- a/arch/arm/boot/compressed/libfdt_env.h
+++ b/arch/arm/boot/compressed/libfdt_env.h
@@ -2,11 +2,13 @@
#ifndef _ARM_LIBFDT_ENV_H
#define _ARM_LIBFDT_ENV_H
+#include <linux/limits.h>
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
-#define INT_MAX ((int)(~0U>>1))
+#define INT32_MAX S32_MAX
+#define UINT32_MAX U32_MAX
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index afb8eb0..051823b 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -83,7 +83,7 @@
};
lcd0: display {
- compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+ compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
label = "lcd";
panel-timing {
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 081fa68..c4279b0 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -45,7 +45,7 @@
};
lcd0: display {
- compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+ compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
label = "lcd";
panel-timing {
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index 8b2c65cd..b822952 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -165,8 +165,8 @@
mdio: mdio@18002000 {
compatible = "brcm,iproc-mdio";
reg = <0x18002000 0x8>;
- #size-cells = <1>;
- #address-cells = <0>;
+ #size-cells = <0>;
+ #address-cells = <1>;
status = "disabled";
gphy0: ethernet-phy@0 {
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 4745e3c..fdb018e 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -38,7 +38,7 @@
trips {
cpu-crit {
- temperature = <80000>;
+ temperature = <90000>;
hysteresis = <0>;
type = "critical";
};
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index fe48852..635b0d5 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -555,8 +555,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
static int __init ve_spc_clk_init(void)
{
- int cpu;
+ int cpu, cluster;
struct clk *clk;
+ bool init_opp_table[MAX_CLUSTERS] = { false };
if (!info)
return 0; /* Continue only if SPC is initialised */
@@ -582,8 +583,17 @@ static int __init ve_spc_clk_init(void)
continue;
}
+ cluster = topology_physical_package_id(cpu_dev->id);
+ if (init_opp_table[cluster])
+ continue;
+
if (ve_init_opp_table(cpu_dev))
pr_warn("failed to initialise cpu%d opp table\n", cpu);
+ else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
+ topology_core_cpumask(cpu_dev->id)))
+ pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
+ else
+ init_opp_table[cluster] = true;
}
platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 4ea23df..5da604e 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -295,7 +295,7 @@
};
&usb0_phy {
- status = "okay";
+ status = "disabled";
phy-supply = <&usb_otg_pwr>;
};
@@ -305,7 +305,7 @@
};
&usb0 {
- status = "okay";
+ status = "disabled";
};
&usb1 {
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index 3346572..d22e5b8 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -52,7 +52,6 @@
CONFIG_HZ_100=y
# CONFIG_SPARSEMEM_VMEMMAP is not set
CONFIG_KSM=y
-CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_ZSMALLOC=y
CONFIG_SECCOMP=y
CONFIG_PARAVIRT=y
@@ -151,6 +150,7 @@
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -369,6 +369,7 @@
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_STEAM=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_GREENASIA=y
CONFIG_GREENASIA_FF=y
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 20e4573..26efe25 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -76,13 +76,12 @@
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_READONLY
#define __P011 PAGE_READONLY
-#define __P100 PAGE_EXECONLY
+#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_READONLY_EXEC
#define __P111 PAGE_READONLY_EXEC
@@ -91,7 +90,7 @@
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXECONLY
+#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f9c88ae..332a180 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -91,12 +91,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
-/*
- * Execute-only user mappings do not have the PTE_USER bit set. All valid
- * kernel mappings have the PTE_UXN bit set.
- */
#define pte_valid_not_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
@@ -112,8 +108,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
/*
* p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check) other than user execute-only which do not have the
- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
+ * set.
*/
#define pte_access_permitted(pte, write) \
(pte_valid_user(pte) && (!(write) || pte_write(pte)))
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 8d07901..3fddbfa 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -806,13 +806,6 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}
-#ifdef CONFIG_ARM64_VHE
-static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
-{
- return is_kernel_in_hyp_mode();
-}
-#endif /* CONFIG_ARM64_VHE */
-
static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
int __unused)
{
@@ -1024,6 +1017,11 @@ static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
#endif
#ifdef CONFIG_ARM64_VHE
+static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+ return is_kernel_in_hyp_mode();
+}
+
static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
{
/*
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 462cc76..ffa3349 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -85,7 +85,8 @@ static void cpu_psci_cpu_die(unsigned int cpu)
static int cpu_psci_cpu_kill(unsigned int cpu)
{
- int err, i;
+ int err;
+ unsigned long start, end;
if (!psci_ops.affinity_info)
return 0;
@@ -95,16 +96,18 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
* while it is dying. So, try again a few times.
*/
- for (i = 0; i < 10; i++) {
+ start = jiffies;
+ end = start + msecs_to_jiffies(100);
+ do {
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
- pr_debug("CPU%d killed.\n", cpu);
+ pr_debug("CPU%d killed (polled %d ms)\n", cpu,
+ jiffies_to_msecs(jiffies - start));
return 0;
}
- msleep(10);
- pr_debug("Retrying again to check for CPU kill\n");
- }
+ usleep_range(100, 1000);
+ } while (time_before(jiffies, end));
pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
cpu, err);
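
For illustration, the jiffies-deadline polling pattern used above can be sketched as follows; poll_hw_ready() is a hypothetical stand-in for psci_ops.affinity_info() reporting AFFINITY_LEVEL_OFF, and the timeout value is arbitrary.

/* Rough sketch of bounded polling against a jiffies deadline. */
static int wait_for_hw(unsigned int timeout_ms)
{
	unsigned long start = jiffies;
	unsigned long end = start + msecs_to_jiffies(timeout_ms);

	do {
		if (poll_hw_ready())	/* hypothetical predicate */
			return 0;
		/* Give the other side time to make progress. */
		usleep_range(100, 1000);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

time_before() is wraparound-safe, so the deadline comparison stays correct even if jiffies overflows during the wait.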
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index cfbf7bd..32ae5c9 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1785,8 +1785,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
return NULL;
+ if (!index_to_params(id, &params))
+ return NULL;
+
table = get_target_table(vcpu->arch.target, true, &num);
- r = find_reg_by_id(id, &params, table, num);
+ r = find_reg(&params, table, num);
if (!r)
r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2acae7e..b6d3a04 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -419,7 +419,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code, major = 0;
- unsigned long vm_flags = VM_READ | VM_WRITE;
+ unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
bool was_major = false;
ktime_t event_ts;
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index a2252c2..d0b9912 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -18,10 +18,12 @@
#include <asm/fixmap.h>
#define __ARCH_USE_5LEVEL_HACK
-#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
+#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
-#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
+#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
+#else
+#include <asm-generic/5level-fixup.h>
#endif
/*
@@ -222,6 +224,9 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
return pgd_val(pgd);
}
+#define pgd_phys(pgd) virt_to_phys((void *)pgd_val(pgd))
+#define pgd_page(pgd) (pfn_to_page(pgd_phys(pgd) >> PAGE_SHIFT))
+
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 5e8927f..a0338db 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -52,8 +52,26 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/* How to get the thread information struct from C. */
+/*
+ * A pointer to the struct thread_info for the currently executing thread is
+ * held in register $28/$gp.
+ *
+ * We declare __current_thread_info as a global register variable rather than a
+ * local register variable within current_thread_info() because clang doesn't
+ * support explicit local register variables.
+ *
+ * When building the VDSO we take care not to declare the global register
+ * variable because this causes GCC to not preserve the value of $28/$gp in
+ * functions that change its value (which is common in the PIC VDSO when
+ * accessing the GOT). Since the VDSO shouldn't be accessing
+ * __current_thread_info anyway we declare it extern in order to cause a link
+ * failure if it's referenced.
+ */
+#ifdef __VDSO__
+extern struct thread_info *__current_thread_info;
+#else
register struct thread_info *__current_thread_info __asm__("$28");
+#endif
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 42faa95..57a7a9d 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -612,6 +612,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
int off, b_off;
+ int tcc_reg;
ctx->flags |= EBPF_SEEN_TC;
/*
@@ -624,14 +625,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
b_off = b_imm(this_idx + 1, ctx);
emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
/*
- * if (--TCC < 0)
+ * if (TCC-- < 0)
* goto out;
*/
/* Delay slot */
- emit_instr(ctx, daddiu, MIPS_R_T5,
- (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+ tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
+ emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
b_off = b_imm(this_idx + 1, ctx);
- emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+ emit_instr(ctx, bltz, tcc_reg, b_off);
/*
* prog = array->ptrs[index];
* if (prog == NULL)
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index f627c37..ab5c215 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -44,8 +44,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
-#define xchg(ptr, x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, x) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __typeof__(*(ptr)) _x_ = (x); \
+ __ret = (__typeof__(*(ptr))) \
+ __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
+ __ret; \
+})
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
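
For illustration, one property of the rewritten xchg() above is that its value argument is evaluated exactly once inside a statement expression. The user-space snippet below shows why that matters for function-like macros; BAD_DOUBLE, GOOD_DOUBLE and next() are made-up names used only for contrast, and the statement-expression form requires GCC or Clang.

#include <stdio.h>

static int calls;

static int next(void)
{
	return ++calls;
}

/* Naive macro: 'x' appears twice, so next() is called twice. */
#define BAD_DOUBLE(x)	((x) + (x))

/* Statement-expression form, as the new xchg() uses: 'x' is evaluated once. */
#define GOOD_DOUBLE(x)	({ __typeof__(x) _x_ = (x); _x_ + _x_; })

int main(void)
{
	int r, n;

	calls = 0;
	r = BAD_DOUBLE(next());
	n = calls;
	printf("BAD_DOUBLE(next())  = %d, next() called %d time(s)\n", r, n);

	calls = 0;
	r = GOOD_DOUBLE(next());
	n = calls;
	printf("GOOD_DOUBLE(next()) = %d, next() called %d time(s)\n", r, n);
	return 0;
}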
diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h
index 39155d3..ac5d3c9 100644
--- a/arch/powerpc/boot/libfdt_env.h
+++ b/arch/powerpc/boot/libfdt_env.h
@@ -6,6 +6,8 @@
#include <string.h>
#define INT_MAX ((int)(~0U>>1))
+#define UINT32_MAX ((u32)~0U)
+#define INT32_MAX ((s32)(UINT32_MAX >> 1))
#include "of.h"
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0ce8b0e..207ba53 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -561,8 +561,6 @@ void __do_irq(struct pt_regs *regs)
trace_irq_entry(regs);
- check_stack_overflow();
-
/*
* Query the platform PIC for the interrupt & ack it.
*
@@ -594,6 +592,8 @@ void do_IRQ(struct pt_regs *regs)
irqtp = hardirq_ctx[raw_smp_processor_id()];
sirqtp = softirq_ctx[raw_smp_processor_id()];
+ check_stack_overflow();
+
/* Already there ? */
if (unlikely(curtp == irqtp || curtp == sirqtp)) {
__do_irq(regs);
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index f5d6541b..b3f540c 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -134,32 +134,33 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
- if (rfi_flush || thread_priv) {
+ if (rfi_flush) {
struct seq_buf s;
seq_buf_init(&s, buf, PAGE_SIZE - 1);
- seq_buf_printf(&s, "Mitigation: ");
-
- if (rfi_flush)
- seq_buf_printf(&s, "RFI Flush");
-
- if (rfi_flush && thread_priv)
- seq_buf_printf(&s, ", ");
-
+ seq_buf_printf(&s, "Mitigation: RFI Flush");
if (thread_priv)
- seq_buf_printf(&s, "L1D private per thread");
+ seq_buf_printf(&s, ", L1D private per thread");
seq_buf_printf(&s, "\n");
return s.len;
}
+ if (thread_priv)
+ return sprintf(buf, "Vulnerable: L1D private per thread\n");
+
if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
return sprintf(buf, "Not affected\n");
return sprintf(buf, "Vulnerable\n");
}
+
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_meltdown(dev, attr, buf);
+}
#endif
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 14f3f28..66a9987 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -241,7 +241,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
* Accumulate stolen time by scanning the dispatch trace log.
* Called on entry from user mode.
*/
-void accumulate_stolen_time(void)
+void notrace accumulate_stolen_time(void)
{
u64 sst, ust;
u8 save_soft_enabled = local_paca->soft_enabled;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 58c1474..387600e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -292,10 +292,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, psize,
ssize);
-
+ if (ret == -1) {
+ /* Try to remove a non bolted entry */
+ ret = mmu_hash_ops.hpte_remove(hpteg);
+ if (ret != -1)
+ ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
+ HPTE_V_BOLTED, psize, psize,
+ ssize);
+ }
if (ret < 0)
break;
+ cond_resched();
#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled() &&
(paddr >> PAGE_SHIFT) < linear_map_hash_count)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 30bf13b..3c5abfb 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -353,6 +353,14 @@ void __init mem_init(void)
BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
#ifdef CONFIG_SWIOTLB
+ /*
+ * Some platforms (e.g. 85xx) limit DMA-able memory way below
+ * 4G. We force memblock to bottom-up mode to ensure that the
+ * memory allocated in swiotlb_init() is DMA-able.
+ * As it's the last memblock allocation, no need to reset it
+ * back to top-down.
+ */
+ memblock_set_bottom_up(true);
swiotlb_init(0);
#endif
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 560aefd..5a8e4ca 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -425,6 +425,10 @@ static struct bus_type cmm_subsys = {
.dev_name = "cmm",
};
+static void cmm_release_device(struct device *dev)
+{
+}
+
/**
* cmm_sysfs_register - Register with sysfs
*
@@ -440,6 +444,7 @@ static int cmm_sysfs_register(struct device *dev)
dev->id = 0;
dev->bus = &cmm_subsys;
+ dev->release = cmm_release_device;
if ((rc = device_register(dev)))
goto subsys_unregister;
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
index 74da18d..73ec15c 100644
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(hvc_get_chars);
* @vtermno: The vtermno or unit_address of the adapter from which the data
* originated.
* @buf: The character buffer that contains the character data to send to
- * firmware.
+ * firmware. Must be at least 16 bytes, even if count is less than 16.
* @count: Send this number of characters.
*/
int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
index ec2d5c83..d6c16e7 100755
--- a/arch/powerpc/tools/relocs_check.sh
+++ b/arch/powerpc/tools/relocs_check.sh
@@ -23,7 +23,7 @@
vmlinux="$2"
bad_relocs=$(
-"$objdump" -R "$vmlinux" |
+$objdump -R "$vmlinux" |
# Only look at relocation lines.
grep -E '\<R_' |
# These relocations are okay
diff --git a/arch/powerpc/tools/unrel_branch_check.sh b/arch/powerpc/tools/unrel_branch_check.sh
index 1e972df..7711475 100755
--- a/arch/powerpc/tools/unrel_branch_check.sh
+++ b/arch/powerpc/tools/unrel_branch_check.sh
@@ -18,14 +18,14 @@
#__end_interrupts should be located within the first 64K
end_intr=0x$(
-"$objdump" -R "$vmlinux" -d --start-address=0xc000000000000000 \
+$objdump -R "$vmlinux" -d --start-address=0xc000000000000000 \
--stop-address=0xc000000000010000 |
grep '\<__end_interrupts>:' |
awk '{print $1}'
)
BRANCHES=$(
-"$objdump" -R "$vmlinux" -D --start-address=0xc000000000000000 \
+$objdump -R "$vmlinux" -D --start-address=0xc000000000000000 \
--stop-address=${end_intr} |
grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" |
grep -v '\<__start_initialization_multiplatform>' |
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index bbe99cb..11857fe 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -70,7 +70,12 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
crst_table_init(table, _REGION2_ENTRY_EMPTY);
return (p4d_t *) table;
}
-#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
+
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ if (!mm_p4d_folded(mm))
+ crst_table_free(mm, (unsigned long *) p4d);
+}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
@@ -79,7 +84,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
crst_table_init(table, _REGION3_ENTRY_EMPTY);
return (pud_t *) table;
}
-#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ if (!mm_pud_folded(mm))
+ crst_table_free(mm, (unsigned long *) pud);
+}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
@@ -97,6 +107,8 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
+ if (mm_pmd_folded(mm))
+ return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
crst_table_free(mm, (unsigned long *) pmd);
}
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 64539c2..2dc9eb4 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -10,8 +10,9 @@
#ifndef _ASM_S390_TIMEX_H
#define _ASM_S390_TIMEX_H
-#include <asm/lowcore.h>
+#include <linux/preempt.h>
#include <linux/time64.h>
+#include <asm/lowcore.h>
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
@@ -186,15 +187,18 @@ extern unsigned char tod_clock_base[16] __aligned(8);
/**
* get_clock_monotonic - returns current time in clock rate units
*
- * The caller must ensure that preemption is disabled.
* The clock and tod_clock_base get changed via stop_machine.
- * Therefore preemption must be disabled when calling this
- * function, otherwise the returned value is not guaranteed to
- * be monotonic.
+ * Therefore preemption must be disabled, otherwise the returned
+ * value is not guaranteed to be monotonic.
*/
static inline unsigned long long get_tod_clock_monotonic(void)
{
- return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
+ unsigned long long tod;
+
+ preempt_disable_notrace();
+ tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
+ preempt_enable_notrace();
+ return tod;
}
/**
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 2394557..6d15406 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1930,10 +1930,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
ptr += sprintf(ptr, "%%c%i", value);
else if (operand->flags & OPERAND_VR)
ptr += sprintf(ptr, "%%v%i", value);
- else if (operand->flags & OPERAND_PCREL)
- ptr += sprintf(ptr, "%lx", (signed int) value
- + addr);
- else if (operand->flags & OPERAND_SIGNED)
+ else if (operand->flags & OPERAND_PCREL) {
+ void *pcrel = (void *)((int)value + addr);
+
+ ptr += sprintf(ptr, "%px", pcrel);
+ } else if (operand->flags & OPERAND_SIGNED)
ptr += sprintf(ptr, "%i", value);
else
ptr += sprintf(ptr, "%u", value);
@@ -2005,7 +2006,7 @@ void show_code(struct pt_regs *regs)
else
*ptr++ = ' ';
addr = regs->psw.addr + start - 32;
- ptr += sprintf(ptr, "%016lx: ", addr);
+ ptr += sprintf(ptr, "%px: ", (void *)addr);
if (start + opsize >= end)
break;
for (i = 0; i < opsize; i++)
@@ -2033,7 +2034,7 @@ void print_fn_code(unsigned char *code, unsigned long len)
opsize = insn_length(*code);
if (opsize > len)
break;
- ptr += sprintf(ptr, "%p: ", code);
+ ptr += sprintf(ptr, "%px: ", code);
for (i = 0; i < opsize; i++)
ptr += sprintf(ptr, "%02x", code[i]);
*ptr++ = '\t';
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 2e2fd95..b652593 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -185,7 +185,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
unsigned long num_sdb, gfp_t gfp_flags)
{
int i, rc;
- unsigned long *new, *tail;
+ unsigned long *new, *tail, *tail_prev = NULL;
if (!sfb->sdbt || !sfb->tail)
return -EINVAL;
@@ -224,6 +224,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
sfb->num_sdbt++;
/* Link current page to tail of chain */
*tail = (unsigned long)(void *) new + 1;
+ tail_prev = tail;
tail = new;
}
@@ -233,10 +234,22 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
* issue, a new realloc call (if required) might succeed.
*/
rc = alloc_sample_data_block(tail, gfp_flags);
- if (rc)
+ if (rc) {
+ /* Undo last SDBT. An SDBT with no SDB at its first
+ * entry but with an SDBT entry instead can not be
+ * handled by the interrupt handler code.
+ * Avoid this situation.
+ */
+ if (tail_prev) {
+ sfb->num_sdbt--;
+ free_page((unsigned long) new);
+ tail = tail_prev;
+ }
break;
+ }
sfb->num_sdb++;
tail++;
+ tail_prev = new = NULL; /* Allocated at least one SBD */
}
/* Link sampling buffer to its origin */
@@ -1281,18 +1294,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
*/
if (flush_all && done)
break;
-
- /* If an event overflow happened, discard samples by
- * processing any remaining sample-data-blocks.
- */
- if (event_overflow)
- flush_all = 1;
}
/* Account sample overflows in the event hardware structure */
if (sampl_overflow)
OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
sampl_overflow, 1 + num_sdb);
+
+ /* Perf_event_overflow() and perf_event_account_interrupt() limit
+ * the interrupt rate to an upper limit. Roughly 1000 samples per
+ * task tick.
+ * Hitting this limit results in a large number
+ * of throttled REF_REPORT_THROTTLE entries and the samples
+ * are dropped.
+ * Slightly increase the interval to avoid hitting this limit.
+ */
+ if (event_overflow) {
+ SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
+ debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
+ __func__,
+ DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
+ }
+
if (sampl_overflow || event_overflow)
debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
"overflow stats: sample=%llu event=%llu\n",
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 27258db..b649a65 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -725,39 +725,67 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
static int smp_add_present_cpu(int cpu);
-static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
+static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
+ bool configured, bool early)
{
struct pcpu *pcpu;
- cpumask_t avail;
- int cpu, nr, i, j;
+ int cpu, nr, i;
u16 address;
nr = 0;
- cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
- cpu = cpumask_first(&avail);
- for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
- if (sclp.has_core_type && info->core[i].type != boot_core_type)
+ if (sclp.has_core_type && core->type != boot_core_type)
+ return nr;
+ cpu = cpumask_first(avail);
+ address = core->core_id << smp_cpu_mt_shift;
+ for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
+ if (pcpu_find_address(cpu_present_mask, address + i))
continue;
- address = info->core[i].core_id << smp_cpu_mt_shift;
- for (j = 0; j <= smp_cpu_mtid; j++) {
- if (pcpu_find_address(cpu_present_mask, address + j))
- continue;
- pcpu = pcpu_devices + cpu;
- pcpu->address = address + j;
- pcpu->state =
- (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
- set_cpu_present(cpu, true);
- if (sysfs_add && smp_add_present_cpu(cpu) != 0)
- set_cpu_present(cpu, false);
- else
- nr++;
- cpu = cpumask_next(cpu, &avail);
- if (cpu >= nr_cpu_ids)
+ pcpu = pcpu_devices + cpu;
+ pcpu->address = address + i;
+ if (configured)
+ pcpu->state = CPU_STATE_CONFIGURED;
+ else
+ pcpu->state = CPU_STATE_STANDBY;
+ smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ set_cpu_present(cpu, true);
+ if (!early && smp_add_present_cpu(cpu) != 0)
+ set_cpu_present(cpu, false);
+ else
+ nr++;
+ cpumask_clear_cpu(cpu, avail);
+ cpu = cpumask_next(cpu, avail);
+ }
+ return nr;
+}
+
+static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
+{
+ struct sclp_core_entry *core;
+ cpumask_t avail;
+ bool configured;
+ u16 core_id;
+ int nr, i;
+
+ nr = 0;
+ cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
+ /*
+ * Add IPL core first (which got logical CPU number 0) to make sure
+ * that all SMT threads get subsequent logical CPU numbers.
+ */
+ if (early) {
+ core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
+ for (i = 0; i < info->configured; i++) {
+ core = &info->core[i];
+ if (core->core_id == core_id) {
+ nr += smp_add_core(core, &avail, true, early);
break;
+ }
}
}
+ for (i = 0; i < info->combined; i++) {
+ configured = i < info->configured;
+ nr += smp_add_core(&info->core[i], &avail, configured, early);
+ }
return nr;
}
@@ -803,7 +831,7 @@ void __init smp_detect_cpus(void)
/* Add CPUs present at boot */
get_online_cpus();
- __smp_rescan_cpus(info, 0);
+ __smp_rescan_cpus(info, true);
put_online_cpus();
memblock_free_early((unsigned long)info, sizeof(*info));
}
@@ -1156,7 +1184,7 @@ int __ref smp_rescan_cpus(void)
smp_get_core_info(info, 0);
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
- nr = __smp_rescan_cpus(info, 1);
+ nr = __smp_rescan_cpus(info, false);
mutex_unlock(&smp_cpu_state_mutex);
put_online_cpus();
kfree(info);
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7734.h b/arch/sh/include/cpu-sh4/cpu/sh7734.h
index 96f0246..82b6320 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7734.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7734.h
@@ -134,7 +134,7 @@ enum {
GPIO_FN_EX_WAIT1, GPIO_FN_SD1_DAT0_A, GPIO_FN_DREQ2, GPIO_FN_CAN1_TX_C,
GPIO_FN_ET0_LINK_C, GPIO_FN_ET0_ETXD5_A,
GPIO_FN_EX_WAIT0, GPIO_FN_TCLK1_B,
- GPIO_FN_RD_WR, GPIO_FN_TCLK0,
+ GPIO_FN_RD_WR, GPIO_FN_TCLK0, GPIO_FN_CAN_CLK_B, GPIO_FN_ET0_ETXD4,
GPIO_FN_EX_CS5, GPIO_FN_SD1_CMD_A, GPIO_FN_ATADIR, GPIO_FN_QSSL_B,
GPIO_FN_ET0_ETXD3_A,
GPIO_FN_EX_CS4, GPIO_FN_SD1_WP_A, GPIO_FN_ATAWR, GPIO_FN_QMI_QIO1_B,
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index 38e8765..9e92a37 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -55,7 +55,6 @@
CONFIG_X86_CPUID=y
CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
-CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_ZSMALLOC=y
# CONFIG_MTRR is not set
CONFIG_HZ_100=y
@@ -160,6 +159,7 @@
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -282,7 +282,6 @@
# CONFIG_WLAN_VENDOR_TI is not set
# CONFIG_WLAN_VENDOR_ZYDAS is not set
# CONFIG_WLAN_VENDOR_QUANTENNA is not set
-CONFIG_MAC80211_HWSIM=y
CONFIG_VIRT_WIFI=y
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_EVDEV=y
@@ -391,6 +390,7 @@
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_STEAM=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_GREENASIA=y
CONFIG_GREENASIA_FF=y
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 6ed99de..c1f7b3c 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -375,7 +375,7 @@ int x86_add_exclusive(unsigned int what)
* LBR and BTS are still mutually exclusive.
*/
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
- return 0;
+ goto out;
if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
mutex_lock(&pmc_reserve_mutex);
@@ -387,6 +387,7 @@ int x86_add_exclusive(unsigned int what)
mutex_unlock(&pmc_reserve_mutex);
}
+out:
atomic_inc(&active_events);
return 0;
@@ -397,11 +398,15 @@ int x86_add_exclusive(unsigned int what)
void x86_del_exclusive(unsigned int what)
{
+ atomic_dec(&active_events);
+
+ /*
+ * See the comment in x86_add_exclusive().
+ */
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return;
atomic_dec(&x86_pmu.lbr_exclusive[what]);
- atomic_dec(&active_events);
}
int x86_setup_perfctr(struct perf_event *event)
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 6816216..5a1cd9c 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -71,9 +71,17 @@ struct bts_buffer {
static struct pmu bts_pmu;
+static int buf_nr_pages(struct page *page)
+{
+ if (!PagePrivate(page))
+ return 1;
+
+ return 1 << page_private(page);
+}
+
static size_t buf_size(struct page *page)
{
- return 1 << (PAGE_SHIFT + page_private(page));
+ return buf_nr_pages(page) * PAGE_SIZE;
}
static void *
@@ -91,9 +99,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
/* count all the high order buffers */
for (pg = 0, nbuf = 0; pg < nr_pages;) {
page = virt_to_page(pages[pg]);
- if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
- return NULL;
- pg += 1 << page_private(page);
+ pg += buf_nr_pages(page);
nbuf++;
}
@@ -117,7 +123,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
unsigned int __nr_pages;
page = virt_to_page(pages[pg]);
- __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
+ __nr_pages = buf_nr_pages(page);
buf->buf[nbuf].page = page;
buf->buf[nbuf].offset = offset;
buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
index a7adb2bfb..6b8ad6f 100644
--- a/arch/x86/include/asm/crash.h
+++ b/arch/x86/include/asm/crash.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_CRASH_H
#define _ASM_X86_CRASH_H
+struct kimage;
+
int crash_load_segments(struct kimage *image);
int crash_copy_backup_region(struct kimage *image);
int crash_setup_memmap_entries(struct kimage *image,
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 6390bd8..5e12b23 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -159,7 +159,7 @@ extern pte_t *kmap_pte;
extern pte_t *pkmap_page_table;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
-void native_set_fixmap(enum fixed_addresses idx,
+void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
#ifndef CONFIG_PARAVIRT
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 566b7bc..2271adb 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1690,9 +1690,10 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
static inline bool ioapic_irqd_mask(struct irq_data *data)
{
- /* If we are moving the irq we need to mask it */
+ /* If we are moving the IRQ we need to mask it */
if (unlikely(irqd_is_setaffinity_pending(data))) {
- mask_ioapic_irq(data);
+ if (!irqd_irq_masked(data))
+ mask_ioapic_irq(data);
return true;
}
return false;
@@ -1729,7 +1730,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
*/
if (!io_apic_level_ack_pending(data->chip_data))
irq_move_masked_irq(data);
- unmask_ioapic_irq(data);
+ /* If the IRQ is masked in the core, leave it: */
+ if (!irqd_irq_masked(data))
+ unmask_ioapic_irq(data);
}
}
#else
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index c7bd2e5..0b0e44f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -802,8 +802,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
if (quirk_no_way_out)
quirk_no_way_out(i, m, regs);
+ m->bank = i;
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
- m->bank = i;
mce_read_aux(m, i);
*msg = tmp;
return 1;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index b434780a..a8f4769 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -228,10 +228,10 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
}
/* Return early if this bank was already initialized. */
- if (smca_banks[bank].hwid)
+ if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0)
return;
- if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
+ if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
return;
}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index ee229ce..ec6a07b 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -185,7 +185,7 @@ static void therm_throt_process(bool new_event, int event, int level)
/* if we just entered the thermal event */
if (new_event) {
if (event == THERMAL_THROTTLING_EVENT)
- pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+ pr_warn("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
this_cpu,
level == CORE_LEVEL ? "Core" : "Package",
state->count);
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index e0b8593..0a0e911 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -333,7 +333,7 @@
06: CLTS
07: SYSRET (o64)
08: INVD
-09: WBINVD
+09: WBINVD | WBNOINVD (F3)
0a:
0b: UD2 (1B)
0c:
@@ -364,7 +364,7 @@
# a ModR/M byte.
1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
-1c:
+1c: Grp20 (1A),(1C)
1d:
1e:
1f: NOP Ev
@@ -792,6 +792,8 @@
f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
+f9: MOVDIRI My,Gy
EndTable
Table: 3-byte opcode 2 (0x0f 0x3a)
@@ -943,9 +945,9 @@
EndTable
GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
3: LIDT Ms
4: SMSW Mw/Rv
5: rdpkru (110),(11B) | wrpkru (111),(11B)
@@ -1020,7 +1022,7 @@
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE | ptwrite Ey (F3),(11B)
5: XRSTOR | lfence (11B)
-6: XSAVEOPT | clwb (66) | mfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
7: clflush | clflushopt (66) | sfence (11B)
EndTable
@@ -1051,6 +1053,10 @@
6: vscatterpf1qps/d Wx (66),(ev)
EndTable
+GrpTable: Grp20
+0: cldemote Mb
+EndTable
+
# AMD's Prefetch Group
GrpTable: GrpP
0: PREFETCH
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index b4fd362..55338b3 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -590,8 +590,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
fixmaps_set++;
}
-void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
- pgprot_t flags)
+void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags)
{
__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 5b513cc..cadd7fd 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -257,10 +257,6 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
return;
}
- /* No need to reserve regions that will never be freed. */
- if (md.attribute & EFI_MEMORY_RUNTIME)
- return;
-
size += addr % EFI_PAGE_SIZE;
size = round_up(size, EFI_PAGE_SIZE);
addr = round_down(addr, EFI_PAGE_SIZE);
@@ -290,6 +286,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
early_memunmap(new, new_size);
efi_memmap_install(new_phys, num_entries);
+ e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
+ e820__update_table(e820_table);
}
/*
diff --git a/block/blk-map.c b/block/blk-map.c
index e31be14..f72a3af 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -152,7 +152,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return 0;
unmap_rq:
- __blk_rq_unmap_user(bio);
+ blk_rq_unmap_user(bio);
fail:
rq->bio = NULL;
return ret;
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 6ca015f..6490b27 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -6,6 +6,7 @@
#include <linux/compat.h>
#include <linux/elevator.h>
#include <linux/hdreg.h>
+#include <linux/pr.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/types.h>
@@ -354,6 +355,8 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
* but we call blkdev_ioctl, which gets the lock for us
*/
case BLKRRPART:
+ case BLKREPORTZONE:
+ case BLKRESETZONE:
return blkdev_ioctl(bdev, mode, cmd,
(unsigned long)compat_ptr(arg));
case BLKBSZSET_32:
@@ -401,6 +404,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKTRACETEARDOWN: /* compatible */
ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
return ret;
+ case IOC_PR_REGISTER:
+ case IOC_PR_RESERVE:
+ case IOC_PR_RELEASE:
+ case IOC_PR_PREEMPT:
+ case IOC_PR_PREEMPT_ABORT:
+ case IOC_PR_CLEAR:
+ return blkdev_ioctl(bdev, mode, cmd,
+ (unsigned long)compat_ptr(arg));
default:
if (disk->fops->compat_ioctl)
ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index ee4880b..3a90d51 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -20,6 +20,18 @@
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDERFS
+ bool "Android Binderfs filesystem"
+ depends on ANDROID_BINDER_IPC
+ default n
+ ---help---
+ Binderfs is a pseudo-filesystem for the Android Binder IPC driver
+ which can be mounted per-ipc namespace, allowing multiple
+ instances of Android to be run.
+ Each binderfs mount initially only contains a binder-control device.
+ It can be used to dynamically allocate new binder IPC devices via
+ ioctls.
+
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
depends on ANDROID_BINDER_IPC
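
For illustration, a binderfs mount can be requested from C with mount(2); the filesystem type name "binder" and the /dev/binderfs mount point are assumptions based on the binderfs implementation added later in this patch, and the max= option corresponds to the tokens table in binderfs.c.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* The target directory must already exist; "max=4096" caps the
	 * number of binder devices that can be allocated in this mount.
	 */
	if (mount("binder", "/dev/binderfs", "binder", 0, "max=4096") < 0) {
		perror("mount binderfs");
		return 1;
	}
	return 0;
}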
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index a01254c..c7856e3 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,4 +1,5 @@
ccflags-y += -I$(src) # needed for trace events
+obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 2c8b629..920b1ca 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -75,6 +75,7 @@
#include <uapi/linux/android/binder.h>
#include <uapi/linux/sched/types.h>
#include "binder_alloc.h"
+#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
@@ -91,22 +92,8 @@ static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;
-#define BINDER_DEBUG_ENTRY(name) \
-static int binder_##name##_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, binder_##name##_show, inode->i_private); \
-} \
-\
-static const struct file_operations binder_##name##_fops = { \
- .owner = THIS_MODULE, \
- .open = binder_##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
-}
-
-static int binder_proc_show(struct seq_file *m, void *unused);
-BINDER_DEBUG_ENTRY(proc);
+static int proc_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
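
For reference, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the same open/fops boilerplate that the removed BINDER_DEBUG_ENTRY() macro produced, so DEFINE_SHOW_ATTRIBUTE(proc) expands to approximately:

static int proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_show, inode->i_private);
}

static const struct file_operations proc_fops = {
	.owner		= THIS_MODULE,
	.open		= proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

which is why the later hunks can simply reference &proc_fops.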
@@ -140,7 +127,7 @@ static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
-static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
@@ -214,30 +201,8 @@ static inline void binder_stats_created(enum binder_stat_types type)
atomic_inc(&binder_stats.obj_created[type]);
}
-struct binder_transaction_log_entry {
- int debug_id;
- int debug_id_done;
- int call_type;
- int from_proc;
- int from_thread;
- int target_handle;
- int to_proc;
- int to_thread;
- int to_node;
- int data_size;
- int offsets_size;
- int return_error_line;
- uint32_t return_error;
- uint32_t return_error_param;
- const char *context_name;
-};
-struct binder_transaction_log {
- atomic_t cur;
- bool full;
- struct binder_transaction_log_entry entry[32];
-};
-static struct binder_transaction_log binder_transaction_log;
-static struct binder_transaction_log binder_transaction_log_failed;
+struct binder_transaction_log binder_transaction_log;
+struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
@@ -259,20 +224,6 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
return e;
}
-struct binder_context {
- struct binder_node *binder_context_mgr_node;
- struct mutex context_mgr_node_lock;
-
- kuid_t binder_context_mgr_uid;
- const char *name;
-};
-
-struct binder_device {
- struct hlist_node hlist;
- struct miscdevice miscdev;
- struct binder_context context;
-};
-
/**
* struct binder_work - work enqueued on a worklist
* @entry: node enqueued on list
@@ -537,6 +488,7 @@ struct binder_priority {
* @inner_lock: can nest under outer_lock and/or node lock
* @outer_lock: no nesting under inner or node lock
* Lock order: 1) outer, 2) node, 3) inner
+ * @binderfs_entry: process-specific binderfs log file
*
* Bookkeeping structure for binder processes
*/
@@ -568,6 +520,7 @@ struct binder_proc {
struct binder_context *context;
spinlock_t inner_lock;
spinlock_t outer_lock;
+ struct dentry *binderfs_entry;
};
enum {
@@ -5207,6 +5160,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
struct binder_device *binder_dev;
+ struct binderfs_info *info;
+ struct dentry *binder_binderfs_dir_entry_proc = NULL;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
@@ -5228,8 +5183,15 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc->default_priority.prio = NICE_TO_PRIO(0);
}
- binder_dev = container_of(filp->private_data, struct binder_device,
- miscdev);
+ /* binderfs stashes devices in i_private */
+ if (is_binderfs_device(nodp)) {
+ binder_dev = nodp->i_private;
+ info = nodp->i_sb->s_fs_info;
+ binder_binderfs_dir_entry_proc = info->proc_log_dir;
+ } else {
+ binder_dev = container_of(filp->private_data,
+ struct binder_device, miscdev);
+ }
proc->context = &binder_dev->context;
binder_alloc_init(&proc->alloc);
@@ -5257,7 +5219,36 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
binder_debugfs_dir_entry_proc,
(void *)(unsigned long)proc->pid,
- &binder_proc_fops);
+ &proc_fops);
+ }
+
+ if (binder_binderfs_dir_entry_proc) {
+ char strbuf[11];
+ struct dentry *binderfs_entry;
+
+ snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ /*
+ * Similar to debugfs, the process specific log file is shared
+ * between contexts. If the file has already been created for a
+ * process, the following binderfs_create_file() call will
+ * fail with error code EEXIST if another context of the same
+ * process invoked binder_open(). This is okay since, as with
+ * debugfs, the log file will contain information on all
+ * contexts of a given PID.
+ */
+ binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
+ strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
+ if (!IS_ERR(binderfs_entry)) {
+ proc->binderfs_entry = binderfs_entry;
+ } else {
+ int error;
+
+ error = PTR_ERR(binderfs_entry);
+ if (error != -EEXIST) {
+ pr_warn("Unable to create file %s in binderfs (error %d)\n",
+ strbuf, error);
+ }
+ }
}
return 0;
@@ -5299,6 +5290,12 @@ static int binder_release(struct inode *nodp, struct file *filp)
struct binder_proc *proc = filp->private_data;
debugfs_remove(proc->debugfs_entry);
+
+ if (proc->binderfs_entry) {
+ binderfs_remove_file(proc->binderfs_entry);
+ proc->binderfs_entry = NULL;
+ }
+
binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
return 0;
@@ -5905,7 +5902,7 @@ static void print_binder_proc_stats(struct seq_file *m,
}
-static int binder_state_show(struct seq_file *m, void *unused)
+int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
@@ -5944,7 +5941,7 @@ static int binder_state_show(struct seq_file *m, void *unused)
return 0;
}
-static int binder_stats_show(struct seq_file *m, void *unused)
+int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
@@ -5960,7 +5957,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
return 0;
}
-static int binder_transactions_show(struct seq_file *m, void *unused)
+int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
@@ -5973,7 +5970,7 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
return 0;
}
-static int binder_proc_show(struct seq_file *m, void *unused)
+static int proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *itr;
int pid = (unsigned long)m->private;
@@ -6016,7 +6013,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
"\n" : " (incomplete)\n");
}
-static int binder_transaction_log_show(struct seq_file *m, void *unused)
+int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
unsigned int log_cur = atomic_read(&log->cur);
@@ -6037,7 +6034,7 @@ static int binder_transaction_log_show(struct seq_file *m, void *unused)
return 0;
}
-static const struct file_operations binder_fops = {
+const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
@@ -6048,11 +6045,6 @@ static const struct file_operations binder_fops = {
.release = binder_release,
};
-BINDER_DEBUG_ENTRY(state);
-BINDER_DEBUG_ENTRY(stats);
-BINDER_DEBUG_ENTRY(transactions);
-BINDER_DEBUG_ENTRY(transaction_log);
-
static int __init init_binder_device(const char *name)
{
int ret;
@@ -6084,9 +6076,10 @@ static int __init init_binder_device(const char *name)
static int __init binder_init(void)
{
int ret;
- char *device_name, *device_names, *device_tmp;
+ char *device_name, *device_tmp;
struct binder_device *device;
struct hlist_node *tmp;
+ char *device_names = NULL;
ret = binder_alloc_shrinker_init();
if (ret)
@@ -6128,24 +6121,30 @@ static int __init binder_init(void)
&binder_transaction_log_fops);
}
- /*
- * Copy the module_parameter string, because we don't want to
- * tokenize it in-place.
- */
- device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
- if (!device_names) {
- ret = -ENOMEM;
- goto err_alloc_device_names_failed;
- }
- strcpy(device_names, binder_devices_param);
+ if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
+ strcmp(binder_devices_param, "") != 0) {
+ /*
+ * Copy the module_parameter string, because we don't want to
+ * tokenize it in-place.
+ */
+ device_names = kstrdup(binder_devices_param, GFP_KERNEL);
+ if (!device_names) {
+ ret = -ENOMEM;
+ goto err_alloc_device_names_failed;
+ }
- device_tmp = device_names;
- while ((device_name = strsep(&device_tmp, ","))) {
- ret = init_binder_device(device_name);
- if (ret)
- goto err_init_binder_device_failed;
+ device_tmp = device_names;
+ while ((device_name = strsep(&device_tmp, ","))) {
+ ret = init_binder_device(device_name);
+ if (ret)
+ goto err_init_binder_device_failed;
+ }
}
+ ret = init_binderfs();
+ if (ret)
+ goto err_init_binder_device_failed;
+
return ret;
err_init_binder_device_failed:
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
new file mode 100644
index 0000000..bd47f7f
--- /dev/null
+++ b/drivers/android/binder_internal.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_BINDER_INTERNAL_H
+#define _LINUX_BINDER_INTERNAL_H
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/uidgid.h>
+
+struct binder_context {
+ struct binder_node *binder_context_mgr_node;
+ struct mutex context_mgr_node_lock;
+ kuid_t binder_context_mgr_uid;
+ const char *name;
+};
+
+/**
+ * struct binder_device - information about a binder device node
+ * @hlist: list of binder devices (only used for devices requested via
+ * CONFIG_ANDROID_BINDER_DEVICES)
+ * @miscdev: information about a binder character device node
+ * @context: binder context information
+ * @binderfs_inode: This is the inode of the root dentry of the super block
+ * belonging to a binderfs mount.
+ */
+struct binder_device {
+ struct hlist_node hlist;
+ struct miscdevice miscdev;
+ struct binder_context context;
+ struct inode *binderfs_inode;
+};
+
+/**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ * @stats_mode: enable binder stats in binderfs.
+ */
+struct binderfs_mount_opts {
+ int max;
+ int stats_mode;
+};
+
+/**
+ * binderfs_info - information about a binderfs mount
+ * @ipc_ns: The ipc namespace the binderfs mount belongs to.
+ * @control_dentry: This records the dentry of this binderfs mount
+ * binder-control device.
+ * @root_uid: uid that needs to be used when a new binder device is
+ * created.
+ * @root_gid: gid that needs to be used when a new binder device is
+ * created.
+ * @mount_opts: The mount options in use.
+ * @device_count: The current number of allocated binder devices.
+ * @proc_log_dir: Pointer to the directory dentry containing process-specific
+ * logs.
+ */
+struct binderfs_info {
+ struct ipc_namespace *ipc_ns;
+ struct dentry *control_dentry;
+ kuid_t root_uid;
+ kgid_t root_gid;
+ struct binderfs_mount_opts mount_opts;
+ int device_count;
+ struct dentry *proc_log_dir;
+};
+
+extern const struct file_operations binder_fops;
+
+extern char *binder_devices_param;
+
+#ifdef CONFIG_ANDROID_BINDERFS
+extern bool is_binderfs_device(const struct inode *inode);
+extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
+ const struct file_operations *fops,
+ void *data);
+extern void binderfs_remove_file(struct dentry *dentry);
+#else
+static inline bool is_binderfs_device(const struct inode *inode)
+{
+ return false;
+}
+static inline struct dentry *binderfs_create_file(struct dentry *dir,
+ const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ return NULL;
+}
+static inline void binderfs_remove_file(struct dentry *dentry) {}
+#endif
+
+#ifdef CONFIG_ANDROID_BINDERFS
+extern int __init init_binderfs(void);
+#else
+static inline int __init init_binderfs(void)
+{
+ return 0;
+}
+#endif
+
+int binder_stats_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_stats);
+
+int binder_state_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_state);
+
+int binder_transactions_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_transactions);
+
+int binder_transaction_log_show(struct seq_file *m, void *unused);
+DEFINE_SHOW_ATTRIBUTE(binder_transaction_log);
+
+struct binder_transaction_log_entry {
+ int debug_id;
+ int debug_id_done;
+ int call_type;
+ int from_proc;
+ int from_thread;
+ int target_handle;
+ int to_proc;
+ int to_thread;
+ int to_node;
+ int data_size;
+ int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
+ const char *context_name;
+};
+
+struct binder_transaction_log {
+ atomic_t cur;
+ bool full;
+ struct binder_transaction_log_entry entry[32];
+};
+
+extern struct binder_transaction_log binder_transaction_log;
+extern struct binder_transaction_log binder_transaction_log_failed;
+#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
new file mode 100644
index 0000000..a4f73af
--- /dev/null
+++ b/drivers/android/binderfs.c
@@ -0,0 +1,790 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/compiler_types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/ipc_namespace.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/namei.h>
+#include <linux/magic.h>
+#include <linux/major.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mount.h>
+#include <linux/parser.h>
+#include <linux/radix-tree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/user_namespace.h>
+//#include <linux/xarray.h>
+#include <uapi/asm-generic/errno-base.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+#include "binder_internal.h"
+
+#define FIRST_INODE 1
+#define SECOND_INODE 2
+#define INODE_OFFSET 3
+#define INTSTRLEN 21
+#define BINDERFS_MAX_MINOR (1U << MINORBITS)
+/* Ensure that the initial ipc namespace always has devices available. */
+#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
+
+static dev_t binderfs_dev;
+static DEFINE_MUTEX(binderfs_minors_mutex);
+static DEFINE_IDA(binderfs_minors);
+
+enum {
+ Opt_max,
+ Opt_stats_mode,
+ Opt_err
+};
+
+enum binderfs_stats_mode {
+ STATS_NONE,
+ STATS_GLOBAL,
+};
+
+static const match_table_t tokens = {
+ { Opt_max, "max=%d" },
+ { Opt_stats_mode, "stats=%s" },
+ { Opt_err, NULL }
+};
+
+static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
+{
+ return inode->i_sb->s_fs_info;
+}
+
+bool is_binderfs_device(const struct inode *inode)
+{
+ if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC)
+ return true;
+
+ return false;
+}
+
+/**
+ * binderfs_binder_device_create - allocate inode from super block of a
+ * binderfs mount
+ * @ref_inode: inode from which the super block will be taken
+ * @userp: buffer to copy information about new device for userspace to
+ * @req: struct binderfs_device as copied from userspace
+ *
+ * This function allocates a new binder_device and reserves a new minor
+ * number for it.
+ * Minor numbers are limited and tracked globally in binderfs_minors. The
+ * function will stash a struct binder_device for the specific binder
+ * device in i_private of the inode.
+ * It will go on to allocate a new inode from the super block of the
+ * filesystem mount, stash a struct binder_device in its i_private field
+ * and attach a dentry to that inode.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_device_create(struct inode *ref_inode,
+ struct binderfs_device __user *userp,
+ struct binderfs_device *req)
+{
+ int minor, ret;
+ struct dentry *dentry, *root;
+ struct binder_device *device;
+ char *name = NULL;
+ size_t name_len;
+ struct inode *inode = NULL;
+ struct super_block *sb = ref_inode->i_sb;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ /* Reserve new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ if (++info->device_count <= info->mount_opts.max)
+ minor = ida_simple_get(&binderfs_minors, 0,
+ use_reserve ? BINDERFS_MAX_MINOR + 1:
+ BINDERFS_MAX_MINOR_CAPPED + 1,
+ GFP_KERNEL);
+ else
+ minor = -ENOSPC;
+ if (minor < 0) {
+ --info->device_count;
+ mutex_unlock(&binderfs_minors_mutex);
+ return minor;
+ }
+ mutex_unlock(&binderfs_minors_mutex);
+
+ ret = -ENOMEM;
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ goto err;
+
+ inode = new_inode(sb);
+ if (!inode)
+ goto err;
+
+ inode->i_ino = minor + INODE_OFFSET;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &binder_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
+ name_len = strlen(req->name);
+ /* Make sure to include terminating NUL byte */
+ name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
+ if (!name)
+ goto err;
+
+ device->binderfs_inode = inode;
+ device->context.binder_context_mgr_uid = INVALID_UID;
+ device->context.name = name;
+ device->miscdev.name = name;
+ device->miscdev.minor = minor;
+ mutex_init(&device->context.context_mgr_node_lock);
+
+ req->major = MAJOR(binderfs_dev);
+ req->minor = minor;
+
+ if (userp && copy_to_user(userp, req, sizeof(*req))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ root = sb->s_root;
+ inode_lock(d_inode(root));
+
+ /* look it up */
+ dentry = lookup_one_len(name, root, name_len);
+ if (IS_ERR(dentry)) {
+ inode_unlock(d_inode(root));
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ if (d_really_is_positive(dentry)) {
+ /* already exists */
+ dput(dentry);
+ inode_unlock(d_inode(root));
+ ret = -EEXIST;
+ goto err;
+ }
+
+ inode->i_private = device;
+ d_instantiate(dentry, inode);
+ fsnotify_create(root->d_inode, dentry);
+ inode_unlock(d_inode(root));
+
+ return 0;
+
+err:
+ kfree(name);
+ kfree(device);
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_remove(&binderfs_minors, minor);
+ mutex_unlock(&binderfs_minors_mutex);
+ iput(inode);
+
+ return ret;
+}
+
+/**
+ * binder_ctl_ioctl - handle binder device node allocation requests
+ *
+ * The request handler for the binder-control device. All requests operate on
+ * the binderfs mount the binder-control device resides in:
+ * - BINDER_CTL_ADD
+ * Allocate a new binder device.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -EINVAL;
+ struct inode *inode = file_inode(file);
+ struct binderfs_device __user *device = (struct binderfs_device __user *)arg;
+ struct binderfs_device device_req;
+
+ switch (cmd) {
+ case BINDER_CTL_ADD:
+ ret = copy_from_user(&device_req, device, sizeof(device_req));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = binderfs_binder_device_create(inode, device, &device_req);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void binderfs_evict_inode(struct inode *inode)
+{
+ struct binder_device *device = inode->i_private;
+ struct binderfs_info *info = BINDERFS_I(inode);
+
+ clear_inode(inode);
+
+ if (!S_ISCHR(inode->i_mode) || !device)
+ return;
+
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_remove(&binderfs_minors, device->miscdev.minor);
+ mutex_unlock(&binderfs_minors_mutex);
+
+ kfree(device->context.name);
+ kfree(device);
+}
+
+/**
+ * binderfs_parse_mount_opts - parse binderfs mount options
+ * @data: options to set (can be NULL in which case defaults are used)
+ */
+static int binderfs_parse_mount_opts(char *data,
+ struct binderfs_mount_opts *opts)
+{
+ char *p, *stats;
+ opts->max = BINDERFS_MAX_MINOR;
+ opts->stats_mode = STATS_NONE;
+
+ while ((p = strsep(&data, ",")) != NULL) {
+ substring_t args[MAX_OPT_ARGS];
+ int token;
+ int max_devices;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case Opt_max:
+ if (match_int(&args[0], &max_devices) ||
+ (max_devices < 0 ||
+ (max_devices > BINDERFS_MAX_MINOR)))
+ return -EINVAL;
+
+ opts->max = max_devices;
+ break;
+ case Opt_stats_mode:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EINVAL;
+
+ stats = match_strdup(&args[0]);
+ if (!stats)
+ return -ENOMEM;
+
+ if (strcmp(stats, "global") != 0) {
+ kfree(stats);
+ return -EINVAL;
+ }
+
+ opts->stats_mode = STATS_GLOBAL;
+ kfree(stats);
+ break;
+ default:
+ pr_err("Invalid mount options\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int binderfs_remount(struct super_block *sb, int *flags, char *data)
+{
+ int prev_stats_mode, ret;
+ struct binderfs_info *info = sb->s_fs_info;
+
+ prev_stats_mode = info->mount_opts.stats_mode;
+ ret = binderfs_parse_mount_opts(data, &info->mount_opts);
+ if (ret)
+ return ret;
+
+ if (prev_stats_mode != info->mount_opts.stats_mode) {
+ pr_err("Binderfs stats mode cannot be changed during a remount\n");
+ info->mount_opts.stats_mode = prev_stats_mode;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
+{
+ struct binderfs_info *info;
+
+ info = root->d_sb->s_fs_info;
+ if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
+ seq_printf(seq, ",max=%d", info->mount_opts.max);
+ if (info->mount_opts.stats_mode == STATS_GLOBAL)
+ seq_printf(seq, ",stats=global");
+
+ return 0;
+}
+
+static const struct super_operations binderfs_super_ops = {
+ .evict_inode = binderfs_evict_inode,
+ .remount_fs = binderfs_remount,
+ .show_options = binderfs_show_mount_opts,
+ .statfs = simple_statfs,
+};
+
+static inline bool is_binderfs_control_device(const struct dentry *dentry)
+{
+ struct binderfs_info *info = dentry->d_sb->s_fs_info;
+ return info->control_dentry == dentry;
+}
+
+static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (is_binderfs_control_device(old_dentry) ||
+ is_binderfs_control_device(new_dentry))
+ return -EPERM;
+
+ return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
+}
+
+static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ if (is_binderfs_control_device(dentry))
+ return -EPERM;
+
+ return simple_unlink(dir, dentry);
+}
+
+static const struct file_operations binder_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = nonseekable_open,
+ .unlocked_ioctl = binder_ctl_ioctl,
+ .compat_ioctl = binder_ctl_ioctl,
+ .llseek = noop_llseek,
+};
+
+/**
+ * binderfs_binder_ctl_create - create a new binder-control device
+ * @sb: super block of the binderfs mount
+ *
+ * This function creates a new binder-control device node in the binderfs mount
+ * referred to by @sb.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_ctl_create(struct super_block *sb)
+{
+ int minor, ret;
+ struct dentry *dentry;
+ struct binder_device *device;
+ struct inode *inode = NULL;
+ struct dentry *root = sb->s_root;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ /* If we have already created a binder-control node, return. */
+ if (info->control_dentry) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = -ENOMEM;
+ inode = new_inode(sb);
+ if (!inode)
+ goto out;
+
+ /* Reserve a new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ minor = ida_simple_get(&binderfs_minors, 0,
+ use_reserve ? BINDERFS_MAX_MINOR + 1:
+ BINDERFS_MAX_MINOR_CAPPED + 1,
+ GFP_KERNEL);
+ mutex_unlock(&binderfs_minors_mutex);
+ if (minor < 0) {
+ ret = minor;
+ goto out;
+ }
+
+ inode->i_ino = SECOND_INODE;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &binder_ctl_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ device->binderfs_inode = inode;
+ device->miscdev.minor = minor;
+
+ dentry = d_alloc_name(root, "binder-control");
+ if (!dentry)
+ goto out;
+
+ inode->i_private = device;
+ info->control_dentry = dentry;
+ d_add(dentry, inode);
+
+ return 0;
+
+out:
+ kfree(device);
+ iput(inode);
+
+ return ret;
+}
+
+static const struct inode_operations binderfs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .rename = binderfs_rename,
+ .unlink = binderfs_unlink,
+};
+
+static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret;
+
+ ret = new_inode(sb);
+ if (ret) {
+ ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
+ ret->i_mode = mode;
+ ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
+ }
+ return ret;
+}
+
+static struct dentry *binderfs_create_dentry(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (IS_ERR(dentry))
+ return dentry;
+
+ /* Return error if the file/dir already exists. */
+ if (d_really_is_positive(dentry)) {
+ dput(dentry);
+ return ERR_PTR(-EEXIST);
+ }
+
+ return dentry;
+}
+
+void binderfs_remove_file(struct dentry *dentry)
+{
+ struct inode *parent_inode;
+
+ parent_inode = d_inode(dentry->d_parent);
+ inode_lock(parent_inode);
+ if (simple_positive(dentry)) {
+ dget(dentry);
+ simple_unlink(parent_inode, dentry);
+ d_delete(dentry);
+ dput(dentry);
+ }
+ inode_unlock(parent_inode);
+}
+
+struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFREG | 0444);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = fops;
+ new_inode->i_private = data;
+ d_instantiate(dentry, new_inode);
+ fsnotify_create(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+static struct dentry *binderfs_create_dir(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFDIR | 0755);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = &simple_dir_operations;
+ new_inode->i_op = &simple_dir_inode_operations;
+
+ set_nlink(new_inode, 2);
+ d_instantiate(dentry, new_inode);
+ inc_nlink(parent_inode);
+ fsnotify_mkdir(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+static int init_binder_logs(struct super_block *sb)
+{
+ struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
+ struct binderfs_info *info;
+ int ret = 0;
+
+ binder_logs_root_dir = binderfs_create_dir(sb->s_root,
+ "binder_logs");
+ if (IS_ERR(binder_logs_root_dir)) {
+ ret = PTR_ERR(binder_logs_root_dir);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir, "stats",
+ &binder_stats_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir, "state",
+ &binder_state_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir, "transactions",
+ &binder_transactions_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir,
+ "transaction_log",
+ &binder_transaction_log_fops,
+ &binder_transaction_log);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = binderfs_create_file(binder_logs_root_dir,
+ "failed_transaction_log",
+ &binder_transaction_log_fops,
+ &binder_transaction_log_failed);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
+ if (IS_ERR(proc_log_dir)) {
+ ret = PTR_ERR(proc_log_dir);
+ goto out;
+ }
+ info = sb->s_fs_info;
+ info->proc_log_dir = proc_log_dir;
+
+out:
+ return ret;
+}
+
+static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ int ret;
+ struct binderfs_info *info;
+ struct inode *inode = NULL;
+ struct binderfs_device device_info = { 0 };
+ const char *name;
+ size_t len;
+
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
+
+ /*
+ * The binderfs filesystem can be mounted by userns root in a
+ * non-initial userns. By default such mounts have the SB_I_NODEV flag
+ * set in s_iflags to prevent security issues where userns root can
+ * just create random device nodes via mknod() since it owns the
+ * filesystem mount. But binderfs does not allow to create any files
+ * including devices nodes. The only way to create binder devices nodes
+ * is through the binder-control device which userns root is explicitly
+ * allowed to do. So removing the SB_I_NODEV flag from s_iflags is both
+ * necessary and safe.
+ */
+ sb->s_iflags &= ~SB_I_NODEV;
+ sb->s_iflags |= SB_I_NOEXEC;
+ sb->s_magic = BINDERFS_SUPER_MAGIC;
+ sb->s_op = &binderfs_super_ops;
+ sb->s_time_gran = 1;
+
+ sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+ if (!sb->s_fs_info)
+ return -ENOMEM;
+ info = sb->s_fs_info;
+
+ info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+
+ ret = binderfs_parse_mount_opts(data, &info->mount_opts);
+ if (ret)
+ return ret;
+
+ info->root_gid = make_kgid(sb->s_user_ns, 0);
+ if (!gid_valid(info->root_gid))
+ info->root_gid = GLOBAL_ROOT_GID;
+ info->root_uid = make_kuid(sb->s_user_ns, 0);
+ if (!uid_valid(info->root_uid))
+ info->root_uid = GLOBAL_ROOT_UID;
+
+ inode = new_inode(sb);
+ if (!inode)
+ return -ENOMEM;
+
+ inode->i_ino = FIRST_INODE;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_mode = S_IFDIR | 0755;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_op = &binderfs_dir_inode_operations;
+ set_nlink(inode, 2);
+
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ ret = binderfs_binder_ctl_create(sb);
+ if (ret)
+ return ret;
+
+ name = binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ strscpy(device_info.name, name, len + 1);
+ ret = binderfs_binder_device_create(inode, NULL, &device_info);
+ if (ret)
+ return ret;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ if (info->mount_opts.stats_mode == STATS_GLOBAL)
+ return init_binder_logs(sb);
+
+ return 0;
+}
+
+static struct dentry *binderfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data)
+{
+ return mount_nodev(fs_type, flags, data, binderfs_fill_super);
+}
+
+static void binderfs_kill_super(struct super_block *sb)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+
+ kill_litter_super(sb);
+
+ if (info && info->ipc_ns)
+ put_ipc_ns(info->ipc_ns);
+
+ kfree(info);
+}
+
+static struct file_system_type binder_fs_type = {
+ .name = "binder",
+ .mount = binderfs_mount,
+ .kill_sb = binderfs_kill_super,
+ .fs_flags = FS_USERNS_MOUNT,
+};
+
+int __init init_binderfs(void)
+{
+ int ret;
+ const char *name;
+ size_t len;
+
+ /* Verify that the default binderfs device names are valid. */
+ name = binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ if (len > BINDERFS_MAX_NAME)
+ return -E2BIG;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ /* Allocate new major number for binderfs. */
+ ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
+ "binder");
+ if (ret)
+ return ret;
+
+ ret = register_filesystem(&binder_fs_type);
+ if (ret) {
+ unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
+ return ret;
+ }
+
+ return ret;
+}
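
As an aside on the binder-control interface implemented above: the program below is an illustrative userspace sketch, not part of the patch. The mount point /dev/binderfs and the device name "my-binder" are assumptions; the sketch relies only on the BINDER_CTL_ADD ioctl and the struct binderfs_device fields (name, major, minor) used by the code above, via the UAPI header that binderfs.c includes.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binderfs.h>

int main(void)
{
        struct binderfs_device req;
        int fd, ret;

        memset(&req, 0, sizeof(req));
        strncpy(req.name, "my-binder", sizeof(req.name) - 1);

        /* Assumes a binderfs instance is already mounted at /dev/binderfs. */
        fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
        if (fd < 0) {
                perror("open binder-control");
                return 1;
        }

        /* On success the kernel writes the allocated major/minor back. */
        ret = ioctl(fd, BINDER_CTL_ADD, &req);
        close(fd);
        if (ret < 0) {
                perror("BINDER_CTL_ADD");
                return 1;
        }

        printf("created %s as %u:%u\n", req.name, req.major, req.minor);
        return 0;
}

On success the new node appears next to binder-control in the same binderfs mount, matching the d_instantiate() done in binderfs_binder_device_create() above.
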
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 5936d16..8beb81b 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/string.h>
#include "ahci.h"
@@ -87,6 +88,7 @@ struct brcm_ahci_priv {
u32 port_mask;
u32 quirks;
enum brcm_ahci_version version;
+ struct reset_control *rcdev;
};
static const struct ata_port_info ahci_brcm_port_info = {
@@ -221,19 +223,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
brcm_sata_phy_disable(priv, i);
}
-static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
+static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
struct brcm_ahci_priv *priv)
{
- void __iomem *ahci;
- struct resource *res;
u32 impl;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
- ahci = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(ahci))
- return 0;
-
- impl = readl(ahci + HOST_PORTS_IMPL);
+ impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
if (fls(impl) > SATA_TOP_MAX_PHYS)
dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
@@ -241,9 +236,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
else if (!impl)
dev_info(priv->dev, "no ports found\n");
- devm_iounmap(&pdev->dev, ahci);
- devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
-
return impl;
}
@@ -270,11 +262,10 @@ static int brcm_ahci_suspend(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
- int ret;
- ret = ahci_platform_suspend(dev);
brcm_sata_phys_disable(priv);
- return ret;
+
+ return ahci_platform_suspend(dev);
}
static int brcm_ahci_resume(struct device *dev)
@@ -282,11 +273,44 @@ static int brcm_ahci_resume(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
+ int ret;
+
+ /* Make sure clocks are turned on before re-configuration */
+ ret = ahci_platform_enable_clks(hpriv);
+ if (ret)
+ return ret;
brcm_sata_init(priv);
brcm_sata_phys_enable(priv);
brcm_sata_alpm_init(hpriv);
- return ahci_platform_resume(dev);
+
+ /* Since we had to enable clocks earlier on, we cannot use
+ * ahci_platform_resume() as-is since a second call to
+ * ahci_platform_enable_resources() would bump up the resources
+ * (regulators, clocks, PHYs) count artificially so we copy the part
+ * after ahci_platform_enable_resources().
+ */
+ ret = ahci_platform_enable_phys(hpriv);
+ if (ret)
+ goto out_disable_phys;
+
+ ret = ahci_platform_resume_host(dev);
+ if (ret)
+ goto out_disable_platform_phys;
+
+ /* We resumed so update PM runtime state */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+ brcm_sata_phys_disable(priv);
+ ahci_platform_disable_clks(hpriv);
+ return ret;
}
#endif
@@ -327,44 +351,74 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
+ /* Reset is optional depending on platform */
+ priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci");
+ if (!IS_ERR_OR_NULL(priv->rcdev))
+ reset_control_deassert(priv->rcdev);
+
if ((priv->version == BRCM_SATA_BCM7425) ||
(priv->version == BRCM_SATA_NSP)) {
priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
}
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv)) {
+ ret = PTR_ERR(hpriv);
+ goto out_reset;
+ }
+
+ ret = ahci_platform_enable_clks(hpriv);
+ if (ret)
+ goto out_reset;
+
+ /* Must be first so as to configure endianness including that
+ * of the standard AHCI register space.
+ */
brcm_sata_init(priv);
- priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
- if (!priv->port_mask)
- return -ENODEV;
+ /* Initializes priv->port_mask which is used below */
+ priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+ if (!priv->port_mask) {
+ ret = -ENODEV;
+ goto out_disable_clks;
+ }
+ /* Must be done before ahci_platform_enable_phys() */
brcm_sata_phys_enable(priv);
- hpriv = ahci_platform_get_resources(pdev);
- if (IS_ERR(hpriv))
- return PTR_ERR(hpriv);
hpriv->plat_data = priv;
hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
brcm_sata_alpm_init(hpriv);
- ret = ahci_platform_enable_resources(hpriv);
- if (ret)
- return ret;
-
if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
hpriv->flags |= AHCI_HFLAG_NO_NCQ;
hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
+ ret = ahci_platform_enable_phys(hpriv);
+ if (ret)
+ goto out_disable_phys;
+
ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
&ahci_platform_sht);
if (ret)
- return ret;
+ goto out_disable_platform_phys;
dev_info(dev, "Broadcom AHCI SATA3 registered\n");
return 0;
+
+out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+ brcm_sata_phys_disable(priv);
+out_disable_clks:
+ ahci_platform_disable_clks(hpriv);
+out_reset:
+ if (!IS_ERR_OR_NULL(priv->rcdev))
+ reset_control_assert(priv->rcdev);
+ return ret;
}
static int brcm_ahci_remove(struct platform_device *pdev)
@@ -374,12 +428,12 @@ static int brcm_ahci_remove(struct platform_device *pdev)
struct brcm_ahci_priv *priv = hpriv->plat_data;
int ret;
+ brcm_sata_phys_disable(priv);
+
ret = ata_platform_remove_one(pdev);
if (ret)
return ret;
- brcm_sata_phys_disable(priv);
-
return 0;
}
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 70cdbf1..5929672 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
* RETURNS:
* 0 on success otherwise a negative error code
*/
-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
{
int rc, i;
@@ -71,6 +71,7 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
}
return rc;
}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
/**
* ahci_platform_disable_phys - Disable PHYs
@@ -78,7 +79,7 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
*
* This function disables all PHYs found in hpriv->phys.
*/
-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
{
int i;
@@ -87,6 +88,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
phy_exit(hpriv->phys[i]);
}
}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
/**
* ahci_platform_enable_clks - Enable platform clocks
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cbb162b..08f67c1 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6676,6 +6676,9 @@ void ata_host_detach(struct ata_host *host)
{
int i;
+ /* Ensure ata_port probe has completed */
+ async_synchronize_full();
+
for (i = 0; i < host->n_ports; i++)
ata_port_detach(host->ports[i]);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ec61dd8..453e372 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -414,18 +414,20 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
return ret;
}
-static int lo_discard(struct loop_device *lo, struct request *rq, loff_t pos)
+static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
+ int mode)
{
/*
- * We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do not support discard if
- * encryption is enabled, because it may give an attacker
- * useful information.
+ * We use fallocate to manipulate the space mappings used by the image
+ * a.k.a. discard/zerorange. However we do not support this if
+ * encryption is enabled, because it may give an attacker useful
+ * information.
*/
struct file *file = lo->lo_backing_file;
- int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
int ret;
+ mode |= FALLOC_FL_KEEP_SIZE;
+
if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
ret = -EOPNOTSUPP;
goto out;
@@ -565,9 +567,17 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
switch (req_op(rq)) {
case REQ_OP_FLUSH:
return lo_req_flush(lo, rq);
- case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
- return lo_discard(lo, rq, pos);
+ /*
+ * If the caller doesn't want deallocation, call zeroout to
+ * write zeroes the range. Otherwise, punch them out.
+ */
+ return lo_fallocate(lo, rq, pos,
+ (rq->cmd_flags & REQ_NOUNMAP) ?
+ FALLOC_FL_ZERO_RANGE :
+ FALLOC_FL_PUNCH_HOLE);
+ case REQ_OP_DISCARD:
+ return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
if (lo->transfer)
return lo_write_transfer(lo, rq, pos);
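
To make the new lo_fallocate() mapping above concrete, the requests it handles reduce to the two fallocate(2) modes sketched below against the backing file. This is an illustrative fragment, not code from the patch; the descriptor, offset and length are placeholders.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

/* REQ_OP_DISCARD (and write-zeroes that may deallocate): punch a hole. */
static int demo_discard(int backing_fd, off_t pos, off_t len)
{
        return fallocate(backing_fd,
                         FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                         pos, len);
}

/* REQ_OP_WRITE_ZEROES with REQ_NOUNMAP: zero the range, keep it allocated. */
static int demo_write_zeroes_nounmap(int backing_fd, off_t pos, off_t len)
{
        return fallocate(backing_fd,
                         FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
                         pos, len);
}
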
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 929bd25..4c661ad 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1234,10 +1234,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
- if (ret) {
+ if (ret)
sock_shutdown(nbd);
- flush_workqueue(nbd->recv_workq);
- }
+ flush_workqueue(nbd->recv_workq);
+
mutex_lock(&nbd->config_lock);
bd_set_size(bdev, 0);
/* user requested, ignore socket errors */
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 987d665..c1d1b94 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -929,6 +929,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
out_of_memory:
pr_alert("%s: out of memory\n", __func__);
put_free_pages(ring, pages_to_gnt, segs_to_map);
+ for (i = last_map; i < num; i++)
+ pages[i]->handle = BLKBACK_INVALID_HANDLE;
return -ENOMEM;
}
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index ed4e807..e9fa4a1 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -178,6 +178,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
blkif->domid = domid;
atomic_set(&blkif->refcnt, 1);
init_completion(&blkif->drain_complete);
+
+ /*
+ * Because freeing back to the cache may be deferred, it is not
+ * safe to unload the module (and hence destroy the cache) until
+ * this has completed. To prevent premature unloading, take an
+ * extra module reference here and release only when the object
+ * has been freed back to the cache.
+ */
+ __module_get(THIS_MODULE);
INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
return blkif;
@@ -327,6 +336,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
/* Make sure everything is drained before shutting down */
kmem_cache_free(xen_blkif_cachep, blkif);
+ module_put(THIS_MODULE);
}
int __init xen_blkif_interface_init(void)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 73561bf..424f399 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -1123,7 +1123,7 @@ static int btusb_open(struct hci_dev *hdev)
if (data->setup_on_usb) {
err = data->setup_on_usb(hdev);
if (err < 0)
- return err;
+ goto setup_fail;
}
data->intf->needs_remote_wakeup = 1;
@@ -1155,6 +1155,7 @@ static int btusb_open(struct hci_dev *hdev)
failed:
clear_bit(BTUSB_INTR_RUNNING, &data->flags);
+setup_fail:
usb_autopm_put_interface(data->intf);
return err;
}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 90dd8e7..1c90da4 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -995,6 +995,12 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks)
tracks->xa = 0;
tracks->error = 0;
cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
+
+ if (!CDROM_CAN(CDC_PLAY_AUDIO)) {
+ tracks->error = CDS_NO_INFO;
+ return;
+ }
+
/* Grab the TOC header so we can see how many tracks there are */
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
if (ret) {
@@ -1161,7 +1167,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
ret = open_for_data(cdi);
if (ret)
goto err;
- cdrom_mmc3_profile(cdi);
+ if (CDROM_CAN(CDC_GENERIC_PACKET))
+ cdrom_mmc3_profile(cdi);
if (mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
@@ -2878,6 +2885,9 @@ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
it doesn't give enough information or fails. then we return
the toc contents. */
use_toc:
+ if (!CDROM_CAN(CDC_PLAY_AUDIO))
+ return -ENOSYS;
+
toc.cdte_format = CDROM_MSF;
toc.cdte_track = CDROM_LEADOUT;
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index 38b7190..648e39c 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -121,7 +121,8 @@ static int omap3_rom_rng_remove(struct platform_device *pdev)
{
cancel_delayed_work_sync(&idle_work);
hwrng_unregister(&omap3_rom_rng_ops);
- clk_disable_unprepare(rng_clk);
+ if (!rng_idle)
+ clk_disable_unprepare(rng_clk);
return 0;
}
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 25a3019..b67ea86 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -462,6 +462,7 @@ struct dummy_clk {
};
static struct dummy_clk dummy_clks[] __initdata = {
DUMMY_CLK(NULL, "pxa27x-gpio", "osc_32_768khz"),
+ DUMMY_CLK(NULL, "pxa-rtc", "osc_32_768khz"),
DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
};
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 8a06a61..171a090 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -351,6 +351,8 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
p = clk_hw_get_parent_by_index(hw, index);
if (clk_flags & CLK_SET_RATE_PARENT) {
if (f->pre_div) {
+ if (!rate)
+ rate = req->rate;
rate /= 2;
rate *= f->pre_div + 1;
}
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index f82a70d..26595b2 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -39,6 +39,9 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
if (!f)
return NULL;
+ if (!f->freq)
+ return f;
+
for (; f->freq; f++)
if (rate <= f->freq)
return f;
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 38cd2fe..0ce7607 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -198,6 +198,10 @@ static int __init asm9260_timer_init(struct device_node *np)
}
clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clk!\n");
+ return PTR_ERR(clk);
+ }
ret = clk_prepare_enable(clk);
if (ret) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a33e1b3..78e5c3e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2507,6 +2507,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (cpufreq_disabled())
return -ENODEV;
+ /*
+ * The cpufreq core depends heavily on the availability of device
+ * structure, make sure they are available before proceeding further.
+ */
+ if (!get_cpu_device(0))
+ return -EPROBE_DEFER;
+
if (!driver_data || !driver_data->verify || !driver_data->init ||
!(driver_data->setpolicy || driver_data->target_index ||
driver_data->target) ||
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 5cf64746..22e4918 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -81,7 +81,8 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
oi = 0;
oo = 0;
do {
- todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
+ todo = min(rx_cnt, ileft);
+ todo = min_t(size_t, todo, (mi.length - oi) / 4);
if (todo) {
ileft -= todo;
writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
@@ -96,7 +97,8 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
+ todo = min(tx_cnt, oleft);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
oleft -= todo;
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
@@ -220,7 +222,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* todo is the number of consecutive 4byte word that we
* can read from current SG
*/
- todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
+ todo = min(rx_cnt, ileft / 4);
+ todo = min_t(size_t, todo, (mi.length - oi) / 4);
if (todo && !ob) {
writesl(ss->base + SS_RXFIFO, mi.addr + oi,
todo);
@@ -234,8 +237,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* we need to be able to write all buf in one
* pass, so it is why we min() with rx_cnt
*/
- todo = min3(rx_cnt * 4 - ob, ileft,
- mi.length - oi);
+ todo = min(rx_cnt * 4 - ob, ileft);
+ todo = min_t(size_t, todo, mi.length - oi);
memcpy(buf + ob, mi.addr + oi, todo);
ileft -= todo;
oi += todo;
@@ -255,7 +258,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
+ dev_dbg(ss->dev,
+ "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
mode,
oi, mi.length, ileft, areq->cryptlen, rx_cnt,
oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
@@ -263,7 +267,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
if (!tx_cnt)
continue;
/* todo in 4bytes word */
- todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
+ todo = min(tx_cnt, oleft / 4);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
if (todo) {
readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
oleft -= todo * 4;
@@ -287,7 +292,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
* no more than remaining buffer
* no need to test against oleft
*/
- todo = min(mo.length - oo, obl - obo);
+ todo = min_t(size_t,
+ mo.length - oo, obl - obo);
memcpy(mo.addr + oo, bufo + obo, todo);
oleft -= todo;
obo += todo;
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
index f6936bb..1a72426 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -276,8 +276,8 @@ static int sun4i_hash(struct ahash_request *areq)
*/
while (op->len < 64 && i < end) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, end - i,
- 64 - op->len);
+ in_r = min(end - i, 64 - op->len);
+ in_r = min_t(size_t, mi.length - in_i, in_r);
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
op->len += in_r;
i += in_r;
@@ -297,8 +297,8 @@ static int sun4i_hash(struct ahash_request *areq)
}
if (mi.length - in_i > 3 && i < end) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, areq->nbytes - i,
- ((mi.length - in_i) / 4) * 4);
+ in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
+ in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
/* how many bytes we can write in the device*/
todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
@@ -324,8 +324,8 @@ static int sun4i_hash(struct ahash_request *areq)
if ((areq->nbytes - i) < 64) {
while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, areq->nbytes - i,
- 64 - op->len);
+ in_r = min(areq->nbytes - i, 64 - op->len);
+ in_r = min_t(size_t, mi.length - in_i, in_r);
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
op->len += in_r;
i += in_r;
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 5035b0d..e2231a1 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -110,8 +110,6 @@ virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
break;
default:
- pr_err("virtio_crypto: Unsupported key length: %d\n",
- key_len);
return -EINVAL;
}
return 0;
@@ -485,6 +483,11 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
+ if (!req->nbytes)
+ return 0;
+ if (req->nbytes % AES_BLOCK_SIZE)
+ return -EINVAL;
+
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
vc_sym_req->ablkcipher_ctx = ctx;
@@ -505,6 +508,11 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
+ if (!req->nbytes)
+ return 0;
+ if (req->nbytes % AES_BLOCK_SIZE)
+ return -EINVAL;
+
vc_req->dataq = data_vq;
vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
vc_sym_req->ablkcipher_ctx = ctx;
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index cab32cf..709670d 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -3,13 +3,13 @@
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-TARGET := linux-ppc64le
+override flavour := linux-ppc64le
else
-TARGET := linux-ppc64
+override flavour := linux-ppc64
endif
quiet_cmd_perl = PERL $@
- cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
+ cmd_perl = $(PERL) $(<) $(flavour) > $(@)
targets += aesp8-ppc.S ghashp8-ppc.S
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index d659dba..0d6256d 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -507,11 +507,6 @@ static void devfreq_dev_release(struct device *dev)
struct devfreq *devfreq = to_devfreq(dev);
mutex_lock(&devfreq_list_lock);
- if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
- mutex_unlock(&devfreq_list_lock);
- dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
- return;
- }
list_del(&devfreq->node);
mutex_unlock(&devfreq_list_lock);
@@ -571,6 +566,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->dev.parent = dev;
devfreq->dev.class = devfreq_class;
devfreq->dev.release = devfreq_dev_release;
+ INIT_LIST_HEAD(&devfreq->node);
devfreq->profile = profile;
strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
@@ -1037,7 +1033,7 @@ static ssize_t available_governors_show(struct device *d,
* The devfreq with immutable governor (e.g., passive) shows
* only own governor.
*/
- if (df->governor->immutable) {
+ if (df->governor && df->governor->immutable) {
count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
"%s ", df->governor_name);
/*
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 6f80eb6..acae392 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -187,6 +187,7 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
/* Cleans the error report buffer */
memset(e, 0, sizeof (*e));
e->error_count = 1;
+ e->grain = 1;
strcpy(e->label, "unknown label");
e->msg = pvt->msg;
e->other_detail = pvt->other_detail;
@@ -282,7 +283,7 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
/* Error grain */
if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
- e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
+ e->grain = ~mem_err->physical_addr_mask + 1;
/* Memory error location, mapped on e->location */
p = e->location;
@@ -389,8 +390,13 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
if (p > pvt->other_detail)
*(p - 1) = '\0';
+ /* Sanity-check driver-supplied grain value. */
+ if (WARN_ON_ONCE(!e->grain))
+ e->grain = 1;
+
+ grain_bits = fls_long(e->grain - 1);
+
/* Generate the trace event */
- grain_bits = fls_long(e->grain);
snprintf(pvt->detail_location, sizeof(pvt->detail_location),
"APEI location: %s %s", e->location, e->other_detail);
trace_mc_event(type, e->msg, e->label, e->error_count,
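
A concrete check of the grain_bits change above (illustrative only, not from the patch): with a 4 KiB grain, fls_long(grain) over-reports the granularity by one bit, while fls_long(grain - 1) does not. A userspace stand-in, assuming a 64-bit unsigned long:

#include <assert.h>

/* Userspace stand-in for the kernel's fls_long(): 1-based index of MSB. */
static int fls_long_demo(unsigned long x)
{
        return x ? 64 - __builtin_clzl(x) : 0;
}

int main(void)
{
        unsigned long grain = 4096;     /* 4 KiB reporting granularity */

        assert(fls_long_demo(grain) == 13);     /* old code: one bit too many */
        assert(fls_long_demo(grain - 1) == 12); /* patched: 2^12 == 4 KiB     */
        return 0;
}
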
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 106ef029..1a1ee3d 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -69,6 +69,10 @@ struct sm5502_muic_info {
/* Default value of SM5502 register to bring up MUIC device. */
static struct reg_data sm5502_reg_data[] = {
{
+ .reg = SM5502_REG_RESET,
+ .val = SM5502_REG_RESET_MASK,
+ .invert = true,
+ }, {
.reg = SM5502_REG_CONTROL,
.val = SM5502_REG_CONTROL_MASK_INT_MASK,
.invert = false,
diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h
index 974b532..12f8b01 100644
--- a/drivers/extcon/extcon-sm5502.h
+++ b/drivers/extcon/extcon-sm5502.h
@@ -241,6 +241,8 @@ enum sm5502_reg {
#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
| (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
+#define SM5502_REG_RESET_MASK (0x1)
+
/* SM5502 Interrupts */
enum sm5502_irq {
/* INT1 */
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 242359c..215f4f7 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -249,7 +249,11 @@ static int fwnet_header_cache(const struct neighbour *neigh,
h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
h->h_proto = type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
- hh->hh_len = FWNET_HLEN;
+
+ /* Pairs with the READ_ONCE() in neigh_resolve_output(),
+ * neigh_hh_output() and neigh_update_hhs().
+ */
+ smp_store_release(&hh->hh_len, FWNET_HLEN);
return 0;
}
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 24c461d..fd8053f 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -86,30 +86,6 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
static efi_status_t
-__gop_query32(efi_system_table_t *sys_table_arg,
- struct efi_graphics_output_protocol_32 *gop32,
- struct efi_graphics_output_mode_info **info,
- unsigned long *size, u64 *fb_base)
-{
- struct efi_graphics_output_protocol_mode_32 *mode;
- efi_graphics_output_protocol_query_mode query_mode;
- efi_status_t status;
- unsigned long m;
-
- m = gop32->mode;
- mode = (struct efi_graphics_output_protocol_mode_32 *)m;
- query_mode = (void *)(unsigned long)gop32->query_mode;
-
- status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
- info);
- if (status != EFI_SUCCESS)
- return status;
-
- *fb_base = mode->frame_buffer_base;
- return status;
-}
-
-static efi_status_t
setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
efi_guid_t *proto, unsigned long size, void **gop_handle)
{
@@ -121,7 +97,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
u64 fb_base;
struct efi_pixel_bitmask pixel_info;
int pixel_format;
- efi_status_t status = EFI_NOT_FOUND;
+ efi_status_t status;
u32 *handles = (u32 *)(unsigned long)gop_handle;
int i;
@@ -130,6 +106,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
nr_gops = size / sizeof(u32);
for (i = 0; i < nr_gops; i++) {
+ struct efi_graphics_output_protocol_mode_32 *mode;
struct efi_graphics_output_mode_info *info = NULL;
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
bool conout_found = false;
@@ -147,9 +124,11 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
if (status == EFI_SUCCESS)
conout_found = true;
- status = __gop_query32(sys_table_arg, gop32, &info, &size,
- &current_fb_base);
- if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+ mode = (void *)(unsigned long)gop32->mode;
+ info = (void *)(unsigned long)mode->info;
+ current_fb_base = mode->frame_buffer_base;
+
+ if ((!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
@@ -177,7 +156,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
/* Did we find any GOPs? */
if (!first_gop)
- goto out;
+ return EFI_NOT_FOUND;
/* EFI framebuffer */
si->orig_video_isVGA = VIDEO_TYPE_EFI;
@@ -199,32 +178,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
si->lfb_size = si->lfb_linelength * si->lfb_height;
si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
- return status;
-}
-static efi_status_t
-__gop_query64(efi_system_table_t *sys_table_arg,
- struct efi_graphics_output_protocol_64 *gop64,
- struct efi_graphics_output_mode_info **info,
- unsigned long *size, u64 *fb_base)
-{
- struct efi_graphics_output_protocol_mode_64 *mode;
- efi_graphics_output_protocol_query_mode query_mode;
- efi_status_t status;
- unsigned long m;
-
- m = gop64->mode;
- mode = (struct efi_graphics_output_protocol_mode_64 *)m;
- query_mode = (void *)(unsigned long)gop64->query_mode;
-
- status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
- info);
- if (status != EFI_SUCCESS)
- return status;
-
- *fb_base = mode->frame_buffer_base;
- return status;
+ return EFI_SUCCESS;
}
static efi_status_t
@@ -239,7 +194,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
u64 fb_base;
struct efi_pixel_bitmask pixel_info;
int pixel_format;
- efi_status_t status = EFI_NOT_FOUND;
+ efi_status_t status;
u64 *handles = (u64 *)(unsigned long)gop_handle;
int i;
@@ -248,6 +203,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
nr_gops = size / sizeof(u64);
for (i = 0; i < nr_gops; i++) {
+ struct efi_graphics_output_protocol_mode_64 *mode;
struct efi_graphics_output_mode_info *info = NULL;
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
bool conout_found = false;
@@ -265,9 +221,11 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
if (status == EFI_SUCCESS)
conout_found = true;
- status = __gop_query64(sys_table_arg, gop64, &info, &size,
- &current_fb_base);
- if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+ mode = (void *)(unsigned long)gop64->mode;
+ info = (void *)(unsigned long)mode->info;
+ current_fb_base = mode->frame_buffer_base;
+
+ if ((!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
@@ -295,7 +253,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
/* Did we find any GOPs? */
if (!first_gop)
- goto out;
+ return EFI_NOT_FOUND;
/* EFI framebuffer */
si->orig_video_isVGA = VIDEO_TYPE_EFI;
@@ -317,8 +275,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
si->lfb_size = si->lfb_linelength * si->lfb_height;
si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
- return status;
+
+ return EFI_SUCCESS;
}
/*
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 4ea63d9..8feca59 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -419,6 +419,31 @@ static int fsi_slave_scan(struct fsi_slave *slave)
return 0;
}
+static unsigned long aligned_access_size(size_t offset, size_t count)
+{
+ unsigned long offset_unit, count_unit;
+
+ /* Criteria:
+ *
+ * 1. Access size must be less than or equal to the maximum access
+ * width or the highest power-of-two factor of offset
+ * 2. Access size must be less than or equal to the amount specified by
+ * count
+ *
+ * The access width is optimal when criterion 1 is met with equality
+ * while still satisfying criterion 2.
+ */
+
+ /* Find 1 by the bottom bit of offset (with a 4 byte access cap) */
+ offset_unit = BIT(__builtin_ctzl(offset | 4));
+
+ /* Find 2 by the top bit of count */
+ count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count));
+
+ /* Constrain the maximum access width to the minimum of both criteria */
+ return BIT(__builtin_ctzl(offset_unit | count_unit));
+}
+
static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -434,8 +459,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
return -EINVAL;
for (total_len = 0; total_len < count; total_len += read_len) {
- read_len = min_t(size_t, count, 4);
- read_len -= off & 0x3;
+ read_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_read(slave, off, buf + total_len, read_len);
if (rc)
@@ -462,8 +486,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
return -EINVAL;
for (total_len = 0; total_len < count; total_len += write_len) {
- write_len = min_t(size_t, count, 4);
- write_len -= off & 0x3;
+ write_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_write(slave, off, buf + total_len, write_len);
if (rc)
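
One way to sanity-check the aligned_access_size() helper above is to replicate it in userspace and step through a misaligned transfer. The sketch below is illustrative only; it assumes the same 4-byte access cap as the driver, and the offsets are arbitrary examples.

#include <assert.h>
#include <stddef.h>

/* Userspace copy of the helper above, for illustration only. */
static unsigned long aligned_access_size(size_t offset, size_t count)
{
        unsigned long offset_unit, count_unit;

        /* Largest power-of-two factor of offset, capped at 4 bytes. */
        offset_unit = 1UL << __builtin_ctzl(offset | 4);
        /* Largest power of two not exceeding count. */
        count_unit = 1UL << (8 * sizeof(unsigned long) - 1 -
                             __builtin_clzl(count));

        /* Minimum of the two (both are powers of two). */
        return 1UL << __builtin_ctzl(offset_unit | count_unit);
}

int main(void)
{
        /* A 9-byte read at offset 0x1002 proceeds as 2 + 4 + 2 + 1 bytes. */
        assert(aligned_access_size(0x1002, 9) == 2);
        assert(aligned_access_size(0x1004, 7) == 4);
        assert(aligned_access_size(0x1008, 3) == 2);
        assert(aligned_access_size(0x100a, 1) == 1);
        return 0;
}
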
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 8c93dec..e7783b8 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -337,7 +337,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
* It's assumed that only a single type of gpio controller is available
* on the current machine, so overwriting global data is fine.
*/
- mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
+ if (devtype->irq_set_type)
+ mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
if (devtype->gpio_dir_out)
gc->direction_output = devtype->gpio_dir_out;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 3aa7fe6..c7b9125 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -24,11 +24,19 @@
#include "gpiolib.h"
+#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l
+#define QUIRK_NO_WAKEUP 0x02l
+
static int run_edge_events_on_boot = -1;
module_param(run_edge_events_on_boot, int, 0444);
MODULE_PARM_DESC(run_edge_events_on_boot,
"Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+static int honor_wakeup = -1;
+module_param(honor_wakeup, int, 0444);
+MODULE_PARM_DESC(honor_wakeup,
+ "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
+
/**
* struct acpi_gpio_event - ACPI GPIO event handler data
*
@@ -339,7 +347,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
event->handle = evt_handle;
event->handler = handler;
event->irq = irq;
- event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+ event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
event->pin = pin;
event->desc = desc;
@@ -1312,7 +1320,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
-static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+static const struct dmi_system_id gpiolib_acpi_quirks[] = {
{
/*
* The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
@@ -1322,7 +1330,8 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
- }
+ },
+ .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
},
{
/*
@@ -1334,20 +1343,52 @@ static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
- }
+ },
+ .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+ },
+ {
+ /*
+ * Various HP X2 10 Cherry Trail models use an external
+ * embedded-controller connected via I2C + an ACPI GPIO
+ * event handler. The embedded controller generates various
+ * spurious wakeup events when suspended. So disable wakeup
+ * for its handler (it uses the only ACPI GPIO event handler).
+ * This breaks wakeup when opening the lid; the user needs
+ * to press the power button to wake up the system. The
+ * alternative is suspend simply not working, which is worse.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+ },
+ .driver_data = (void *)QUIRK_NO_WAKEUP,
},
{} /* Terminating entry */
};
static int acpi_gpio_setup_params(void)
{
+ const struct dmi_system_id *id;
+ long quirks = 0;
+
+ id = dmi_first_match(gpiolib_acpi_quirks);
+ if (id)
+ quirks = (long)id->driver_data;
+
if (run_edge_events_on_boot < 0) {
- if (dmi_check_system(run_edge_events_on_boot_blacklist))
+ if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
run_edge_events_on_boot = 0;
else
run_edge_events_on_boot = 1;
}
+ if (honor_wakeup < 0) {
+ if (quirks & QUIRK_NO_WAKEUP)
+ honor_wakeup = 0;
+ else
+ honor_wakeup = 1;
+ }
+
return 0;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a5dc1d7..f5a25fa 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -206,6 +206,14 @@ int gpiod_get_direction(struct gpio_desc *desc)
chip = gpiod_to_chip(desc);
offset = gpio_chip_hwgpio(desc);
+ /*
+ * Open drain emulation using input mode may incorrectly report
+ * input here, fix that up.
+ */
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
+ test_bit(FLAG_IS_OUT, &desc->flags))
+ return 0;
+
if (!chip->get_direction)
return status;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index ed8c373..b35b074 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -125,6 +125,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
dma_fence_put(fence);
+ fence = NULL;
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@@ -170,6 +171,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
dma_fence_put(fence);
+ fence = NULL;
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index 9385eb0..cd2bfd7 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -725,7 +725,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
/* 1.0V digital core power regulator */
pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
if (IS_ERR(pdata->dvdd10)) {
- DRM_ERROR("DVDD10 regulator not found\n");
+ if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
+ DRM_ERROR("DVDD10 regulator not found\n");
+
return PTR_ERR(pdata->dvdd10);
}
@@ -1344,7 +1346,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
err = anx78xx_init_pdata(anx78xx);
if (err) {
- DRM_ERROR("Failed to initialize pdata: %d\n", err);
+ if (err != -EPROBE_DEFER)
+ DRM_ERROR("Failed to initialize pdata: %d\n", err);
+
return err;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 4db31b8..cc1094f 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -39,6 +39,7 @@
#include <media/cec-notifier.h>
+#define DDC_CI_ADDR 0x37
#define DDC_SEGMENT_ADDR 0x30
#define HDMI_EDID_LEN 512
@@ -320,6 +321,15 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
u8 addr = msgs[0].addr;
int i, ret = 0;
+ if (addr == DDC_CI_ADDR)
+ /*
+ * The internal I2C controller does not support the multi-byte
+ * read and write operations needed for DDC/CI.
+ * TOFIX: Blacklist the DDC/CI address until we filter out
+ * unsupported I2C operations.
+ */
+ return -EOPNOTSUPP;
+
dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
for (i = 0; i < num; i++) {
@@ -1733,7 +1743,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step E - Configure audio */
hdmi_clk_regenerator_update_pixel_clock(hdmi);
- hdmi_enable_audio_clk(hdmi, true);
+ hdmi_enable_audio_clk(hdmi, hdmi->audio_enable);
}
/* not for DVI mode */
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 0aefcb3..93cfb81 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -270,7 +270,7 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
idx += req->u.i2c_read.transactions[i].num_bytes;
- buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+ buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
idx++;
}
@@ -1590,7 +1590,11 @@ static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
if (ret != 1)
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ if (txmsg->seqno != -1) {
+ WARN_ON((unsigned int)txmsg->seqno >
+ ARRAY_SIZE(txmsg->dst->tx_slots));
+ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ }
}
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f1259a0..eb6bf88 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1590,7 +1590,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
* Changes struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
*/
- if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
+ if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
@@ -1616,6 +1616,11 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
/*
+ * Likewise, bits_per_pixel should be rounded up to a supported value.
+ */
+ var->bits_per_pixel = fb->format->cpp[0] * 8;
+
+ /*
* drm fbdev emulation doesn't support changing the pixel format at all,
* so reject all pixel format changing requests.
*/
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 0fff269..42785f3 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -139,6 +139,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
s32 freq_error, min_error = 100000;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.n = limit->n.min; clock.n <= limit->n.max;
@@ -195,6 +196,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e0483c0..baff1f0 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1101,17 +1101,14 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_NOOP;
- /* WaClearSlmSpaceAtContextSwitch:kbl */
- /* Actual scratch location is at 128 bytes offset */
- if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
- batch = gen8_emit_pipe_control(batch,
- PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch)
- + 2 * CACHELINE_BYTES);
- }
+ /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE,
+ i915_ggtt_offset(engine->scratch) +
+ 2 * CACHELINE_BYTES);
/* WaMediaPoolStateCmdInWABB:bxt,glk */
if (HAS_POOLED_EU(engine->i915)) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index dc7454e..b46e99f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -29,6 +29,7 @@
#include <nvif/notify.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_dp_helper.h>
@@ -37,6 +38,60 @@
struct nvkm_i2c_port;
+#define nouveau_conn_atom(p) \
+ container_of((p), struct nouveau_conn_atom, state)
+
+struct nouveau_conn_atom {
+ struct drm_connector_state state;
+
+ struct {
+ /* The enum values specifically defined here match nv50/gf119
+ * hw values, and the code relies on this.
+ */
+ enum {
+ DITHERING_MODE_OFF = 0x00,
+ DITHERING_MODE_ON = 0x01,
+ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+ DITHERING_MODE_AUTO
+ } mode;
+ enum {
+ DITHERING_DEPTH_6BPC = 0x00,
+ DITHERING_DEPTH_8BPC = 0x02,
+ DITHERING_DEPTH_AUTO
+ } depth;
+ } dither;
+
+ struct {
+ int mode; /* DRM_MODE_SCALE_* */
+ struct {
+ enum {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+ } mode;
+ u32 hborder;
+ u32 vborder;
+ } underscan;
+ bool full;
+ } scaler;
+
+ struct {
+ int color_vibrance;
+ int vibrant_hue;
+ } procamp;
+
+ union {
+ struct {
+ bool dither:1;
+ bool scaler:1;
+ bool procamp:1;
+ };
+ u8 mask;
+ } set;
+};
+
struct nouveau_connector {
struct drm_connector base;
enum dcb_connector_type type;
@@ -111,61 +166,6 @@ extern int nouveau_ignorelid;
extern int nouveau_duallink;
extern int nouveau_hdmimhz;
-#include <drm/drm_crtc.h>
-#define nouveau_conn_atom(p) \
- container_of((p), struct nouveau_conn_atom, state)
-
-struct nouveau_conn_atom {
- struct drm_connector_state state;
-
- struct {
- /* The enum values specifically defined here match nv50/gf119
- * hw values, and the code relies on this.
- */
- enum {
- DITHERING_MODE_OFF = 0x00,
- DITHERING_MODE_ON = 0x01,
- DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
- DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
- DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
- DITHERING_MODE_AUTO
- } mode;
- enum {
- DITHERING_DEPTH_6BPC = 0x00,
- DITHERING_DEPTH_8BPC = 0x02,
- DITHERING_DEPTH_AUTO
- } depth;
- } dither;
-
- struct {
- int mode; /* DRM_MODE_SCALE_* */
- struct {
- enum {
- UNDERSCAN_OFF,
- UNDERSCAN_ON,
- UNDERSCAN_AUTO,
- } mode;
- u32 hborder;
- u32 vborder;
- } underscan;
- bool full;
- } scaler;
-
- struct {
- int color_vibrance;
- int vibrant_hue;
- } procamp;
-
- union {
- struct {
- bool dither:1;
- bool scaler:1;
- bool procamp:1;
- };
- u8 mask;
- } set;
-};
-
void nouveau_conn_attach_properties(struct drm_connector *);
void nouveau_conn_reset(struct drm_connector *);
struct drm_connector_state *
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 3cf1a69..298d6a8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -438,8 +438,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
cec_unregister_adapter(hdmi->cec_adap);
- drm_connector_cleanup(&hdmi->connector);
- drm_encoder_cleanup(&hdmi->encoder);
i2c_del_adapter(hdmi->i2c);
clk_disable_unprepare(hdmi->mod_clk);
clk_disable_unprepare(hdmi->bus_clk);
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index acd9978..67f3c05 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -545,7 +545,8 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
return err;
}
-static inline int copy_gathers(struct host1x_job *job, struct device *dev)
+static inline int copy_gathers(struct device *host, struct host1x_job *job,
+ struct device *dev)
{
struct host1x_firewall fw;
size_t size = 0;
@@ -570,12 +571,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
* Try a non-blocking allocation from a higher priority pools first,
* as awaiting for the allocation here is a major performance hit.
*/
- job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
+ job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
GFP_NOWAIT);
/* the higher priority allocation failed, try the generic-blocking */
if (!job->gather_copy_mapped)
- job->gather_copy_mapped = dma_alloc_wc(dev, size,
+ job->gather_copy_mapped = dma_alloc_wc(host, size,
&job->gather_copy,
GFP_KERNEL);
if (!job->gather_copy_mapped)
@@ -636,7 +637,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
goto out;
if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
- err = copy_gathers(job, dev);
+ err = copy_gathers(host->dev, job, dev);
if (err)
goto out;
}
@@ -701,7 +702,7 @@ void host1x_job_unpin(struct host1x_job *job)
job->num_unpins = 0;
if (job->gather_copy_size)
- dma_free_wc(job->channel->dev, job->gather_copy_size,
+ dma_free_wc(host->dev, job->gather_copy_size,
job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e742064..69fac3d 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -268,6 +268,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
+ /* Total size check: Allow for possible report index byte */
+ if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+ hid_err(parser->device, "report is too long\n");
+ return -1;
+ }
+
if (!parser->local.usage_index) /* Ignore padding fields */
return 0;
@@ -760,6 +766,10 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
parser->global.report_size == 8)
parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
+
+ if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
+ parser->global.report_size == 8)
+ parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}
static void hid_scan_collection(struct hid_parser *parser, unsigned type)
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 9d24fb0..14e4003 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1116,9 +1116,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
mapped:
- if (device->driver->input_mapped && device->driver->input_mapped(device,
- hidinput, field, usage, &bit, &max) < 0)
- goto ignore;
+ if (device->driver->input_mapped &&
+ device->driver->input_mapped(device, hidinput, field, usage,
+ &bit, &max) < 0) {
+ /*
+ * The driver indicated that no further generic handling
+ * of the usage is desired.
+ */
+ return;
+ }
set_bit(usage->type, input->evbit);
@@ -1176,9 +1182,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
set_bit(MSC_SCAN, input->mscbit);
}
-ignore:
return;
+ignore:
+ usage->type = 0;
+ usage->code = 0;
}
void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value)
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 4706fb8..6ad776b 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -978,6 +978,9 @@ static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp,
ret = hidpp_send_fap_command_sync(hidpp, feature_index,
CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS,
NULL, 0, &response);
+ /* Ignore these intermittent errors */
+ if (ret == HIDPP_ERROR_RESOURCE_ERROR)
+ return -EIO;
if (ret > 0) {
hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
__func__, ret);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 6f67d73..e63b761 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -25,6 +25,7 @@
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>
+#include <linux/eventpoll.h>
#define UHID_NAME "uhid"
#define UHID_BUFSIZE 32
@@ -774,7 +775,7 @@ static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
if (uhid->head != uhid->tail)
return POLLIN | POLLRDNORM;
- return 0;
+ return EPOLLOUT | EPOLLWRNORM;
}
static const struct file_operations uhid_fops = {
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index ce342fd..bccd97c 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -254,12 +254,51 @@ static int hiddev_release(struct inode * inode, struct file * file)
return 0;
}
+static int __hiddev_open(struct hiddev *hiddev, struct file *file)
+{
+ struct hiddev_list *list;
+ int error;
+
+ lockdep_assert_held(&hiddev->existancelock);
+
+ list = vzalloc(sizeof(*list));
+ if (!list)
+ return -ENOMEM;
+
+ mutex_init(&list->thread_lock);
+ list->hiddev = hiddev;
+
+ if (!hiddev->open++) {
+ error = hid_hw_power(hiddev->hid, PM_HINT_FULLON);
+ if (error < 0)
+ goto err_drop_count;
+
+ error = hid_hw_open(hiddev->hid);
+ if (error < 0)
+ goto err_normal_power;
+ }
+
+ spin_lock_irq(&hiddev->list_lock);
+ list_add_tail(&list->node, &hiddev->list);
+ spin_unlock_irq(&hiddev->list_lock);
+
+ file->private_data = list;
+
+ return 0;
+
+err_normal_power:
+ hid_hw_power(hiddev->hid, PM_HINT_NORMAL);
+err_drop_count:
+ hiddev->open--;
+ vfree(list);
+ return error;
+}
+
/*
* open file op
*/
static int hiddev_open(struct inode *inode, struct file *file)
{
- struct hiddev_list *list;
struct usb_interface *intf;
struct hid_device *hid;
struct hiddev *hiddev;
@@ -268,66 +307,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
intf = usbhid_find_interface(iminor(inode));
if (!intf)
return -ENODEV;
+
hid = usb_get_intfdata(intf);
hiddev = hid->hiddev;
- if (!(list = vzalloc(sizeof(struct hiddev_list))))
- return -ENOMEM;
- mutex_init(&list->thread_lock);
- list->hiddev = hiddev;
- file->private_data = list;
-
- /*
- * no need for locking because the USB major number
- * is shared which usbcore guards against disconnect
- */
- if (list->hiddev->exist) {
- if (!list->hiddev->open++) {
- res = hid_hw_open(hiddev->hid);
- if (res < 0)
- goto bail;
- }
- } else {
- res = -ENODEV;
- goto bail;
- }
-
- spin_lock_irq(&list->hiddev->list_lock);
- list_add_tail(&list->node, &hiddev->list);
- spin_unlock_irq(&list->hiddev->list_lock);
-
mutex_lock(&hiddev->existancelock);
- /*
- * recheck exist with existance lock held to
- * avoid opening a disconnected device
- */
- if (!list->hiddev->exist) {
- res = -ENODEV;
- goto bail_unlock;
- }
- if (!list->hiddev->open++)
- if (list->hiddev->exist) {
- struct hid_device *hid = hiddev->hid;
- res = hid_hw_power(hid, PM_HINT_FULLON);
- if (res < 0)
- goto bail_unlock;
- res = hid_hw_open(hid);
- if (res < 0)
- goto bail_normal_power;
- }
- mutex_unlock(&hiddev->existancelock);
- return 0;
-bail_normal_power:
- hid_hw_power(hid, PM_HINT_NORMAL);
-bail_unlock:
+ res = hiddev->exist ? __hiddev_open(hiddev, file) : -ENODEV;
mutex_unlock(&hiddev->existancelock);
- spin_lock_irq(&list->hiddev->list_lock);
- list_del(&list->node);
- spin_unlock_irq(&list->hiddev->list_lock);
-bail:
- file->private_data = NULL;
- vfree(list);
return res;
}
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index c224b92..fc37144 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -189,6 +189,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Comet Lake PCH-V */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa3a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Ice Lake NNPI */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -213,6 +218,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Elkhart Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index ab8d6ae..2a299bb 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -528,6 +528,10 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
u16 conflict;
unsigned int trigger_chan;
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&dln2->mutex);
/* Enable ADC */
@@ -541,6 +545,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
(int)conflict);
ret = -EBUSY;
}
+ iio_triggered_buffer_predisable(indio_dev);
return ret;
}
@@ -554,6 +559,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
mutex_unlock(&dln2->mutex);
if (ret < 0) {
dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+ iio_triggered_buffer_predisable(indio_dev);
return ret;
}
} else {
@@ -561,12 +567,12 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
mutex_unlock(&dln2->mutex);
}
- return iio_triggered_buffer_postenable(indio_dev);
+ return 0;
}
static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
- int ret;
+ int ret, ret2;
struct dln2_adc *dln2 = iio_priv(indio_dev);
mutex_lock(&dln2->mutex);
@@ -581,12 +587,14 @@ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
ret = dln2_adc_set_port_enabled(dln2, false, NULL);
mutex_unlock(&dln2->mutex);
- if (ret < 0) {
+ if (ret < 0)
dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
- return ret;
- }
- return iio_triggered_buffer_predisable(indio_dev);
+ ret2 = iio_triggered_buffer_predisable(indio_dev);
+ if (ret == 0)
+ ret = ret2;
+
+ return ret;
}
static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index ebc7159..03af027 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -462,6 +462,14 @@ static int max1027_probe(struct spi_device *spi)
goto fail_dev_register;
}
+ /* Internal reset */
+ st->reg = MAX1027_RST_REG;
+ ret = spi_write(st->spi, &st->reg, 1);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to reset the ADC\n");
+ return ret;
+ }
+
/* Disable averaging */
st->reg = MAX1027_AVG_REG;
ret = spi_write(st->spi, &st->reg, 1);
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 33be07c..8649a61 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -92,6 +92,12 @@
#define MAX9611_TEMP_SCALE_NUM 1000000
#define MAX9611_TEMP_SCALE_DIV 2083
+/*
+ * Conversion time is 2 ms (typically) at Ta=25 degreeC
+ * No maximum value is known, so play it safe.
+ */
+#define MAX9611_CONV_TIME_US_RANGE 3000, 3300
+
struct max9611_dev {
struct device *dev;
struct i2c_client *i2c_client;
@@ -239,11 +245,9 @@ static int max9611_read_single(struct max9611_dev *max9611,
return ret;
}
- /*
- * need a delay here to make register configuration
- * stabilize. 1 msec at least, from empirical testing.
- */
- usleep_range(1000, 2000);
+ /* need a delay here to make register configuration stabilize. */
+
+ usleep_range(MAX9611_CONV_TIME_US_RANGE);
ret = i2c_smbus_read_word_swapped(max9611->i2c_client, reg_addr);
if (ret < 0) {
@@ -511,7 +515,7 @@ static int max9611_init(struct max9611_dev *max9611)
MAX9611_REG_CTRL2, 0);
return ret;
}
- usleep_range(1000, 2000);
+ usleep_range(MAX9611_CONV_TIME_US_RANGE);
return 0;
}
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 6c61187..0b7ba02 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -62,9 +62,9 @@ struct bh1750_chip_info {
u16 int_time_low_mask;
u16 int_time_high_mask;
-}
+};
-static const bh1750_chip_info_tbl[] = {
+static const struct bh1750_chip_info bh1750_chip_info_tbl[] = {
[BH1710] = { 140, 1022, 300, 400, 250000000, 2, 0x001F, 0x03E0 },
[BH1721] = { 140, 1020, 300, 400, 250000000, 2, 0x0010, 0x03E0 },
[BH1750] = { 31, 254, 69, 1740, 57500000, 1, 0x001F, 0x00E0 },
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f698c6a..fc4630e 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4568,6 +4568,7 @@ static int __init cma_init(void)
unregister_netdevice_notifier(&cma_nb);
rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
+ unregister_pernet_subsys(&cma_pernet_operations);
err_wq:
destroy_workqueue(cma_wq);
return ret;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0299c06..7e73a1a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -3073,16 +3073,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->ib_active = false;
flush_workqueue(wq);
- mlx4_ib_close_sriov(ibdev);
- mlx4_ib_mad_cleanup(ibdev);
- ib_unregister_device(&ibdev->ib_dev);
- mlx4_ib_diag_cleanup(ibdev);
if (ibdev->iboe.nb.notifier_call) {
if (unregister_netdevice_notifier(&ibdev->iboe.nb))
pr_warn("failure unregistering notifier\n");
ibdev->iboe.nb.notifier_call = NULL;
}
+ mlx4_ib_close_sriov(ibdev);
+ mlx4_ib_mad_cleanup(ibdev);
+ ib_unregister_device(&ibdev->ib_dev);
+ mlx4_ib_diag_cleanup(ibdev);
+
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
kfree(ibdev->ib_uc_qpns_bitmap);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 7f4cc93..656e7c1 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1343,6 +1343,14 @@ static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
if (qp->urq.umem)
ib_umem_release(qp->urq.umem);
qp->urq.umem = NULL;
+
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
+ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ kfree(qp->urq.pbl_tbl);
+ }
}
static int qedr_create_user_qp(struct qedr_dev *dev,
@@ -2331,8 +2339,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
- if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
- qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+ if (mr->type != QEDR_MR_DMA)
+ free_mr_info(dev, &mr->info);
/* it could be user registered memory. */
if (mr->umem)
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 83412df..b7098f7 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -393,7 +393,7 @@ int rxe_rcv(struct sk_buff *skb)
calc_icrc = rxe_icrc_hdr(pkt, skb);
calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
- payload_size(pkt));
+ payload_size(pkt) + bth_pad(pkt));
calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
if (unlikely(calc_icrc != pack_icrc)) {
if (skb->protocol == htons(ETH_P_IPV6))
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 9fd4f04..e6785b1 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -500,6 +500,12 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
if (err)
return err;
}
+ if (bth_pad(pkt)) {
+ u8 *pad = payload_addr(pkt) + paylen;
+
+ memset(pad, 0, bth_pad(pkt));
+ crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
+ }
}
p = payload_addr(pkt) + paylen + bth_pad(pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 9207682..a07a29b 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -738,6 +738,13 @@ static enum resp_states read_reply(struct rxe_qp *qp,
if (err)
pr_err("Failed copying memory\n");
+ if (bth_pad(&ack_pkt)) {
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ u8 *pad = payload_addr(&ack_pkt) + payload;
+
+ memset(pad, 0, bth_pad(&ack_pkt));
+ icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
+ }
p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
*p = ~icrc;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 19624e0..b5a7895 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -648,6 +648,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
if (ib_conn->pi_support) {
u32 sig_caps = ib_conn->device->ib_device->attrs.sig_prot_cap;
+ shost->sg_prot_tablesize = shost->sg_tablesize;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 05a268c..44bd113 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -865,16 +865,18 @@ static int input_default_setkeycode(struct input_dev *dev,
}
}
- __clear_bit(*old_keycode, dev->keybit);
- __set_bit(ke->keycode, dev->keybit);
-
- for (i = 0; i < dev->keycodemax; i++) {
- if (input_fetch_keycode(dev, i) == *old_keycode) {
- __set_bit(*old_keycode, dev->keybit);
- break; /* Setting the bit twice is useless, so break */
+ if (*old_keycode <= KEY_MAX) {
+ __clear_bit(*old_keycode, dev->keybit);
+ for (i = 0; i < dev->keycodemax; i++) {
+ if (input_fetch_keycode(dev, i) == *old_keycode) {
+ __set_bit(*old_keycode, dev->keybit);
+ /* Setting the bit twice is useless, so break */
+ break;
+ }
}
}
+ __set_bit(ke->keycode, dev->keybit);
return 0;
}
@@ -930,9 +932,13 @@ int input_set_keycode(struct input_dev *dev,
* Simulate keyup event if keycode is not present
* in the keymap anymore
*/
- if (test_bit(EV_KEY, dev->evbit) &&
- !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
- __test_and_clear_bit(old_keycode, dev->key)) {
+ if (old_keycode > KEY_MAX) {
+ dev_warn(dev->dev.parent ?: &dev->dev,
+ "%s: got too big old keycode %#x\n",
+ __func__, old_keycode);
+ } else if (test_bit(EV_KEY, dev->evbit) &&
+ !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
+ __test_and_clear_bit(old_keycode, dev->key)) {
struct input_value vals[] = {
{ EV_KEY, old_keycode, 0 },
input_value_sync
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 59aaac4..138d1f3 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3257,6 +3257,8 @@ static int __maybe_unused mxt_suspend(struct device *dev)
mutex_unlock(&input_dev->mutex);
+ disable_irq(data->irq);
+
return 0;
}
@@ -3269,6 +3271,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
if (!input_dev)
return 0;
+ enable_irq(data->irq);
+
mutex_lock(&input_dev->mutex);
if (input_dev->users)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 40eb813..848dac3 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -156,9 +156,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
return (addr & smmu->pfn_mask) == addr;
}
-static dma_addr_t smmu_pde_to_dma(u32 pde)
+static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
- return pde << 12;
+ return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
@@ -543,6 +543,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
dma_addr_t *dmap)
{
unsigned int pd_index = iova_pd_index(iova);
+ struct tegra_smmu *smmu = as->smmu;
struct page *pt_page;
u32 *pd;
@@ -551,7 +552,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
return NULL;
pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
return tegra_smmu_pte_offset(pt_page, iova);
}
@@ -593,7 +594,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
} else {
u32 *pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -618,7 +619,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
tegra_smmu_set_pde(as, iova, 0);
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 0b9a8b7..b32988c 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -284,6 +284,10 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
pr_err("failed to map parent interrupt %d\n", parent_irq);
return -EINVAL;
}
+
+ if (of_property_read_bool(dn, "brcm,irq-can-wake"))
+ enable_irq_wake(parent_irq);
+
irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
intc);
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index fc5953d..b2e16dc 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -117,6 +117,14 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}
+ domain = irq_domain_add_legacy(node, num_chips * 32,
+ JZ4740_IRQ_BASE, 0,
+ &irq_domain_simple_ops, NULL);
+ if (!domain) {
+ err = -ENOMEM;
+ goto out_unmap_base;
+ }
+
for (i = 0; i < num_chips; i++) {
/* Mask all irqs */
writel(0xffffffff, intc->base + (i * CHIP_SIZE) +
@@ -143,14 +151,11 @@ static int __init ingenic_intc_of_init(struct device_node *node,
IRQ_NOPROBE | IRQ_LEVEL);
}
- domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
- &irq_domain_simple_ops, NULL);
- if (!domain)
- pr_warn("unable to register IRQ domain\n");
-
setup_irq(parent_irq, &intc_cascade_action);
return 0;
+out_unmap_base:
+ iounmap(intc->base);
out_unmap_irq:
irq_dispose_mapping(parent_irq);
out_free:
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 9406326..96a6583 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -685,6 +685,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
* IO can always make forward progress:
*/
nr /= c->btree_pages;
+ if (nr == 0)
+ nr = 1;
nr = min_t(unsigned long, nr, mca_can_free(c));
i = 0;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f46ac9d..0a9d623 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2749,7 +2749,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
write_targets++;
}
}
- if (bio->bi_end_io) {
+ if (rdev && bio->bi_end_io) {
atomic_inc(&rdev->nr_pending);
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio_set_dev(bio, rdev->bdev);
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 27e5791..0d7d687 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -330,7 +330,8 @@ static void cec_data_cancel(struct cec_data *data)
} else {
list_del_init(&data->list);
if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
- data->adap->transmit_queue_sz--;
+ if (!WARN_ON(!data->adap->transmit_queue_sz))
+ data->adap->transmit_queue_sz--;
}
/* Mark it as an error */
@@ -377,6 +378,14 @@ static void cec_flush(struct cec_adapter *adap)
* need to do anything special in that case.
*/
}
+ /*
+ * If something went wrong and this counter isn't what it should
+ * be, then this will reset it back to 0. Warn if it is not 0,
+ * since it indicates a bug, either in this framework or in a
+ * CEC driver.
+ */
+ if (WARN_ON(adap->transmit_queue_sz))
+ adap->transmit_queue_sz = 0;
}
/*
@@ -465,7 +474,8 @@ int cec_thread_func(void *_adap)
data = list_first_entry(&adap->transmit_queue,
struct cec_data, list);
list_del_init(&data->list);
- adap->transmit_queue_sz--;
+ if (!WARN_ON(!data->adap->transmit_queue_sz))
+ adap->transmit_queue_sz--;
/* Make this the current transmitting message */
adap->transmitting = data;
@@ -1031,11 +1041,11 @@ void cec_received_msg_ts(struct cec_adapter *adap,
valid_la = false;
else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
valid_la = false;
- else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
+ else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST))
valid_la = false;
else if (cec_msg_is_broadcast(msg) &&
- adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
- !(dir_fl & BCAST2_0))
+ adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 &&
+ !(dir_fl & BCAST1_4))
valid_la = false;
}
if (valid_la && min_len) {
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index ce23f43..44b0584 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -419,10 +419,14 @@ static struct sensor_register ov2659_720p[] = {
{ REG_TIMING_YINC, 0x11 },
{ REG_TIMING_VERT_FORMAT, 0x80 },
{ REG_TIMING_HORIZ_FORMAT, 0x00 },
+ { 0x370a, 0x12 },
{ 0x3a03, 0xe8 },
{ 0x3a09, 0x6f },
{ 0x3a0b, 0x5d },
{ 0x3a15, 0x9a },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x00 },
{ REG_NULL, 0x00 },
};
@@ -1203,11 +1207,15 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on)
goto unlock;
}
- ov2659_set_pixel_clock(ov2659);
- ov2659_set_frame_size(ov2659);
- ov2659_set_format(ov2659);
- ov2659_set_streaming(ov2659, 1);
- ov2659->streaming = on;
+ ret = ov2659_set_pixel_clock(ov2659);
+ if (!ret)
+ ret = ov2659_set_frame_size(ov2659);
+ if (!ret)
+ ret = ov2659_set_format(ov2659);
+ if (!ret) {
+ ov2659_set_streaming(ov2659, 1);
+ ov2659->streaming = on;
+ }
unlock:
mutex_unlock(&ov2659->lock);
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 025869eec..348296b 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -469,38 +469,39 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect rect = sel->r;
int ret;
if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- v4l_bound_align_image(&rect.width, 2, W_CIF, 1,
- &rect.height, 2, H_CIF, 1, 0);
- v4l_bound_align_image(&rect.left, DEF_HSTRT << 1,
- (DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1,
- &rect.top, DEF_VSTRT << 1,
- (DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1,
- 0);
+ v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1,
+ &sel->r.height, 2, H_CIF, 1, 0);
+ v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1,
+ (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1,
+ &sel->r.top, DEF_VSTRT << 1,
+ (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height,
+ 1, 0);
- ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1);
+ ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
if (!ret) {
- priv->rect.left = rect.left;
+ priv->rect.width += priv->rect.left - sel->r.left;
+ priv->rect.left = sel->r.left;
ret = ov6650_reg_write(client, REG_HSTOP,
- (rect.left + rect.width) >> 1);
+ (sel->r.left + sel->r.width) >> 1);
}
if (!ret) {
- priv->rect.width = rect.width;
- ret = ov6650_reg_write(client, REG_VSTRT, rect.top >> 1);
+ priv->rect.width = sel->r.width;
+ ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1);
}
if (!ret) {
- priv->rect.top = rect.top;
+ priv->rect.height += priv->rect.top - sel->r.top;
+ priv->rect.top = sel->r.top;
ret = ov6650_reg_write(client, REG_VSTOP,
- (rect.top + rect.height) >> 1);
+ (sel->r.top + sel->r.height) >> 1);
}
if (!ret)
- priv->rect.height = rect.height;
+ priv->rect.height = sel->r.height;
return ret;
}
@@ -614,7 +615,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
return -EINVAL;
}
- priv->code = code;
if (code == MEDIA_BUS_FMT_Y8_1X8 ||
code == MEDIA_BUS_FMT_SBGGR8_1X8) {
@@ -640,7 +640,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
dev_dbg(&client->dev, "max resolution: CIF\n");
coma_mask |= COMA_QCIF;
}
- priv->half_scale = half_scale;
clkrc = CLKRC_12MHz;
mclk = 12000000;
@@ -658,8 +657,13 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
if (!ret)
ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
- if (!ret)
+ if (!ret) {
+ priv->half_scale = half_scale;
+
ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
+ }
+ if (!ret)
+ priv->code = code;
if (!ret) {
mf->colorspace = priv->colorspace;
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 7d25ecd..1748812 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1310,7 +1310,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
core = cx88_core_get(dev->pci);
if (!core) {
err = -EINVAL;
- goto fail_free;
+ goto fail_disable;
}
dev->core = core;
@@ -1356,7 +1356,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->audio_hdl.error;
- goto fail_core;
+ goto fail_irq;
}
vc->priv = (void *)cc;
}
@@ -1370,7 +1370,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->video_hdl.error;
- goto fail_core;
+ goto fail_irq;
}
vc->priv = (void *)cc;
if (vc->id == V4L2_CID_CHROMA_AGC)
@@ -1533,11 +1533,14 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
fail_unreg:
cx8800_unregister_video(dev);
- free_irq(pci_dev->irq, dev);
mutex_unlock(&core->lock);
+fail_irq:
+ free_irq(pci_dev->irq, dev);
fail_core:
core->v4ldev = NULL;
cx88_core_put(core, dev->pci);
+fail_disable:
+ pci_disable_device(pci_dev);
fail_free:
kfree(dev);
return err;
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index dfcc484..e92c5b5 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1848,6 +1848,10 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
return -ENODATA;
+ /* if trying to set the same std then nothing to do */
+ if (vpfe_standards[vpfe->std_index].std_id == std_id)
+ return 0;
+
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 769e9e6..9360b36 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -345,10 +345,11 @@ static const struct venus_resources msm8916_res = {
};
static const struct freq_tbl msm8996_freq_table[] = {
- { 1944000, 490000000 }, /* 4k UHD @ 60 */
- { 972000, 320000000 }, /* 4k UHD @ 30 */
- { 489600, 150000000 }, /* 1080p @ 60 */
- { 244800, 75000000 }, /* 1080p @ 30 */
+ { 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */
+ { 972000, 520000000 }, /* 4k UHD @ 30 */
+ { 489600, 346666667 }, /* 1080p @ 60 */
+ { 244800, 150000000 }, /* 1080p @ 30 */
+ { 108000, 75000000 }, /* 720p @ 30 */
};
static const struct reg_val msm8996_reg_preset[] = {
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 522364f..3871ed6 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -915,6 +915,7 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rcar_drif_sdr *sdr = video_drvdata(file);
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
f->fmt.sdr.buffersize = sdr->fmt->buffersize;
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index 7e61150..f29074c 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -60,6 +60,7 @@ struct vpdma_data_format {
* line stride of source and dest
* buffers should be 16 byte aligned
*/
+#define VPDMA_MAX_STRIDE 65520 /* Max line stride 16 byte aligned */
#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 45bd105..2e8970c 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -352,20 +352,25 @@ enum {
};
/* find our format description corresponding to the passed v4l2_format */
-static struct vpe_fmt *find_format(struct v4l2_format *f)
+static struct vpe_fmt *__find_format(u32 fourcc)
{
struct vpe_fmt *fmt;
unsigned int k;
for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
fmt = &vpe_formats[k];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
+ if (fmt->fourcc == fourcc)
return fmt;
}
return NULL;
}
+static struct vpe_fmt *find_format(struct v4l2_format *f)
+{
+ return __find_format(f->fmt.pix.pixelformat);
+}
+
/*
* there is one vpe_dev structure in the driver, it is shared by
* all instances.
@@ -1044,11 +1049,14 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
+ u32 stride;
if (port == VPE_PORT_MV_OUT) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
q_data = &ctx->q_data[Q_DATA_SRC];
+ stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
@@ -1075,6 +1083,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
+ stride = q_data->bytesperline[VPE_LUMA];
}
if (q_data->flags & Q_DATA_FRAME_1D)
@@ -1086,7 +1095,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
MAX_W, MAX_H);
vpdma_add_out_dtd(&ctx->desc_list, q_data->width,
- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
+ stride, &q_data->c_rect,
vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
}
@@ -1105,10 +1114,13 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
+ u32 stride;
if (port == VPE_PORT_MV_IN) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ stride = ALIGN((q_data->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
@@ -1135,6 +1147,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
+ stride = q_data->bytesperline[VPE_LUMA];
if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
/*
@@ -1170,10 +1183,10 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
frame_height /= 2;
- vpdma_add_in_dtd(&ctx->desc_list, q_data->width,
- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
- vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
- frame_height, 0, 0);
+ vpdma_add_in_dtd(&ctx->desc_list, q_data->width, stride,
+ &q_data->c_rect, vpdma_fmt, dma_addr,
+ p_data->channel, field, flags, frame_width,
+ frame_height, 0, 0);
}
/*
@@ -1422,9 +1435,6 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
/* the previous dst mv buffer becomes the next src mv buffer */
ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
- if (ctx->aborting)
- goto finished;
-
s_vb = ctx->src_vbs[0];
d_vb = ctx->dst_vb;
@@ -1435,6 +1445,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
d_vb->timecode = s_vb->timecode;
d_vb->sequence = ctx->sequence;
+ s_vb->sequence = ctx->sequence;
d_q_data = &ctx->q_data[Q_DATA_DST];
if (d_q_data->flags & Q_IS_INTERLACED) {
@@ -1488,6 +1499,9 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->src_vbs[0] = NULL;
ctx->dst_vb = NULL;
+ if (ctx->aborting)
+ goto finished;
+
ctx->bufs_completed++;
if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
device_run(ctx);
@@ -1600,9 +1614,9 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
unsigned int stride = 0;
if (!fmt || !(fmt->types & type)) {
- vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
+ vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
pix->pixelformat);
- return -EINVAL;
+ fmt = __find_format(V4L2_PIX_FMT_YUYV);
}
if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
@@ -1649,7 +1663,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
&pix->height, MIN_H, MAX_H, H_ALIGN,
S_ALIGN);
- if (!pix->num_planes)
+ if (!pix->num_planes || pix->num_planes > 2)
pix->num_planes = fmt->coplanar ? 2 : 1;
else if (pix->num_planes > 1 && !fmt->coplanar)
pix->num_planes = 1;
@@ -1688,6 +1702,10 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
if (stride > plane_fmt->bytesperline)
plane_fmt->bytesperline = stride;
+ plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline,
+ stride,
+ VPDMA_MAX_STRIDE);
+
plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
VPDMA_STRIDE_ALIGN);
@@ -2308,7 +2326,7 @@ static int vpe_open(struct file *file)
v4l2_ctrl_handler_setup(hdl);
s_q_data = &ctx->q_data[Q_DATA_SRC];
- s_q_data->fmt = &vpe_formats[2];
+ s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
s_q_data->width = 1920;
s_q_data->height = 1080;
s_q_data->nplanes = 1;
@@ -2386,6 +2404,12 @@ static int vpe_release(struct file *file)
mutex_lock(&dev->dev_mutex);
free_mv_buffers(ctx);
+
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
+
vpdma_free_desc_list(&ctx->desc_list);
vpdma_free_desc_buf(&ctx->mmr_adb);
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 8ce6f9c..b60fb6ed 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -455,6 +455,8 @@ static int si470x_i2c_remove(struct i2c_client *client)
video_unregister_device(&radio->videodev);
kfree(radio);
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
return 0;
}
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index ac4fddf..427cda4 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -294,7 +294,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
mutex_unlock(&fc_usb->data_mutex);
- return 0;
+ return ret;
}
/* actual bus specific access functions,
@@ -503,7 +503,13 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
static int flexcop_usb_init(struct flexcop_usb *fc_usb)
{
/* use the alternate setting with the larges buffer */
- usb_set_interface(fc_usb->udev,0,1);
+ int ret = usb_set_interface(fc_usb->udev, 0, 1);
+
+ if (ret) {
+ err("set interface failed.");
+ return ret;
+ }
+
switch (fc_usb->udev->speed) {
case USB_SPEED_LOW:
err("cannot handle USB speed because it is too slow.");
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 986763b..c047a0b 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -985,8 +985,9 @@ static int af9005_identify_state(struct usb_device *udev,
else if (reply == 0x02)
*cold = 0;
else
- return -EIO;
- deb_info("Identify state cold = %d\n", *cold);
+ ret = -EIO;
+ if (!ret)
+ deb_info("Identify state cold = %d\n", *cold);
err:
kfree(buf);
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index 12da631..f1615fb 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -121,6 +121,7 @@ struct pulse8 {
unsigned int vers;
struct completion cmd_done;
struct work_struct work;
+ u8 work_result;
struct delayed_work ping_eeprom_work;
struct cec_msg rx_msg;
u8 data[DATA_SIZE];
@@ -142,8 +143,10 @@ static void pulse8_irq_work_handler(struct work_struct *work)
{
struct pulse8 *pulse8 =
container_of(work, struct pulse8, work);
+ u8 result = pulse8->work_result;
- switch (pulse8->data[0] & 0x3f) {
+ pulse8->work_result = 0;
+ switch (result & 0x3f) {
case MSGCODE_FRAME_DATA:
cec_received_msg(pulse8->adap, &pulse8->rx_msg);
break;
@@ -177,12 +180,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
pulse8->escape = false;
} else if (data == MSGEND) {
struct cec_msg *msg = &pulse8->rx_msg;
+ u8 msgcode = pulse8->buf[0];
if (debug)
dev_info(pulse8->dev, "received: %*ph\n",
pulse8->idx, pulse8->buf);
- pulse8->data[0] = pulse8->buf[0];
- switch (pulse8->buf[0] & 0x3f) {
+ switch (msgcode & 0x3f) {
case MSGCODE_FRAME_START:
msg->len = 1;
msg->msg[0] = pulse8->buf[1];
@@ -191,14 +194,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
if (msg->len == CEC_MAX_MSG_SIZE)
break;
msg->msg[msg->len++] = pulse8->buf[1];
- if (pulse8->buf[0] & MSGCODE_FRAME_EOM)
+ if (msgcode & MSGCODE_FRAME_EOM) {
+ WARN_ON(pulse8->work_result);
+ pulse8->work_result = msgcode;
schedule_work(&pulse8->work);
+ break;
+ }
break;
case MSGCODE_TRANSMIT_SUCCEEDED:
case MSGCODE_TRANSMIT_FAILED_LINE:
case MSGCODE_TRANSMIT_FAILED_ACK:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
+ WARN_ON(pulse8->work_result);
+ pulse8->work_result = msgcode;
schedule_work(&pulse8->work);
break;
case MSGCODE_HIGH_ERROR:
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 4320bda..e0413db 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -915,8 +915,12 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
pvr2_v4l2_dev_disassociate_parent(vp->dev_video);
pvr2_v4l2_dev_disassociate_parent(vp->dev_radio);
if (!list_empty(&vp->dev_video->devbase.fh_list) ||
- !list_empty(&vp->dev_radio->devbase.fh_list))
+ (vp->dev_radio &&
+ !list_empty(&vp->dev_radio->devbase.fh_list))) {
+ pvr2_trace(PVR2_TRACE_STRUCT,
+ "pvr2_v4l2 internal_check exit-empty id=%p", vp);
return;
+ }
pvr2_v4l2_destroy_no_lock(vp);
}
@@ -990,7 +994,8 @@ static int pvr2_v4l2_release(struct file *file)
kfree(fhp);
if (vp->channel.mc_head->disconnect_flag &&
list_empty(&vp->dev_video->devbase.fh_list) &&
- list_empty(&vp->dev_radio->devbase.fh_list)) {
+ (!vp->dev_radio ||
+ list_empty(&vp->dev_radio->devbase.fh_list))) {
pvr2_v4l2_destroy_no_lock(vp);
}
return 0;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 1c3b1080..94bbd7b 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1450,10 +1450,26 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
return ret;
}
+static void v4l_pix_format_touch(struct v4l2_pix_format *p)
+{
+ /*
+ * The v4l2_pix_format structure contains fields that make no sense for
+ * touch. Set them to default values in this case.
+ */
+
+ p->field = V4L2_FIELD_NONE;
+ p->colorspace = V4L2_COLORSPACE_RAW;
+ p->flags = 0;
+ p->ycbcr_enc = 0;
+ p->quantization = 0;
+ p->xfer_func = 0;
+}
+
static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
int ret = check_fmt(file, p->type);
if (ret)
@@ -1491,6 +1507,8 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
@@ -1526,21 +1544,6 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}
-static void v4l_pix_format_touch(struct v4l2_pix_format *p)
-{
- /*
- * The v4l2_pix_format structure contains fields that make no sense for
- * touch. Set them to default values in this case.
- */
-
- p->field = V4L2_FIELD_NONE;
- p->colorspace = V4L2_COLORSPACE_RAW;
- p->flags = 0;
- p->ycbcr_enc = 0;
- p->quantization = 0;
- p->xfer_func = 0;
-}
-
static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index c1c8152..28c3ee3 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1258,7 +1258,6 @@ static struct ab8500_prcmu_ranges ab8540_debug_ranges[AB8500_NUM_BANKS] = {
},
};
-
static irqreturn_t ab8500_debug_handler(int irq, void *data)
{
char buf[16];
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 7b7d077..9a1ab39 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -435,7 +435,12 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
- value |= ESDHC_DMA_SNOOP;
+
+ if (of_dma_is_coherent(dev->of_node))
+ value |= ESDHC_DMA_SNOOP;
+ else
+ value &= ~ESDHC_DMA_SNOOP;
+
sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
return 0;
}
@@ -880,6 +885,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
if (esdhc->vendor_ver > VENDOR_V_22)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+ if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
+ host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ }
+
if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
of_device_is_compatible(np, "fsl,p5020-esdhc") ||
of_device_is_compatible(np, "fsl,p4080-esdhc") ||
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5f6602c..fef599eb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2186,9 +2186,6 @@ static void bond_miimon_commit(struct bonding *bond)
} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* make it immediately active */
bond_set_active_slave(slave);
- } else if (slave != primary) {
- /* prevent it from being the active one */
- bond_set_backup_slave(slave);
}
netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index acb708f..0a7d818 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -392,13 +392,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
struct net_device *dev = napi->dev;
struct mscan_regs __iomem *regs = priv->reg_base;
struct net_device_stats *stats = &dev->stats;
- int npackets = 0;
- int ret = 1;
+ int work_done = 0;
struct sk_buff *skb;
struct can_frame *frame;
u8 canrflg;
- while (npackets < quota) {
+ while (work_done < quota) {
canrflg = in_8(&regs->canrflg);
if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
break;
@@ -419,18 +418,18 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
- npackets++;
+ work_done++;
netif_receive_skb(skb);
}
- if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
- napi_complete(&priv->napi);
- clear_bit(F_RX_PROGRESS, &priv->flags);
- if (priv->can.state < CAN_STATE_BUS_OFF)
- out_8(&regs->canrier, priv->shadow_canrier);
- ret = 0;
+ if (work_done < quota) {
+ if (likely(napi_complete_done(&priv->napi, work_done))) {
+ clear_bit(F_RX_PROGRESS, &priv->flags);
+ if (priv->can.state < CAN_STATE_BUS_OFF)
+ out_8(&regs->canrier, priv->shadow_canrier);
+ }
}
- return ret;
+ return work_done;
}
static irqreturn_t mscan_isr(int irq, void *dev_id)
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index bfbf809..aed8ab6 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -926,7 +926,7 @@ static int gs_usb_probe(struct usb_interface *intf,
GS_USB_BREQ_HOST_FORMAT,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1,
- intf->altsetting[0].desc.bInterfaceNumber,
+ intf->cur_altsetting->desc.bInterfaceNumber,
hconf,
sizeof(*hconf),
1000);
@@ -949,7 +949,7 @@ static int gs_usb_probe(struct usb_interface *intf,
GS_USB_BREQ_DEVICE_CONFIG,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1,
- intf->altsetting[0].desc.bInterfaceNumber,
+ intf->cur_altsetting->desc.bInterfaceNumber,
dconf,
sizeof(*dconf),
1000);
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 83a9bc8..6ae13f2 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -55,6 +55,7 @@
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
+ select REGMAP
---help---
This enables support for the SMSC/Microchip LAN9303 3 port ethernet
switch chips.
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index d76d7c7..544b6a9 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -313,6 +313,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
{
u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
+ /* Use the default high priority for management frames sent to
+ * the CPU.
+ */
+ port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
return mv88e6390_g1_monitor_write(chip, ptr, port);
}
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 950b914..d82e895 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -189,6 +189,7 @@
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
/* Offset 0x1C: Global Control 2 */
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index db6f6a8..d22b138 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1196,8 +1196,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
struct ena_ring *tx_ring, *rx_ring;
- u32 tx_work_done;
- u32 rx_work_done;
+ int tx_work_done;
+ int rx_work_done = 0;
int tx_budget;
int napi_comp_call = 0;
int ret;
@@ -1214,7 +1214,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
}
tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
- rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
+ /* On netpoll the budget is zero and the handler should only clean the
+ * tx completions.
+ */
+ if (likely(budget))
+ rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
/* If the device is about to reset or down, avoid unmask
* the interrupt and return 0 so NAPI won't reschedule
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 4e091a1..52bce00 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
u32 func_config =
MF_CFG_RD(bp,
- func_mf_config[BP_PORT(bp) + 2 * i].
+ func_mf_config[BP_PATH(bp) + 2 * i].
config);
func_num +=
((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index dbe8fee..b0ada7e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9995,10 +9995,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
*/
static void bnx2x_parity_recover(struct bnx2x *bp)
{
- bool global = false;
u32 error_recovered, error_unrecovered;
- bool is_parity;
+ bool is_parity, global = false;
+#ifdef CONFIG_BNX2X_SRIOV
+ int vf_idx;
+ for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ if (vf)
+ vf->state = VF_LOST;
+ }
+#endif
DP(NETIF_MSG_HW, "Handling parity\n");
while (1) {
switch (bp->recovery_state) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 9ca994d..1977e0c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2389,15 +2389,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
/* send the ramrod on all the queues of the PF */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ int tx_idx;
/* Set the appropriate Queue object */
q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure Tx switching\n");
- return rc;
+ for (tx_idx = FIRST_TX_COS_INDEX;
+ tx_idx < fp->max_cos; tx_idx++) {
+ q_params.params.update.cid_index = tx_idx;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 53466f6..a887bfa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -139,6 +139,7 @@ struct bnx2x_virtf {
#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
#define VF_ENABLED 2 /* VF Enabled */
#define VF_RESET 3 /* VF FLR'd, pending cleanup */
+#define VF_LOST 4 /* Recovery while VFs are loaded */
bool flr_clnup_stage; /* true during flr cleanup */
bool malicious; /* true if FW indicated so, until FLR */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 76a4668..6d5b81a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -2112,6 +2112,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
{
int i;
+ if (vf->state == VF_LOST) {
+ /* Just ack the FW and return if VFs are lost
+ * in case of parity error. VFs are supposed to be timedout
+ * on waiting for PF response.
+ */
+ DP(BNX2X_MSG_IOV,
+ "VF 0x%x lost, not handling the request\n", vf->abs_vfid);
+
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ return;
+ }
+
/* check if tlv type is known */
if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
/* Lock the per vf op mutex and note the locker's identity.
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 84c0f22..d5489cb 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -456,9 +456,9 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
hip04_set_xmit_desc(priv, phys);
- priv->tx_head = TX_NEXT(tx_head);
count++;
netdev_sent_queue(ndev, skb->len);
+ priv->tx_head = TX_NEXT(tx_head);
stats->tx_bytes += skb->len;
stats->tx_packets++;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 4801d96..0edfd19 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8379,7 +8379,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
adapter->ptp_clock) {
- if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
&adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 993cb5b..b99169a 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -37,6 +37,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netlink.h>
+#include <linux/vmalloc.h>
#include <linux/xz.h>
#include "mlxfw_mfa2.h"
#include "mlxfw_mfa2_file.h"
@@ -579,7 +580,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_size = be32_to_cpu(comp->size);
comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
- comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
+ comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size);
if (!comp_data)
return ERR_PTR(-ENOMEM);
comp_data->comp.data_size = comp_size;
@@ -601,7 +602,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
return &comp_data->comp;
err_out:
- kfree(comp_data);
+ vfree(comp_data);
return ERR_PTR(err);
}
@@ -610,7 +611,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
const struct mlxfw_mfa2_comp_data *comp_data;
comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
- kfree(comp_data);
+ vfree(comp_data);
}
void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index f79e36e4..e7ad95d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1181,7 +1181,7 @@ qede_configure_mcast_filtering(struct net_device *ndev,
netif_addr_lock_bh(ndev);
mc_count = netdev_mc_count(ndev);
- if (mc_count < 64) {
+ if (mc_count <= 64) {
netdev_for_each_mc_addr(ha, ndev) {
ether_addr_copy(temp, ha->addr);
temp += ETH_ALEN;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 5fca9a7..cc53ee2 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2756,6 +2756,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
int err;
for (i = 0; i < qdev->num_large_buffers; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+
skb = netdev_alloc_skb(qdev->ndev,
qdev->lrg_buffer_len);
if (unlikely(!skb)) {
@@ -2766,11 +2769,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
ql_free_large_buffers(qdev);
return -ENOMEM;
} else {
-
- lrg_buf_cb = &qdev->lrg_buf[i];
- memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
lrg_buf_cb->index = i;
- lrg_buf_cb->skb = skb;
/*
* We save some space to copy the ethhdr from first
* buffer
@@ -2792,6 +2791,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
return -ENOMEM;
}
+ lrg_buf_cb->skb = skb;
dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index a62128a..149fd0d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -724,6 +724,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* default */
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
break;
case PHY_INTERFACE_MODE_RMII:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 62ccbd4..fc1fa0f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -53,7 +53,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
* rate, which then uses the auto-reparenting feature of the
* clock driver, and enabling/disabling the clock.
*/
- if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+ if (phy_interface_mode_is_rgmii(gmac->interface)) {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
clk_prepare_enable(gmac->tx_clk);
gmac->clk_enabled = 1;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1c0173e..c67325d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -51,7 +51,7 @@
#include <linux/of_mdio.h>
#include "dwmac1000.h"
-#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
+#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
/* Module parameters */
@@ -3604,12 +3604,24 @@ static void stmmac_set_rx_mode(struct net_device *dev)
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int txfifosz = priv->plat->tx_fifo_size;
+
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ txfifosz /= priv->plat->tx_queues_to_use;
if (netif_running(dev)) {
netdev_err(priv->dev, "must be stopped to change its MTU\n");
return -EBUSY;
}
+ new_mtu = STMMAC_ALIGN(new_mtu);
+
+ /* If condition true, FIFO is too small or MTU too large */
+ if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
+ return -EINVAL;
+
dev->mtu = new_mtu;
netdev_update_features(dev);
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 14d6579..314e3ea 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -181,6 +181,9 @@ static int fjes_acpi_add(struct acpi_device *device)
/* create platform_device */
plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
ARRAY_SIZE(fjes_resource));
+ if (IS_ERR(plat_dev))
+ return PTR_ERR(plat_dev);
+
device->driver_data = plat_dev;
return 0;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 5de4053..25be278 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -42,7 +42,6 @@ struct pdp_ctx {
struct hlist_node hlist_addr;
union {
- u64 tid;
struct {
u64 tid;
u16 flow;
@@ -545,7 +544,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(&rt->dst);
}
- rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
+ rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
@@ -645,9 +644,16 @@ static void gtp_link_setup(struct net_device *dev)
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
-static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
+static void gtp_destructor(struct net_device *dev)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+
+ kfree(gtp->addr_hash);
+ kfree(gtp->tid_hash);
+}
+
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -665,10 +671,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;
- if (!data[IFLA_GTP_PDP_HASHSIZE])
+ if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
- else
+ } else {
hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
+ if (!hashsize)
+ hashsize = 1024;
+ }
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
@@ -682,13 +691,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
gn = net_generic(dev_net(dev), gtp_net_id);
list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+ dev->priv_destructor = gtp_destructor;
netdev_dbg(dev, "registered new GTP interface\n");
return 0;
out_hashtable:
- gtp_hashtable_free(gtp);
+ kfree(gtp->addr_hash);
+ kfree(gtp->tid_hash);
out_encap:
gtp_encap_disable(gtp);
return err;
@@ -697,9 +708,14 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
+ struct pdp_ctx *pctx;
+ int i;
+
+ for (i = 0; i < gtp->hash_size; i++)
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+ pdp_context_delete(pctx);
gtp_encap_disable(gtp);
- gtp_hashtable_free(gtp);
list_del_rcu(&gtp->list);
unregister_netdevice_queue(dev, head);
}
@@ -775,20 +791,6 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
return -ENOMEM;
}
-static void gtp_hashtable_free(struct gtp_dev *gtp)
-{
- struct pdp_ctx *pctx;
- int i;
-
- for (i = 0; i < gtp->hash_size; i++)
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
- pdp_context_delete(pctx);
-
- synchronize_rcu();
- kfree(gtp->addr_hash);
- kfree(gtp->tid_hash);
-}
-
static struct sock *gtp_encap_enable_socket(int fd, int type,
struct gtp_dev *gtp)
{
@@ -814,7 +816,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
lock_sock(sock->sk);
if (sock->sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
- goto out_sock;
+ goto out_rel_sock;
}
sk = sock->sk;
@@ -827,8 +829,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
-out_sock:
+out_rel_sock:
release_sock(sock->sk);
+out_sock:
sockfd_put(sock);
return sk;
}
@@ -929,24 +932,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
}
}
-static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
- struct genl_info *info)
+static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+ struct genl_info *info)
{
+ struct pdp_ctx *pctx, *pctx_tid = NULL;
struct net_device *dev = gtp->dev;
u32 hash_ms, hash_tid = 0;
- struct pdp_ctx *pctx;
+ unsigned int version;
bool found = false;
__be32 ms_addr;
ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+ version = nla_get_u32(info->attrs[GTPA_VERSION]);
- hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
- if (pctx->ms_addr_ip4.s_addr == ms_addr) {
- found = true;
- break;
- }
- }
+ pctx = ipv4_pdp_find(gtp, ms_addr);
+ if (pctx)
+ found = true;
+ if (version == GTP_V0)
+ pctx_tid = gtp0_pdp_find(gtp,
+ nla_get_u64(info->attrs[GTPA_TID]));
+ else if (version == GTP_V1)
+ pctx_tid = gtp1_pdp_find(gtp,
+ nla_get_u32(info->attrs[GTPA_I_TEI]));
+ if (pctx_tid)
+ found = true;
if (found) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
@@ -954,6 +964,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
+ if (pctx && pctx_tid)
+ return -EEXIST;
+ if (!pctx)
+ pctx = pctx_tid;
+
ipv4_pdp_fill(pctx, info);
if (pctx->gtp_version == GTP_V0)
@@ -1077,7 +1092,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
goto out_unlock;
}
- err = ipv4_pdp_add(gtp, sk, info);
+ err = gtp_pdp_add(gtp, sk, info);
out_unlock:
rcu_read_unlock();
@@ -1235,43 +1250,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+ int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- unsigned long tid = cb->args[1];
- int i, k = cb->args[0], ret;
struct pdp_ctx *pctx;
+ struct gtp_net *gn;
+
+ gn = net_generic(net, gtp_net_id);
if (cb->args[4])
return 0;
+ rcu_read_lock();
list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
if (last_gtp && last_gtp != gtp)
continue;
else
last_gtp = NULL;
- for (i = k; i < gtp->hash_size; i++) {
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
- if (tid && tid != pctx->u.tid)
- continue;
- else
- tid = 0;
-
- ret = gtp_genl_fill_info(skb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- cb->nlh->nlmsg_type, pctx);
- if (ret < 0) {
+ for (i = bucket; i < gtp->hash_size; i++) {
+ j = 0;
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
+ hlist_tid) {
+ if (j >= skip &&
+ gtp_genl_fill_info(skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, pctx)) {
cb->args[0] = i;
- cb->args[1] = pctx->u.tid;
+ cb->args[1] = j;
cb->args[2] = (unsigned long)gtp;
goto out;
}
+ j++;
}
+ skip = 0;
}
+ bucket = 0;
}
cb->args[4] = 1;
out:
+ rcu_read_unlock();
return skb->len;
}
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 021a8ec..6d4742d 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -665,10 +665,10 @@ static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;
- write_lock_bh(&disc_data_lock);
+ write_lock_irq(&disc_data_lock);
sp = tty->disc_data;
tty->disc_data = NULL;
- write_unlock_bh(&disc_data_lock);
+ write_unlock_irq(&disc_data_lock);
if (!sp)
return;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index aec6c26..9fd7dab 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -783,10 +783,10 @@ static void mkiss_close(struct tty_struct *tty)
{
struct mkiss *ax;
- write_lock_bh(&disc_data_lock);
+ write_lock_irq(&disc_data_lock);
ax = tty->disc_data;
tty->disc_data = NULL;
- write_unlock_bh(&disc_data_lock);
+ write_unlock_irq(&disc_data_lock);
if (!ax)
return;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 0f07b59..fc794e6 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -179,7 +179,6 @@ struct rndis_device {
u8 hw_mac_adr[ETH_ALEN];
u8 rss_key[NETVSC_HASH_KEYLEN];
- u16 rx_table[ITAB_NUM];
};
@@ -741,6 +740,8 @@ struct net_device_context {
u32 tx_table[VRSS_SEND_TAB_SIZE];
+ u16 rx_table[ITAB_NUM];
+
/* Ethtool settings */
bool udp4_l4_hash;
bool udp6_l4_hash;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5a44b97..a89de575 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1528,7 +1528,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
rndis_dev = ndev->extension;
if (indir) {
for (i = 0; i < ITAB_NUM; i++)
- indir[i] = rndis_dev->rx_table[i];
+ indir[i] = ndc->rx_table[i];
}
if (key)
@@ -1558,7 +1558,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
return -EINVAL;
for (i = 0; i < ITAB_NUM; i++)
- rndis_dev->rx_table[i] = indir[i];
+ ndc->rx_table[i] = indir[i];
}
if (!key) {
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index fc1d5e1..b19557c 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -715,6 +715,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
const u8 *rss_key, u16 flag)
{
struct net_device *ndev = rdev->ndev;
+ struct net_device_context *ndc = netdev_priv(ndev);
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
@@ -754,7 +755,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
/* Set indirection table entries */
itab = (u32 *)(rssp + 1);
for (i = 0; i < ITAB_NUM; i++)
- itab[i] = rdev->rx_table[i];
+ itab[i] = ndc->rx_table[i];
/* Set hask key values */
keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
@@ -1204,6 +1205,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *device_info)
{
struct net_device *net = hv_get_drvdata(dev);
+ struct net_device_context *ndc = netdev_priv(net);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct ndis_recv_scale_cap rsscap;
@@ -1286,9 +1288,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
/* We will use the given number of channels if available. */
net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
- for (i = 0; i < ITAB_NUM; i++)
- rndis_device->rx_table[i] = ethtool_rxfh_indir_default(
+ if (!netif_is_rxfh_configured(net)) {
+ for (i = 0; i < ITAB_NUM; i++)
+ ndc->rx_table[i] = ethtool_rxfh_indir_default(
i, net_device->num_chn);
+ }
atomic_set(&net_device->open_chn, 1);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8d5f88a..2b97765 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -263,7 +263,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
struct net_device *src,
enum macvlan_mode mode)
{
- const struct ethhdr *eth = eth_hdr(skb);
+ const struct ethhdr *eth = skb_eth_hdr(skb);
const struct macvlan_dev *vlan;
struct sk_buff *nskb;
unsigned int i;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index e03e91d..0cbcced 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -84,6 +84,10 @@
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN 0x1f
+/* CFG3 bits */
+#define DP83867_CFG3_INT_OE BIT(7)
+#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9)
+
/* CFG4 bits */
#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
@@ -320,12 +324,13 @@ static int dp83867_config_init(struct phy_device *phydev)
return ret;
}
+ val = phy_read(phydev, DP83867_CFG3);
/* Enable Interrupt output INT_OE in CFG3 register */
- if (phy_interrupt_is_valid(phydev)) {
- val = phy_read(phydev, DP83867_CFG3);
- val |= BIT(7);
- phy_write(phydev, DP83867_CFG3, val);
- }
+ if (phy_interrupt_is_valid(phydev))
+ val |= DP83867_CFG3_INT_OE;
+
+ val |= DP83867_CFG3_ROBUST_AUTO_MDIX;
+ phy_write(phydev, DP83867_CFG3, val);
if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP)
dp83867_config_port_mirroring(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ed7e3c7..a98c227 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -367,8 +367,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
mdiodev->device_free = phy_mdio_device_free;
mdiodev->device_remove = phy_mdio_device_remove;
- dev->speed = 0;
- dev->duplex = -1;
+ dev->speed = SPEED_UNKNOWN;
+ dev->duplex = DUPLEX_UNKNOWN;
dev->pause = 0;
dev->asym_pause = 0;
dev->link = 1;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 04ddbd9..9aa254d 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -497,7 +497,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
}
} else {
netdev_warn(dev->net,
- "Failed to read stat ret = 0x%x", ret);
+ "Failed to read stat ret = %d", ret);
}
kfree(stats);
@@ -1763,6 +1763,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
dev->mdiobus->read = lan78xx_mdiobus_read;
dev->mdiobus->write = lan78xx_mdiobus_write;
dev->mdiobus->name = "lan78xx-mdiobus";
+ dev->mdiobus->parent = &dev->udev->dev;
snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
@@ -2601,11 +2602,6 @@ static int lan78xx_stop(struct net_device *net)
return 0;
}
-static int lan78xx_linearize(struct sk_buff *skb)
-{
- return skb_linearize(skb);
-}
-
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
struct sk_buff *skb, gfp_t flags)
{
@@ -2616,8 +2612,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
return NULL;
}
- if (lan78xx_linearize(skb) < 0)
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
return NULL;
+ }
tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 153a81e..5aa7d50 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2216,7 +2216,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
skb_dst_update_pmtu(skb, mtu);
}
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
vni, md, flags, udp_sum);
@@ -2257,7 +2257,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
skb_dst_update_pmtu(skb, mtu);
}
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index dff3444..ea47ad4 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3627,7 +3627,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
struct ieee80211_vif *vif,
enum ath10k_hw_txrx_mode txmode,
enum ath10k_mac_tx_path txpath,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool noque_offchan)
{
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -3655,10 +3655,10 @@ static int ath10k_mac_tx(struct ath10k *ar,
}
}
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
if (!ath10k_mac_tx_frm_has_freq(ar)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
- skb);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
+ skb, skb->len);
skb_queue_tail(&ar->offchan_tx_queue, skb);
ieee80211_queue_work(hw, &ar->offchan_tx_work);
@@ -3720,8 +3720,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
- skb);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
+ skb, skb->len);
hdr = (struct ieee80211_hdr *)skb->data;
peer_addr = ieee80211_get_DA(hdr);
@@ -3767,7 +3767,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
if (ret) {
ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
ret);
@@ -3777,8 +3777,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
if (time_left == 0)
- ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
- skb);
+ ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
+ skb, skb->len);
if (!peer && tmp_peer_created) {
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
@@ -3957,7 +3957,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->htt.tx_lock);
}
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
if (unlikely(ret)) {
ath10k_warn(ar, "failed to push frame: %d\n", ret);
@@ -4239,7 +4239,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->htt.tx_lock);
}
- ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
if (ret) {
ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
if (is_htt) {
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index d4986f6..9999c8c4 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -100,6 +100,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
+ info->status.rates[0].idx = -1;
+
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index f9c79e2..c64a03f 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -454,6 +454,7 @@ static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
"usb bulk transmit failed: %d\n", ret);
usb_unanchor_urb(urb);
+ usb_free_urb(urb);
ret = -EINVAL;
goto err_free_urb_to_pipe;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index b38a586e..4748f55 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -973,6 +973,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ath_htc_rx_status *rxstatus;
struct ath_rx_status rx_stats;
bool decrypt_error = false;
+ __be16 rs_datalen;
+ bool is_phyerr;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@@ -982,11 +984,24 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
rxstatus = (struct ath_htc_rx_status *)skb->data;
- if (be16_to_cpu(rxstatus->rs_datalen) -
- (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
+ rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
+ if (unlikely(rs_datalen -
+ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) {
ath_err(common,
"Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
- rxstatus->rs_datalen, skb->len);
+ rs_datalen, skb->len);
+ goto rx_next;
+ }
+
+ is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY;
+ /*
+ * Discard zero-length packets and packets smaller than an ACK
+ * which are not PHY_ERROR (short radar pulses have a length of 3)
+ */
+ if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
+ ath_warn(common,
+ "Short RX data len, dropping (dlen: %d)\n",
+ rs_datalen);
goto rx_next;
}
@@ -1011,7 +1026,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* Process PHY errors and return so that the packet
* can be dropped.
*/
- if (rx_stats.rs_status & ATH9K_RXERR_PHY) {
+ if (unlikely(is_phyerr)) {
/* TODO: Not using DFS processing now. */
if (ath_cmn_process_fft(&priv->spec_priv, hdr,
&rx_stats, rx_status->mactime)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
index 1bbd17a..20e16c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -185,6 +185,9 @@ void iwl_leds_init(struct iwl_priv *priv)
priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(priv->hw->wiphy));
+ if (!priv->led.name)
+ return;
+
priv->led.brightness_set = iwl_led_brightness_set;
priv->led.blink_set = iwl_led_blink_set;
priv->led.max_brightness = 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
index b272695..072f80c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -131,6 +131,9 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(mvm->hw->wiphy));
+ if (!mvm->led.name)
+ return -ENOMEM;
+
mvm->led.brightness_set = iwl_led_brightness_set;
mvm->led.max_brightness = 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index c73e4be..c31303d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -62,6 +62,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
+#include <asm/unaligned.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
@@ -290,7 +291,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
len = le16_to_cpu(rx_res->byte_count);
- rx_pkt_status = le32_to_cpup((__le32 *)
+ rx_pkt_status = get_unaligned_le32((__le32 *)
(pkt->data + sizeof(*rx_res) + len));
/* Dont use dev_alloc_skb(), we'll have enough headroom once
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 39bf85d..c7f8a29 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1183,6 +1183,10 @@ static int if_sdio_probe(struct sdio_func *func,
spin_lock_init(&card->lock);
card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
+ if (unlikely(!card->workqueue)) {
+ ret = -ENOMEM;
+ goto err_queue;
+ }
INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
init_waitqueue_head(&card->pwron_waitq);
@@ -1234,6 +1238,7 @@ static int if_sdio_probe(struct sdio_func *func,
lbs_remove_card(priv);
free:
destroy_workqueue(card->workqueue);
+err_queue:
while (card->packets) {
packet = card->packets;
card->packets = card->packets->next;
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 9511f5f..8ee9609 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -677,8 +677,11 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
skb_put(skb, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE))
+ PCI_DMA_FROMDEVICE)) {
+ kfree_skb(skb);
+ kfree(card->evtbd_ring_vbase);
return -1;
+ }
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -1019,8 +1022,10 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
}
skb_put(skb, MWIFIEX_UPLD_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE))
+ PCI_DMA_FROMDEVICE)) {
+ kfree_skb(skb);
return -1;
+ }
card->cmdrsp_buf = skb;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index a8043d7..f88a953 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -271,6 +271,14 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
"11D: skip setting domain info in FW\n");
return 0;
}
+
+ if (country_ie_len >
+ (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "11D: country_ie_len overflow!, deauth AP\n");
+ return -EINVAL;
+ }
+
memcpy(priv->adapter->country_code, &country_ie[2], 2);
domain_info->country_code[0] = country_ie[2];
@@ -314,8 +322,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
priv->scan_block = false;
if (bss) {
- if (adapter->region_code == 0x00)
- mwifiex_process_country_ie(priv, bss);
+ if (adapter->region_code == 0x00 &&
+ mwifiex_process_country_ie(priv, bss))
+ return -EINVAL;
/* Allocate and fill new bss descriptor */
bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index e76af286..b5340af 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -956,59 +956,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
switch (*pos) {
case WLAN_EID_SUPP_RATES:
+ if (pos[1] > 32)
+ return;
sta_ptr->tdls_cap.rates_len = pos[1];
for (i = 0; i < pos[1]; i++)
sta_ptr->tdls_cap.rates[i] = pos[i + 2];
break;
case WLAN_EID_EXT_SUPP_RATES:
+ if (pos[1] > 32)
+ return;
basic = sta_ptr->tdls_cap.rates_len;
+ if (pos[1] > 32 - basic)
+ return;
for (i = 0; i < pos[1]; i++)
sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
sta_ptr->tdls_cap.rates_len += pos[1];
break;
case WLAN_EID_HT_CAPABILITY:
- memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
+ if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
+ return;
+ if (pos[1] != sizeof(struct ieee80211_ht_cap))
+ return;
+ /* copy the ie's value into ht_capb*/
+ memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
sizeof(struct ieee80211_ht_cap));
sta_ptr->is_11n_enabled = 1;
break;
case WLAN_EID_HT_OPERATION:
- memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
+ if (pos > end -
+ sizeof(struct ieee80211_ht_operation) - 2)
+ return;
+ if (pos[1] != sizeof(struct ieee80211_ht_operation))
+ return;
+ /* copy the ie's value into ht_oper*/
+ memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
sizeof(struct ieee80211_ht_operation));
break;
case WLAN_EID_BSS_COEX_2040:
+ if (pos > end - 3)
+ return;
+ if (pos[1] != 1)
+ return;
sta_ptr->tdls_cap.coex_2040 = pos[2];
break;
case WLAN_EID_EXT_CAPABILITY:
+ if (pos > end - sizeof(struct ieee_types_header))
+ return;
+ if (pos[1] < sizeof(struct ieee_types_header))
+ return;
+ if (pos[1] > 8)
+ return;
memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
sizeof(struct ieee_types_header) +
min_t(u8, pos[1], 8));
break;
case WLAN_EID_RSN:
+ if (pos > end - sizeof(struct ieee_types_header))
+ return;
+ if (pos[1] < sizeof(struct ieee_types_header))
+ return;
+ if (pos[1] > IEEE_MAX_IE_SIZE -
+ sizeof(struct ieee_types_header))
+ return;
memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
sizeof(struct ieee_types_header) +
min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header)));
break;
case WLAN_EID_QOS_CAPA:
+ if (pos > end - 3)
+ return;
+ if (pos[1] != 1)
+ return;
sta_ptr->tdls_cap.qos_info = pos[2];
break;
case WLAN_EID_VHT_OPERATION:
- if (priv->adapter->is_hw_11ac_capable)
- memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
+ if (priv->adapter->is_hw_11ac_capable) {
+ if (pos > end -
+ sizeof(struct ieee80211_vht_operation) - 2)
+ return;
+ if (pos[1] !=
+ sizeof(struct ieee80211_vht_operation))
+ return;
+ /* copy the ie's value into vhtoper*/
+ memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2,
sizeof(struct ieee80211_vht_operation));
+ }
break;
case WLAN_EID_VHT_CAPABILITY:
if (priv->adapter->is_hw_11ac_capable) {
- memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
+ if (pos > end -
+ sizeof(struct ieee80211_vht_cap) - 2)
+ return;
+ if (pos[1] != sizeof(struct ieee80211_vht_cap))
+ return;
+ /* copy the ie's value into vhtcap*/
+ memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
sizeof(struct ieee80211_vht_cap));
sta_ptr->is_11ac_enabled = 1;
}
break;
case WLAN_EID_AID:
- if (priv->adapter->is_hw_11ac_capable)
+ if (priv->adapter->is_hw_11ac_capable) {
+ if (pos > end - 4)
+ return;
+ if (pos[1] != 2)
+ return;
sta_ptr->tdls_cap.aid =
get_unaligned_le16((pos + 2));
+ }
+ break;
default:
break;
}
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 95e3993..a895b6f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1349,6 +1349,7 @@ struct rtl8xxxu_fileops {
u8 has_s0s1:1;
u8 has_tx_report:1;
u8 gen2_thermal_meter:1;
+ u8 needs_full_init:1;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index c4b86a8..27e97df 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1673,6 +1673,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.has_s0s1 = 1,
.has_tx_report = 1,
.gen2_thermal_meter = 1,
+ .needs_full_init = 1,
.adda_1t_init = 0x01c00014,
.adda_1t_path_on = 0x01c00014,
.adda_2t_path_on_a = 0x01c00014,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 91b01ca..73fc595 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -3905,6 +3905,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
else
macpower = true;
+ if (fops->needs_full_init)
+ macpower = false;
+
ret = fops->power_on(priv);
if (ret < 0) {
dev_warn(dev, "%s: Failed power on\n", __func__);
@@ -5450,6 +5453,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
+ usb_free_urb(urb);
goto error;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 530e80f..1ee7f79 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1556,6 +1556,8 @@ static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
* This is maybe necessary:
* rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb);
*/
+ dev_kfree_skb(skb);
+
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 2401c8b..93eda23 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1068,8 +1068,10 @@ int rtl_usb_probe(struct usb_interface *intf,
rtlpriv->hw = hw;
rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
GFP_KERNEL);
- if (!rtlpriv->usb_data)
+ if (!rtlpriv->usb_data) {
+ ieee80211_free_hw(hw);
return -ENOMEM;
+ }
/* this spin lock must be initialized early */
spin_lock_init(&rtlpriv->locks.usb_lock);
@@ -1130,6 +1132,7 @@ int rtl_usb_probe(struct usb_interface *intf,
_rtl_usb_io_handler_release(hw);
usb_put_dev(udev);
complete(&rtlpriv->firmware_loading_complete);
+ kfree(rtlpriv->usb_data);
return -ENODEV;
}
EXPORT_SYMBOL(rtl_usb_probe);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index b2feda3..4714984 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1259,11 +1259,11 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
ret = btt_data_read(arena, page, off, postmap, cur_len);
if (ret) {
- int rc;
-
/* Media error - set the e_flag */
- rc = btt_map_write(arena, premap, postmap, 0, 1,
- NVDIMM_IO_ATOMIC);
+ if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
+ dev_warn_ratelimited(to_dev(arena),
+ "Error persistently tracking bad blocks at %#x\n",
+ premap);
goto out_rtt;
}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 058d542..9e4d2ec 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -337,7 +337,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
!template->ls_req || !template->fcp_io ||
!template->ls_abort || !template->fcp_abort ||
!template->max_hw_queues || !template->max_sgl_segments ||
- !template->max_dif_sgl_segments || !template->dma_boundary) {
+ !template->max_dif_sgl_segments || !template->dma_boundary ||
+ !template->module) {
ret = -EINVAL;
goto out_reghost_failed;
}
@@ -1762,6 +1763,7 @@ nvme_fc_ctrl_free(struct kref *ref)
{
struct nvme_fc_ctrl *ctrl =
container_of(ref, struct nvme_fc_ctrl, ref);
+ struct nvme_fc_lport *lport = ctrl->lport;
unsigned long flags;
if (ctrl->ctrl.tagset) {
@@ -1787,6 +1789,7 @@ nvme_fc_ctrl_free(struct kref *ref)
if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
+ module_put(lport->ops->module);
}
static void
@@ -2765,10 +2768,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
+ if (!try_module_get(lport->ops->module)) {
+ ret = -EUNATCH;
+ goto out_free_ctrl;
+ }
+
idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
- goto out_free_ctrl;
+ goto out_mod_put;
}
ctrl->ctrl.opts = opts;
@@ -2915,6 +2923,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
out_free_ida:
put_device(ctrl->dev);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_mod_put:
+ module_put(lport->ops->module);
out_free_ctrl:
kfree(ctrl);
out_fail:
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 096523d..b8fe870 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -693,6 +693,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
static struct nvme_fc_port_template fctemplate = {
+ .module = THIS_MODULE,
.localport_delete = fcloop_localport_delete,
.remoteport_delete = fcloop_remoteport_delete,
.create_queue = fcloop_create_queue,
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 7b4ee33..15c81cf 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -230,6 +230,18 @@ static int port_check(struct device *dev, void *dev_drv)
return 0;
}
+/*
+ * Iterates through all the devices connected to the bus and return 1
+ * if the device is a parallel port.
+ */
+
+static int port_detect(struct device *dev, void *dev_drv)
+{
+ if (is_parport(dev))
+ return 1;
+ return 0;
+}
+
/**
* parport_register_driver - register a parallel port device driver
* @drv: structure describing the driver
@@ -282,6 +294,15 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
if (ret)
return ret;
+ /*
+ * check if bus has any parallel port registered, if
+ * none is found then load the lowlevel driver.
+ */
+ ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
+ port_detect);
+ if (!ret)
+ get_lowlevel_driver();
+
mutex_lock(&registration_lock);
if (drv->match_port)
bus_for_each_dev(&parport_bus_type, NULL, drv,
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index e3aefda..0941555 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -23,7 +23,7 @@
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/wait.h>
-
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
@@ -898,7 +898,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
u32 reg;
s.global = ioread32(&stdev->mmio_sw_event->global_summary);
- s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+ s.part_bitmap = readq(&stdev->mmio_sw_event->part_event_bitmap);
s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
for (i = 0; i < stdev->partition_count; i++) {
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index 6601ad0..4ba3634 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -207,6 +207,19 @@ static int cpcap_phy_get_ints_state(struct cpcap_phy_ddata *ddata,
static int cpcap_usb_set_uart_mode(struct cpcap_phy_ddata *ddata);
static int cpcap_usb_set_usb_mode(struct cpcap_phy_ddata *ddata);
+static void cpcap_usb_try_musb_mailbox(struct cpcap_phy_ddata *ddata,
+ enum musb_vbus_id_status status)
+{
+ int error;
+
+ error = musb_mailbox(status);
+ if (!error)
+ return;
+
+ dev_dbg(ddata->dev, "%s: musb_mailbox failed: %i\n",
+ __func__, error);
+}
+
static void cpcap_usb_detect(struct work_struct *work)
{
struct cpcap_phy_ddata *ddata;
@@ -226,9 +239,7 @@ static void cpcap_usb_detect(struct work_struct *work)
if (error)
goto out_err;
- error = musb_mailbox(MUSB_ID_GROUND);
- if (error)
- goto out_err;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
error = regmap_update_bits(ddata->reg, CPCAP_REG_USBC3,
CPCAP_BIT_VBUSSTBY_EN,
@@ -255,9 +266,7 @@ static void cpcap_usb_detect(struct work_struct *work)
error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_ID_GROUND);
- if (error)
- goto out_err;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_ID_GROUND);
return;
}
@@ -267,22 +276,18 @@ static void cpcap_usb_detect(struct work_struct *work)
error = cpcap_usb_set_usb_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_VBUS_VALID);
- if (error)
- goto out_err;
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_VALID);
return;
}
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
+
/* Default to debug UART mode */
error = cpcap_usb_set_uart_mode(ddata);
if (error)
goto out_err;
- error = musb_mailbox(MUSB_VBUS_OFF);
- if (error)
- goto out_err;
-
dev_dbg(ddata->dev, "set UART mode\n");
return;
@@ -647,9 +652,7 @@ static int cpcap_usb_phy_remove(struct platform_device *pdev)
if (error)
dev_err(ddata->dev, "could not set UART mode\n");
- error = musb_mailbox(MUSB_VBUS_OFF);
- if (error)
- dev_err(ddata->dev, "could not set mailbox\n");
+ cpcap_usb_try_musb_mailbox(ddata, MUSB_VBUS_OFF);
usb_remove_phy(&ddata->phy);
cancel_delayed_work_sync(&ddata->detect_work);
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index 2d0c70b..643934a 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -159,8 +159,8 @@ static int qcom_usb_hs_phy_power_on(struct phy *phy)
/* setup initial state */
qcom_usb_hs_phy_vbus_notifier(&uphy->vbus_notify, state,
uphy->vbus_edev);
- ret = devm_extcon_register_notifier(&ulpi->dev, uphy->vbus_edev,
- EXTCON_USB, &uphy->vbus_notify);
+ ret = extcon_register_notifier(uphy->vbus_edev, EXTCON_USB,
+ &uphy->vbus_notify);
if (ret)
goto err_ulpi;
}
@@ -181,6 +181,9 @@ static int qcom_usb_hs_phy_power_off(struct phy *phy)
{
struct qcom_usb_hs_phy *uphy = phy_get_drvdata(phy);
+ if (uphy->vbus_edev)
+ extcon_unregister_notifier(uphy->vbus_edev, EXTCON_USB,
+ &uphy->vbus_notify);
regulator_disable(uphy->v3p3);
regulator_disable(uphy->v1p8);
clk_disable_unprepare(uphy->sleep_clk);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index beeb7cb..9df5d29 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -204,7 +204,6 @@ struct byt_gpio {
struct platform_device *pdev;
struct pinctrl_dev *pctl_dev;
struct pinctrl_desc pctl_desc;
- raw_spinlock_t lock;
const struct byt_pinctrl_soc_data *soc_data;
struct byt_community *communities_copy;
struct byt_gpio_pin_context *saved_context;
@@ -715,6 +714,8 @@ static const struct byt_pinctrl_soc_data *byt_soc_data[] = {
NULL,
};
+static DEFINE_RAW_SPINLOCK(byt_lock);
+
static struct byt_community *byt_get_community(struct byt_gpio *vg,
unsigned int pin)
{
@@ -856,7 +857,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
unsigned long flags;
int i;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
for (i = 0; i < group.npins; i++) {
void __iomem *padcfg0;
@@ -876,7 +877,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
writel(value, padcfg0);
}
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static void byt_set_group_mixed_mux(struct byt_gpio *vg,
@@ -886,7 +887,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
unsigned long flags;
int i;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
for (i = 0; i < group.npins; i++) {
void __iomem *padcfg0;
@@ -906,7 +907,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
writel(value, padcfg0);
}
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
@@ -955,11 +956,11 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
writel(value, reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
@@ -971,7 +972,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
u32 value, gpio_mux;
unsigned long flags;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
/*
* In most cases, func pin mux 000 means GPIO function.
@@ -993,7 +994,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
"pin %u forcibly re-configured as GPIO\n", offset);
}
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
pm_runtime_get(&vg->pdev->dev);
@@ -1021,7 +1022,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(val_reg);
value &= ~BYT_DIR_MASK;
@@ -1038,7 +1039,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
"Potential Error: Setting GPIO with direct_irq_en to output");
writel(value, val_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@@ -1107,11 +1108,11 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
u32 conf, pull, val, debounce;
u16 arg = 0;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
conf = readl(conf_reg);
pull = conf & BYT_PULL_ASSIGN_MASK;
val = readl(val_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
@@ -1138,9 +1139,9 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
if (!(conf & BYT_DEBOUNCE_EN))
return -EINVAL;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
debounce = readl(db_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
case BYT_DEBOUNCE_PULSE_375US:
@@ -1192,7 +1193,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
u32 conf, val, debounce;
int i, ret = 0;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
conf = readl(conf_reg);
val = readl(val_reg);
@@ -1300,7 +1301,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
if (!ret)
writel(conf, conf_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return ret;
}
@@ -1325,9 +1326,9 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
val = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return !!(val & BYT_LEVEL);
}
@@ -1342,13 +1343,13 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (!reg)
return;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
old_val = readl(reg);
if (value)
writel(old_val | BYT_LEVEL, reg);
else
writel(old_val & ~BYT_LEVEL, reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
@@ -1361,9 +1362,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
if (!reg)
return -EINVAL;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
if (!(value & BYT_OUTPUT_EN))
return GPIOF_DIR_OUT;
@@ -1406,14 +1407,14 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
const char *label;
unsigned int pin;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
pin = vg->soc_data->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
seq_printf(s,
"Could not retrieve pin %i conf0 reg\n",
pin);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
continue;
}
conf0 = readl(reg);
@@ -1422,11 +1423,11 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
if (!reg) {
seq_printf(s,
"Could not retrieve pin %i val reg\n", pin);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
continue;
}
val = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
comm = byt_get_community(vg, pin);
if (!comm) {
@@ -1510,9 +1511,9 @@ static void byt_irq_ack(struct irq_data *d)
if (!reg)
return;
- raw_spin_lock(&vg->lock);
+ raw_spin_lock(&byt_lock);
writel(BIT(offset % 32), reg);
- raw_spin_unlock(&vg->lock);
+ raw_spin_unlock(&byt_lock);
}
static void byt_irq_mask(struct irq_data *d)
@@ -1536,7 +1537,7 @@ static void byt_irq_unmask(struct irq_data *d)
if (!reg)
return;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
switch (irqd_get_trigger_type(d)) {
@@ -1557,7 +1558,7 @@ static void byt_irq_unmask(struct irq_data *d)
writel(value, reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_irq_type(struct irq_data *d, unsigned int type)
@@ -1571,7 +1572,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
if (!reg || offset >= vg->chip.ngpio)
return -EINVAL;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
WARN(value & BYT_DIRECT_IRQ_EN,
@@ -1593,7 +1594,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@@ -1629,9 +1630,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
continue;
}
- raw_spin_lock(&vg->lock);
+ raw_spin_lock(&byt_lock);
pending = readl(reg);
- raw_spin_unlock(&vg->lock);
+ raw_spin_unlock(&byt_lock);
for_each_set_bit(pin, &pending, 32) {
virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
generic_handle_irq(virq);
@@ -1833,8 +1834,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(vg->pctl_dev);
}
- raw_spin_lock_init(&vg->lock);
-
ret = byt_gpio_probe(vg);
if (ret)
return ret;
@@ -1850,8 +1849,11 @@ static int byt_gpio_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct byt_gpio *vg = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
for (i = 0; i < vg->soc_data->npins; i++) {
void __iomem *reg;
u32 value;
@@ -1872,6 +1874,7 @@ static int byt_gpio_suspend(struct device *dev)
vg->saved_context[i].val = value;
}
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@@ -1879,8 +1882,11 @@ static int byt_gpio_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct byt_gpio *vg = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
for (i = 0; i < vg->soc_data->npins; i++) {
void __iomem *reg;
u32 value;
@@ -1918,6 +1924,7 @@ static int byt_gpio_resume(struct device *dev)
}
}
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
#endif
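
Note: the pinctrl-baytrail hunks above replace the per-instance vg->lock with a single file-scope byt_lock, so the new suspend/resume paths can take one lock around the whole register save/restore loop. A minimal sketch of that pattern, with hypothetical names (not the driver's actual code):

	#include <linux/spinlock.h>
	#include <linux/io.h>

	/* one lock shared by every code path that touches the hardware registers */
	static DEFINE_RAW_SPINLOCK(demo_lock);

	static u32 demo_read_reg(void __iomem *reg)
	{
		unsigned long flags;
		u32 val;

		raw_spin_lock_irqsave(&demo_lock, flags);
		val = readl(reg);
		raw_spin_unlock_irqrestore(&demo_lock, flags);

		return val;
	}
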
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index b78f42a..7385cd8 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -509,7 +509,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
unsigned int i, irqnr;
unsigned long flags;
- u32 *regs, regval;
+ u32 __iomem *regs;
+ u32 regval;
u64 status, mask;
/* Read the wake status */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index 3323204..3eccc9b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -1453,7 +1453,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(ET0_ETXD2_A),
GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B),
GPIO_FN(ET0_ETXD3_A),
- GPIO_FN(RD_WR), GPIO_FN(TCLK1_B),
+ GPIO_FN(RD_WR), GPIO_FN(TCLK0), GPIO_FN(CAN_CLK_B), GPIO_FN(ET0_ETXD4),
GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B),
GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2),
GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A),
@@ -1949,7 +1949,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_20 [1] */
FN_EX_WAIT0, FN_TCLK1_B,
/* IP3_19_18 [2] */
- FN_RD_WR, FN_TCLK1_B, 0, 0,
+ FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4,
/* IP3_17_15 [3] */
FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B,
FN_ET0_ETXD3_A, 0, 0, 0,
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index d0ffdd5..06a3c1e 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -313,7 +313,7 @@ static int __init hp_wmi_bios_2008_later(void)
static int __init hp_wmi_bios_2009_later(void)
{
- int state = 0;
+ u8 state[128];
int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
sizeof(state), sizeof(state));
if (!ret)
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 971ae89..7499719 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -482,6 +482,14 @@ static const struct dmi_system_id critclk_systems[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "6ES7647-8B"),
},
},
+ {
+ .ident = "CONNECT X300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "A5E45074588"),
+ },
+ },
+
{ /*sentinel*/ }
};
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index c64903a..b818f65 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -175,9 +175,9 @@ static struct posix_clock_operations ptp_clock_ops = {
.read = ptp_read,
};
-static void delete_ptp_clock(struct posix_clock *pc)
+static void ptp_clock_release(struct device *dev)
{
- struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
@@ -222,7 +222,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
}
ptp->clock.ops = ptp_clock_ops;
- ptp->clock.release = delete_ptp_clock;
ptp->info = info;
ptp->devid = MKDEV(major, index);
ptp->index = index;
@@ -249,15 +248,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (err)
goto no_pin_groups;
- /* Create a new device in our class. */
- ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
- ptp, ptp->pin_attr_groups,
- "ptp%d", ptp->index);
- if (IS_ERR(ptp->dev)) {
- err = PTR_ERR(ptp->dev);
- goto no_device;
- }
-
/* Register a new PPS source. */
if (info->pps) {
struct pps_source_info pps;
@@ -273,8 +263,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
}
}
- /* Create a posix clock. */
- err = posix_clock_register(&ptp->clock, ptp->devid);
+ /* Initialize a new device of our class in our clock structure. */
+ device_initialize(&ptp->dev);
+ ptp->dev.devt = ptp->devid;
+ ptp->dev.class = ptp_class;
+ ptp->dev.parent = parent;
+ ptp->dev.groups = ptp->pin_attr_groups;
+ ptp->dev.release = ptp_clock_release;
+ dev_set_drvdata(&ptp->dev, ptp);
+ dev_set_name(&ptp->dev, "ptp%d", ptp->index);
+
+ /* Create a posix clock and link it to the device. */
+ err = posix_clock_register(&ptp->clock, &ptp->dev);
if (err) {
pr_err("failed to create posix clock\n");
goto no_clock;
@@ -286,8 +286,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
no_pps:
- device_destroy(ptp_class, ptp->devid);
-no_device:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
if (ptp->kworker)
@@ -317,7 +315,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
- device_destroy(ptp_class, ptp->devid);
ptp_cleanup_pin_groups(ptp);
posix_clock_unregister(&ptp->clock);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index b86f1bf..45ed9e1 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -41,7 +41,7 @@ struct timestamp_event_queue {
struct ptp_clock {
struct posix_clock clock;
- struct device *dev;
+ struct device dev;
struct ptp_clock_info *info;
dev_t devid;
int index; /* index into clocks.map */
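
Note: the two PTP hunks above switch struct ptp_clock from holding a struct device * created with device_create_with_groups() to embedding the struct device itself and freeing through its ->release() callback, which is what closes the use-after-free window. A generic sketch of the embedded-device pattern (hypothetical names, not the PTP code):

	#include <linux/device.h>
	#include <linux/slab.h>

	struct demo_clock {
		struct device dev;		/* embedded, not a pointer */
		/* ... driver state ... */
	};

	static void demo_release(struct device *dev)
	{
		/* the last put_device() lands here; only now is it safe to free */
		kfree(container_of(dev, struct demo_clock, dev));
	}

	static struct demo_clock *demo_create(struct class *cls, struct device *parent)
	{
		struct demo_clock *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

		if (!dc)
			return NULL;

		device_initialize(&dc->dev);
		dc->dev.class = cls;
		dc->dev.parent = parent;
		dc->dev.release = demo_release;
		dev_set_name(&dc->dev, "demo0");

		if (device_add(&dc->dev)) {
			put_device(&dc->dev);	/* demo_release() frees dc */
			return NULL;
		}
		return dc;
	}
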
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 0f97514..c9f20e1 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -1099,23 +1099,6 @@ static struct ab8500_regulator_info
.update_val_idle = 0x82,
.update_val_normal = 0x02,
},
- [AB8505_LDO_USB] = {
- .desc = {
- .name = "LDO-USB",
- .ops = &ab8500_regulator_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8505_LDO_USB,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_3300000_voltage,
- },
- .update_bank = 0x03,
- .update_reg = 0x82,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- },
[AB8505_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index 860400d..a8f2f07 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -299,7 +299,10 @@ static int max8907_regulator_probe(struct platform_device *pdev)
memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc));
/* Backwards compatibility with MAX8907B; SD1 uses different voltages */
- regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
+ ret = regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
+ if (ret)
+ return ret;
+
if ((val & MAX8907_II2RR_VERSION_MASK) ==
MAX8907_II2RR_VERSION_REV_B) {
pmic->desc[MAX8907_SD1].min_uV = 637500;
@@ -336,14 +339,20 @@ static int max8907_regulator_probe(struct platform_device *pdev)
}
if (pmic->desc[i].ops == &max8907_ldo_ops) {
- regmap_read(config.regmap, pmic->desc[i].enable_reg,
+ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
&val);
+ if (ret)
+ return ret;
+
if ((val & MAX8907_MASK_LDO_SEQ) !=
MAX8907_MASK_LDO_SEQ)
pmic->desc[i].ops = &max8907_ldo_hwctl_ops;
} else if (pmic->desc[i].ops == &max8907_out5v_ops) {
- regmap_read(config.regmap, pmic->desc[i].enable_reg,
+ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
&val);
+ if (ret)
+ return ret;
+
if ((val & (MAX8907_MASK_OUT5V_VINEN |
MAX8907_MASK_OUT5V_ENSRC)) !=
MAX8907_MASK_OUT5V_ENSRC)
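
Note: the max8907 hunks simply stop ignoring the return value of regmap_read(); when the bus transaction fails, the output parameter holds no valid data. The pattern in isolation (illustrative names):

	#include <linux/regmap.h>

	static int demo_read_version(struct regmap *map, unsigned int reg,
				     unsigned int *version)
	{
		unsigned int val;
		int ret;

		ret = regmap_read(map, reg, &val);
		if (ret)
			return ret;	/* 'val' is undefined here, do not use it */

		*version = val & 0x0f;	/* illustrative field extraction */
		return 0;
	}
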
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index 790a4a7..40b7464 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -154,6 +154,7 @@ static struct platform_driver rn5t618_regulator_driver = {
module_platform_driver(rn5t618_regulator_driver);
+MODULE_ALIAS("platform:rn5t618-regulator");
MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
MODULE_DESCRIPTION("RN5T618 regulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 0d5e2d9..aa65140 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1133,7 +1133,8 @@ static u32 get_fcx_max_data(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int fcx_in_css, fcx_in_gneq, fcx_in_features;
- int tpm, mdc;
+ unsigned int mdc;
+ int tpm;
if (dasd_nofcx)
return 0;
@@ -1147,7 +1148,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
return 0;
mdc = ccw_device_get_mdc(device->cdev, 0);
- if (mdc < 0) {
+ if (mdc == 0) {
dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
return 0;
} else {
@@ -1158,12 +1159,12 @@ static u32 get_fcx_max_data(struct dasd_device *device)
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
struct dasd_eckd_private *private = device->private;
- int mdc;
+ unsigned int mdc;
u32 fcx_max_data;
if (private->fcx_max_data) {
mdc = ccw_device_get_mdc(device->cdev, lpm);
- if ((mdc < 0)) {
+ if (mdc == 0) {
dev_warn(&device->cdev->dev,
"Detecting the maximum data size for zHPF "
"requests failed (rc=%d) for a new path %x\n",
@@ -1767,7 +1768,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
dasd_free_block(device->block);
device->block = NULL;
out_err1:
- kfree(private->conf_data);
+ dasd_eckd_clear_conf_data(device);
kfree(device->private);
device->private = NULL;
return rc;
@@ -1776,7 +1777,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
- int i;
if (!private)
return;
@@ -1786,21 +1786,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
private->sneq = NULL;
private->vdsneq = NULL;
private->gneq = NULL;
- private->conf_len = 0;
- for (i = 0; i < 8; i++) {
- kfree(device->path[i].conf_data);
- if ((__u8 *)device->path[i].conf_data ==
- private->conf_data) {
- private->conf_data = NULL;
- private->conf_len = 0;
- }
- device->path[i].conf_data = NULL;
- device->path[i].cssid = 0;
- device->path[i].ssid = 0;
- device->path[i].chpid = 0;
- }
- kfree(private->conf_data);
- private->conf_data = NULL;
+ dasd_eckd_clear_conf_data(device);
}
static struct dasd_ccw_req *
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index b22922e..474afec 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -595,7 +595,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
* @mask: mask of paths to use
*
* Return the number of 64K-bytes blocks all paths at least support
- * for a transport command. Return values <= 0 indicate failures.
+ * for a transport command. Return value 0 indicates failure.
*/
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 9499cd3..02a936d 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -75,6 +75,7 @@ struct error_hdr {
#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
#define REP82_ERROR_RESERVED_FIELD 0x88
#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
+#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B
#define REP82_ERROR_TRANSPORT_FAIL 0x90
#define REP82_ERROR_PACKET_TRUNCATED 0xA0
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
@@ -105,6 +106,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
case REP82_ERROR_INVALID_DOMAIN_PENDING:
case REP82_ERROR_INVALID_SPECIAL_CMD:
+ case REP82_ERROR_FILTERED_BY_HYPERVISOR:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 21377ac..79b0b4e 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -129,6 +129,9 @@
#define NCR5380_release_dma_irq(x)
#endif
+static unsigned int disconnect_mask = ~0;
+module_param(disconnect_mask, int, 0444);
+
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
static void bus_reset_cleanup(struct Scsi_Host *);
@@ -946,7 +949,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
int err;
bool ret = true;
bool can_disconnect = instance->irq != NO_IRQ &&
- cmd->cmnd[0] != REQUEST_SENSE;
+ cmd->cmnd[0] != REQUEST_SENSE &&
+ (disconnect_mask & BIT(scmd_id(cmd)));
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
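
Note: the NCR5380 hunk adds a disconnect_mask module parameter in which bit N enables disconnect/reselect for target ID N; a command may only disconnect when its target's bit is set. A stripped-down version of that bitmask check (illustrative names, not the driver's exact parameter handling):

	#include <linux/module.h>
	#include <linux/bitops.h>

	static unsigned int demo_disconnect_mask = ~0;	/* default: all targets allowed */
	module_param(demo_disconnect_mask, uint, 0444);

	static bool demo_can_disconnect(int target_id)
	{
		return demo_disconnect_mask & BIT(target_id);
	}
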
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 89f5154..764c46d7 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
atari_scsi_template.sg_tablesize = SG_ALL;
} else {
atari_scsi_template.can_queue = 1;
- atari_scsi_template.sg_tablesize = SG_NONE;
+ atari_scsi_template.sg_tablesize = 1;
}
if (setup_can_queue > 0)
@@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_cmd_per_lun > 0)
atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- /* Leave sg_tablesize at 0 on a Falcon! */
- if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
+ /* Don't increase sg_tablesize on Falcon! */
+ if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
atari_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0) {
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index d0a504a..0a70d54 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -283,8 +283,10 @@ bfad_im_get_stats(struct Scsi_Host *shost)
rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
fcstats, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- if (rc != BFA_STATUS_OK)
+ if (rc != BFA_STATUS_OK) {
+ kfree(fcstats);
return NULL;
+ }
wait_for_completion(&fcomp.comp);
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index be5ee2d..957767d 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_port_name *port_name;
uint8_t buf[64];
uint8_t *fc4_type;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
@@ -377,13 +378,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len = (uint32_t)(pld - (uint8_t *)cmd);
/* Submit FDMI RPA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -404,6 +405,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_rpl *reg_pl;
struct fs_fdmi_attrs *attrib_blk;
uint8_t buf[64];
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
@@ -483,13 +485,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
attrib_blk->numattrs = htonl(numattrs);
/* Submit FDMI RHBA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -504,6 +506,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
void *cmd;
struct fc_fdmi_port_name *port_name;
uint32_t len;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
@@ -534,13 +537,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len += sizeof(*port_name);
/* Submit FDMI request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/**
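
Note: the csiostor hunks convert spin_lock_irq()/spin_unlock_irq() to the irqsave/irqrestore variants because these FDMI completion callbacks may run with interrupts already disabled; the plain _irq unlock would re-enable them unconditionally. In isolation (illustrative names):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_completion_cb(void)
	{
		unsigned long flags;

		/* safe whether or not the caller had interrupts enabled */
		spin_lock_irqsave(&demo_lock, flags);
		/* ... submit the follow-up request under the lock ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}
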
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 045207b..7e3a77d 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -372,8 +372,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
unsigned int noreclaim_flag;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
int rc = 0;
+ if (!tcp_sw_conn->sock) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Transport not bound to socket!\n");
+ return -EINVAL;
+ }
+
noreclaim_flag = memalloc_noreclaim_save();
while (iscsi_sw_tcp_xmit_qlen(conn)) {
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 60de662..b200edc 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -97,12 +97,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
else
dev->dev_type = SAS_SATA_DEV;
dev->tproto = SAS_PROTOCOL_SATA;
- } else {
+ } else if (port->oob_mode == SAS_OOB_MODE) {
struct sas_identify_frame *id =
(struct sas_identify_frame *) dev->frame_rcvd;
dev->dev_type = id->dev_type;
dev->iproto = id->initiator_bits;
dev->tproto = id->target_bits;
+ } else {
+ /* If the oob mode is OOB_NOT_CONNECTED, the port is
+ * disconnected due to race with PHY down. We cannot
+ * continue to discover this port
+ */
+ sas_put_device(dev);
+ pr_warn("Port %016llx is disconnected when discovering\n",
+ SAS_ADDR(port->attached_sas_addr));
+ return -ENODEV;
}
sas_init_dev(dev);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 6dde21d..08ed27b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -4419,12 +4419,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
phba->mbox_ext_buf_ctx.seqNum++;
nemb_tp = phba->mbox_ext_buf_ctx.nembType;
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- rc = -ENOMEM;
- goto job_error;
- }
-
pbuf = (uint8_t *)dmabuf->virt;
size = job->request_payload.payload_len;
sg_copy_to_buffer(job->request_payload.sg_list,
@@ -4461,6 +4455,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
"2968 SLI_CONFIG ext-buffer wr all %d "
"ebuffers received\n",
phba->mbox_ext_buf_ctx.numBuf);
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
/* mailbox command structure for base driver */
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
@@ -4509,6 +4510,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
return SLI_CONFIG_HANDLED;
job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c851fd1..4c84c2a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4102,7 +4102,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 3f88f3d..4a0889d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -5220,9 +5220,14 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
/* If we've already received a PLOGI from this NPort
* we don't need to try to discover it again.
*/
- if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
+ !(ndlp->nlp_type &
+ (NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 043bca6..9641175 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -483,8 +483,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* single discovery thread, this will cause a huge delay in
* discovery. Also this will cause multiple state machines
* running in parallel for this node.
+ * This only applies to a fabric environment.
*/
- if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+ (vport->fc_flag & FC_FABRIC)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index fcf4b41..af937b9 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1591,6 +1591,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Declare and initialization an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
+ .module = THIS_MODULE,
+
/* initiator-based functions */
.localport_delete = lpfc_nvme_localport_delete,
.remoteport_delete = lpfc_nvme_remoteport_delete,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d3bad0d..d8e0ba6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -12689,13 +12689,19 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting active mailbox pointer need to be in sync to flag clear */
phba->sli.mbox_active = NULL;
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Wake up worker thread to post the next pending mailbox command */
lpfc_worker_wake_up(phba);
+ return workposted;
+
out_no_mqe_complete:
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
- return workposted;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return false;
}
/**
@@ -17486,6 +17492,13 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
+ /*
+ * if the rpi value indicates a prior unreg has already
+ * been done, skip the unreg.
+ */
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return;
+
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 643321f..b5050c2 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -429,7 +429,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
mac_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
mac_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index bdffb69..622dcf2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -1502,7 +1502,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
" for diag buffers, requested size(%d)\n",
ioc->name, __func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
ioc->diag_buffer[buffer_type] = request_data;
ioc->diag_buffer_sz[buffer_type] = request_data_sz;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 9edd61c..df5f0bc 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2368,6 +2368,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
+ if (t->slow_task)
+ complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 59c18ca..e5927a0 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -23,8 +23,6 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
int rc = 0;
uint32_t did, sid;
uint16_t xid;
- uint32_t start_time = jiffies / HZ;
- uint32_t current_time;
struct fcoe_wqe *sqe;
unsigned long flags;
u16 sqe_idx;
@@ -50,18 +48,12 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
goto els_err;
}
-retry_els:
els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
if (!els_req) {
- current_time = jiffies / HZ;
- if ((current_time - start_time) > 10) {
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
- "els: Failed els 0x%x\n", op);
- rc = -ENOMEM;
- goto els_err;
- }
- mdelay(20 * USEC_PER_MSEC);
- goto retry_els;
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
+ "Failed to alloc ELS request 0x%x\n", op);
+ rc = -ENOMEM;
+ goto els_err;
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ebca1a4..7f2da56 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1046,8 +1046,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);
-
- qlt_async_event(mb[0], vha, mb);
break;
}
@@ -1065,8 +1063,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(VP_CONFIG_OK, &vha->vp_flags);
-
- qlt_async_event(mb[0], vha, mb);
break;
case MBA_RSCN_UPDATE: /* State Change Registration */
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 6b33a1f..7dceed0 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -578,6 +578,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
}
static struct nvme_fc_port_template qla_nvme_fc_transport = {
+ .module = THIS_MODULE,
.localport_delete = qla_nvme_localport_delete,
.remoteport_delete = qla_nvme_remoteport_delete,
.create_queue = qla_nvme_alloc_queue,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 69ed544..55227d2 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1210,7 +1210,6 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess,
"Scheduling sess %p for deletion %8phC\n",
sess, sess->port_name);
- INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
queue_work(sess->vha->hw->wq, &sess->del_work);
}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 4421f9b..b0ad605 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4285,7 +4285,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
return QLA_SUCCESS;
mem_alloc_error_exit:
- qla4xxx_mem_free(ha);
return QLA_ERROR;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 92bc5b2..ac936b5 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -4960,6 +4960,11 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}
+ if (sdebug_num_tgts < 0) {
+ pr_err("num_tgts must be >= 0\n");
+ return -EINVAL;
+ }
+
if (sdebug_guard > 1) {
pr_err("guard must be 0 or 1\n");
return -EINVAL;
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 0ff083b..617a607 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -30,15 +30,18 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba = 0, txlen;
lba |= ((cdb[1] & 0x1F) << 16);
lba |= (cdb[2] << 8);
lba |= cdb[3];
- txlen = cdb[4];
+ /*
+ * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be read (READ(6)) or written (WRITE(6)).
+ */
+ txlen = cdb[4] ? cdb[4] : 256;
- trace_seq_printf(p, "lba=%llu txlen=%llu",
- (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
trace_seq_putc(p, 0);
return ret;
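
Note: the scsi_trace hunk fixes READ(6)/WRITE(6) decoding: the LBA is a 21-bit field spread over CDB bytes 1-3, and per SBC-2 a TRANSFER LENGTH of 0 means 256 blocks. A standalone, runnable restatement of that decoding:

	#include <stdint.h>
	#include <stdio.h>

	static void decode_rw6(const uint8_t *cdb, uint32_t *lba, uint32_t *txlen)
	{
		*lba = ((cdb[1] & 0x1F) << 16) | (cdb[2] << 8) | cdb[3];
		*txlen = cdb[4] ? cdb[4] : 256;	/* 0 means 256 blocks (SBC-2) */
	}

	int main(void)
	{
		const uint8_t cdb[6] = { 0x08, 0x01, 0x23, 0x45, 0x00, 0x00 };
		uint32_t lba, txlen;

		decode_rw6(cdb, &lba, &txlen);
		printf("lba=%u txlen=%u\n", lba, txlen);	/* prints lba=74565 txlen=256 */
		return 0;
	}
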
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 9492638..af8a7ef 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -498,7 +498,7 @@ static struct scsi_host_template sun3_scsi_template = {
.eh_host_reset_handler = sun3scsi_host_reset,
.can_queue = 16,
.this_id = 7,
- .sg_tablesize = SG_NONE,
+ .sg_tablesize = 1,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.cmd_size = NCR5380_CMD_SIZE,
@@ -520,7 +520,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
sun3_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
sun3_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 64a46b2..c04d281 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4629,10 +4629,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- hba->dev_cmd.query.descriptor = NULL;
*buf_len = be16_to_cpu(response->upiu_res.length);
out_unlock:
+ hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
out:
ufshcd_release_all(hba);
@@ -5512,8 +5512,8 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ufshcd_custom_cmd_log(hba, "h8-enter-failed");
/*
- * If link recovery fails then return error code (-ENOLINK)
- * returned ufshcd_link_recovery().
+ * If link recovery fails then return error code returned from
+ * ufshcd_link_recovery().
* If link recovery succeeds then return -EAGAIN to attempt
* hibern8 enter retry again.
*/
diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
index 8779377..828fbbe 100644
--- a/drivers/spi/spi-cavium-thunderx.c
+++ b/drivers/spi/spi-cavium-thunderx.c
@@ -81,6 +81,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
error:
clk_disable_unprepare(p->clk);
+ pci_release_regions(pdev);
spi_master_put(master);
return ret;
}
@@ -95,6 +96,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
return;
clk_disable_unprepare(p->clk);
+ pci_release_regions(pdev);
/* Put everything in a known state. */
writeq(0, p->register_base + OCTEON_SPI_CFG(p));
}
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 8f2e978..8b79e36 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -832,9 +832,9 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
if (ret)
goto err;
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- ret = -EINVAL;
+ irq = platform_get_irq(ofdev, 0);
+ if (irq < 0) {
+ ret = irq;
goto err;
}
@@ -847,7 +847,6 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
return 0;
err:
- irq_dispose_mapping(irq);
if (type == TYPE_FSL)
of_fsl_spi_free_chipselects(dev);
return ret;
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 7a37090..2e65b70 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -673,6 +673,8 @@ static int img_spfi_probe(struct platform_device *pdev)
dma_release_channel(spfi->tx_ch);
if (spfi->rx_ch)
dma_release_channel(spfi->rx_ch);
+ spfi->tx_ch = NULL;
+ spfi->rx_ch = NULL;
dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
} else {
master->dma_tx = spfi->tx_ch;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 9bf3e5f..b2245cd 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1559,7 +1559,13 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
}
ssp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ssp->clk))
+ return NULL;
+
ssp->irq = platform_get_irq(pdev, 0);
+ if (ssp->irq < 0)
+ return NULL;
+
ssp->type = type;
ssp->pdev = pdev;
ssp->port_id = pxa2xx_spi_get_port_id(adev);
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index a4e43fc..5df01ff 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -385,6 +385,7 @@ static int spi_st_probe(struct platform_device *pdev)
return 0;
clk_disable:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(spi_st->clk);
put_master:
spi_master_put(master);
@@ -396,6 +397,8 @@ static int spi_st_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_st *spi_st = spi_master_get_devdata(master);
+ pm_runtime_disable(&pdev->dev);
+
clk_disable_unprepare(spi_st->clk);
pinctrl_pm_select_sleep_state(&pdev->dev);
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 9831c11..62b074b 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1078,7 +1078,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
ret = clk_enable(tspi->clk);
if (ret < 0) {
dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
- goto exit_free_master;
+ goto exit_clk_unprepare;
}
spi_irq = platform_get_irq(pdev, 0);
@@ -1151,6 +1151,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
free_irq(spi_irq, tspi);
exit_clk_disable:
clk_disable(tspi->clk);
+exit_clk_unprepare:
+ clk_unprepare(tspi->clk);
exit_free_master:
spi_master_put(master);
return ret;
@@ -1164,6 +1166,7 @@ static int tegra_slink_remove(struct platform_device *pdev)
free_irq(tspi->irq, tspi);
clk_disable(tspi->clk);
+ clk_unprepare(tspi->clk);
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 9ab45c8..ab3b79e 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -634,6 +634,9 @@ static int spidev_release(struct inode *inode, struct file *filp)
if (dofree)
kfree(spidev);
}
+#ifdef CONFIG_SPI_SLAVE
+ spi_slave_abort(spidev->spi);
+#endif
mutex_unlock(&device_list_lock);
return 0;
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 2c1b6de..385e142 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -45,8 +45,8 @@
#define PCI171X_RANGE_UNI BIT(4)
#define PCI171X_RANGE_GAIN(x) (((x) & 0x7) << 0)
#define PCI171X_MUX_REG 0x04 /* W: A/D multiplexor control */
-#define PCI171X_MUX_CHANH(x) (((x) & 0xf) << 8)
-#define PCI171X_MUX_CHANL(x) (((x) & 0xf) << 0)
+#define PCI171X_MUX_CHANH(x) (((x) & 0xff) << 8)
+#define PCI171X_MUX_CHANL(x) (((x) & 0xff) << 0)
#define PCI171X_MUX_CHAN(x) (PCI171X_MUX_CHANH(x) | PCI171X_MUX_CHANL(x))
#define PCI171X_STATUS_REG 0x06 /* R: status register */
#define PCI171X_STATUS_IRQ BIT(11) /* 1=IRQ occurred */
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index e5b9484..a09631f 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -632,6 +632,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE,
&devpriv->dio_buffer_phys_addr[i],
GFP_KERNEL);
+ if (!devpriv->dio_buffer[i]) {
+ dev_warn(dev->class_dev,
+ "failed to allocate DMA buffer\n");
+ return -ENOMEM;
+ }
}
/* allocate dma descriptors */
devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev,
@@ -639,6 +644,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
NUM_DMA_DESCRIPTORS,
&devpriv->dma_desc_phys_addr,
GFP_KERNEL);
+ if (!devpriv->dma_desc) {
+ dev_warn(dev->class_dev,
+ "failed to allocate DMA descriptors\n");
+ return -ENOMEM;
+ }
if (devpriv->dma_desc_phys_addr & 0xf) {
dev_warn(dev->class_dev,
" dma descriptors not quad-word aligned (bug)\n");
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 0cbcbad..b81c6df 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -780,7 +780,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
fbdefio->deferred_io = fbtft_deferred_io;
fb_deferred_io_init(info);
- strncpy(info->fix.id, dev->driver->name, 16);
+ snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.xpanstep = 0;
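
Note: the fbtft hunk swaps strncpy() for snprintf() because strncpy() leaves the destination unterminated when the source is at least as long as the buffer, whereas snprintf() always NUL-terminates (truncating if necessary). Runnable illustration:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char id[16];
		const char *name = "a-rather-long-driver-name";

		snprintf(id, sizeof(id), "%s", name);		/* always NUL-terminated */
		printf("%s (%zu chars)\n", id, strlen(id));	/* 15 chars + NUL */
		return 0;
	}
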
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 904b988..7c895af 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -805,7 +805,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
- if (psta->qos_option)
+ if (psta && psta->qos_option)
qos_option = true;
} else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
@@ -813,7 +813,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
- if (psta->qos_option)
+ if (psta && psta->qos_option)
qos_option = true;
} else {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 3733b73..5364533 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -45,6 +45,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+ {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index db3eb7e..fbbd1b5 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1506,7 +1506,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
(tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
struct usb_device *udev = priv->udev;
int pend;
- int status;
+ int status, rt = -1;
struct urb *tx_urb = NULL, *tx_urb_zero = NULL;
unsigned int idx_pipe;
@@ -1650,8 +1650,10 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
}
if (bSend0Byte) {
tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC);
- if (!tx_urb_zero)
- return -ENOMEM;
+ if (!tx_urb_zero) {
+ rt = -ENOMEM;
+ goto error;
+ }
usb_fill_bulk_urb(tx_urb_zero, udev,
usb_sndbulkpipe(udev, idx_pipe),
&zero, 0, tx_zero_isr, dev);
@@ -1661,7 +1663,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
"Error TX URB for zero byte %d, error %d",
atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
- return -1;
+ goto error;
}
}
netif_trans_update(dev);
@@ -1672,7 +1674,12 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
RT_TRACE(COMP_ERR, "Error TX URB %d, error %d",
atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
- return -1;
+
+error:
+ dev_kfree_skb_any(skb);
+ usb_free_urb(tx_urb);
+ usb_free_urb(tx_urb_zero);
+ return rt;
}
static short rtl8192_usb_initendpoints(struct net_device *dev)
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 74715c8..705fffa 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -269,6 +269,7 @@ struct vnt_private {
u8 mac_hw;
/* netdev */
struct usb_device *usb;
+ struct usb_interface *intf;
u64 tsf_time;
u8 rx_rate;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index cc6d877..645ea16 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -954,6 +954,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
priv = hw->priv;
priv->hw = hw;
priv->usb = udev;
+ priv->intf = intf;
vnt_set_options(priv);
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index b2fc17f..3f6ccde 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -109,6 +109,7 @@ void vnt_run_command(struct work_struct *work)
if (vnt_init(priv)) {
/* If fail all ends TODO retry */
dev_err(&priv->usb->dev, "failed to start\n");
+ usb_set_intfdata(priv->intf, NULL);
ieee80211_free_hw(priv->hw);
return;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index fb7bd422..21ce92e 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1158,7 +1158,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);
- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
@@ -2004,7 +2006,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -4236,6 +4240,8 @@ int iscsit_close_connection(
* must wait until they have completed.
*/
iscsit_check_conn_usage_count(conn);
+ target_sess_cmd_list_set_waiting(sess->se_sess);
+ target_wait_for_sess_cmds(sess->se_sess);
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index e2fa3a3..b6bf605 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -78,7 +78,7 @@ static int chap_check_algorithm(const char *a_str)
if (!token)
goto out;
- if (!strncmp(token, "5", 1)) {
+ if (!strcmp(token, "5")) {
pr_debug("Selected MD5 Algorithm\n");
kfree(orig);
return CHAP_DIGEST_MD5;
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index a1d272a..c33150f 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -120,6 +120,14 @@ static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count)
return got;
}
+/**
+ * hvterm_raw_put_chars: send characters to firmware for given vterm adapter
+ * @vtermno: The virtual terminal number.
+ * @buf: The characters to send. Because of the underlying hypercall in
+ * hvc_put_chars(), this buffer must be at least 16 bytes long, even if
+ * you are sending fewer chars.
+ * @count: number of chars to send.
+ */
static int hvterm_raw_put_chars(uint32_t vtermno, const char *buf, int count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
@@ -232,6 +240,7 @@ static const struct hv_ops hvterm_hvsi_ops = {
static void udbg_hvc_putc(char c)
{
int count = -1;
+ unsigned char bounce_buffer[16];
if (!hvterm_privs[0])
return;
@@ -242,7 +251,12 @@ static void udbg_hvc_putc(char c)
do {
switch(hvterm_privs[0]->proto) {
case HV_PROTOCOL_RAW:
- count = hvterm_raw_put_chars(0, &c, 1);
+ /*
+ * hvterm_raw_put_chars requires at least a 16-byte
+ * buffer, so go via the bounce buffer
+ */
+ bounce_buffer[0] = c;
+ count = hvterm_raw_put_chars(0, bounce_buffer, 1);
break;
case HV_PROTOCOL_HVSI:
count = hvterm_hvsi_put_chars(0, &c, 1);
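
Note: the hvc_vio hunks document that hvterm_raw_put_chars() (and the hypercall behind it) requires a buffer of at least 16 bytes even when sending a single character, so udbg_hvc_putc() now goes through a 16-byte bounce buffer. The shape of that fix, with a stubbed backend (illustrative names only):

	#include <string.h>

	/* stand-in for a backend that insists on a buffer of at least 16 bytes */
	static int demo_raw_put_chars(const char *buf, int count)
	{
		(void)buf;
		return count;
	}

	static int demo_put_char(char c)
	{
		char bounce[16];

		memset(bounce, 0, sizeof(bounce));
		bounce[0] = c;
		return demo_raw_put_chars(bounce, 1);
	}
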
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 4fa5931..f513107 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -56,7 +56,7 @@ static int serdev_device_match(struct device *dev, struct device_driver *drv)
return 1;
if (dev->parent->parent->bus == &platform_bus_type &&
- dev->parent->parent->bus->match(dev, drv))
+ dev->parent->parent->bus->match(dev->parent->parent, drv))
return 1;
return 0;
@@ -73,7 +73,7 @@ static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env)
return rc;
if (dev->parent->parent->bus == &platform_bus_type)
- rc = dev->parent->parent->bus->uevent(dev, env);
+ rc = dev->parent->parent->bus->uevent(dev->parent->parent, env);
return rc;
}
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 9ee41ba..367ce81 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2183,27 +2183,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
mode |= ATMEL_US_USMODE_NORMAL;
}
- /* set the mode, clock divisor, parity, stop bits and data size */
- atmel_uart_writel(port, ATMEL_US_MR, mode);
-
- /*
- * when switching the mode, set the RTS line state according to the
- * new mode, otherwise keep the former state
- */
- if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
- unsigned int rts_state;
-
- if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
- /* let the hardware control the RTS line */
- rts_state = ATMEL_US_RTSDIS;
- } else {
- /* force RTS line to low level */
- rts_state = ATMEL_US_RTSEN;
- }
-
- atmel_uart_writel(port, ATMEL_US_CR, rts_state);
- }
-
/*
* Set the baud rate:
* Fractional baudrate allows to setup output frequency more
@@ -2229,6 +2208,28 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
quot = cd | fp << ATMEL_US_FP_OFFSET;
atmel_uart_writel(port, ATMEL_US_BRGR, quot);
+
+ /* set the mode, clock divisor, parity, stop bits and data size */
+ atmel_uart_writel(port, ATMEL_US_MR, mode);
+
+ /*
+ * when switching the mode, set the RTS line state according to the
+ * new mode, otherwise keep the former state
+ */
+ if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
+ unsigned int rts_state;
+
+ if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
+ /* let the hardware control the RTS line */
+ rts_state = ATMEL_US_RTSDIS;
+ } else {
+ /* force RTS line to low level */
+ rts_state = ATMEL_US_RTSEN;
+ }
+
+ atmel_uart_writel(port, ATMEL_US_CR, rts_state);
+ }
+
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index a0a4f0f..f5578b3 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1602,6 +1602,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
int num_newlines = 0;
bool replaced = false;
void __iomem *tf;
+ int locked = 1;
if (is_uartdm)
tf = port->membase + UARTDM_TF;
@@ -1614,7 +1615,13 @@ static void __msm_console_write(struct uart_port *port, const char *s,
num_newlines++;
count += num_newlines;
- spin_lock(&port->lock);
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock(&port->lock);
+ else
+ spin_lock(&port->lock);
+
if (is_uartdm)
msm_reset_dm_count(port, count);
@@ -1650,7 +1657,9 @@ static void __msm_console_write(struct uart_port *port, const char *s,
iowrite32_rep(tf, buf, 1);
i += num_chars;
}
- spin_unlock(&port->lock);
+
+ if (locked)
+ spin_unlock(&port->lock);
}
#ifdef CONFIG_SERIAL_RX_CONSOLE_ONLY
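
Note: the msm_serial hunk applies the usual console-write locking rule: during an oops the port lock may already be held on the crashing CPU, so the console path only try-locks it (and skips the unlock when it did not get it), and the sysrq case never takes it at all. Reduced to its core (illustrative names):

	#include <linux/kernel.h>
	#include <linux/spinlock.h>
	#include <linux/serial_core.h>

	static void demo_console_write(struct uart_port *port, const char *s, unsigned int n)
	{
		int locked = 1;

		if (port->sysrq)
			locked = 0;			/* already held by the interrupt handler */
		else if (oops_in_progress)
			locked = spin_trylock(&port->lock);
		else
			spin_lock(&port->lock);

		/* ... write 's' to the FIFO ... */
		(void)s;
		(void)n;

		if (locked)
			spin_unlock(&port->lock);
	}
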
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 15e5bf3..dbec0f6 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2813,6 +2813,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
if (uport->cons && uport->dev)
of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
+ tty_port_link_device(port, drv->tty_driver, uport->line);
uart_configure_port(drv, state, uport);
port->console = uart_console(uport);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 18cb8e4..83683a5 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -37,6 +37,7 @@ static int (*orig_bus_suspend)(struct usb_hcd *hcd);
struct ehci_ci_priv {
struct regulator *reg_vbus;
+ bool enabled;
};
static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
@@ -48,7 +49,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
int ret = 0;
int port = HCS_N_PORTS(ehci->hcs_params);
- if (priv->reg_vbus) {
+ if (priv->reg_vbus && enable != priv->enabled) {
if (port > 1) {
dev_warn(dev,
"Not support multi-port regulator control\n");
@@ -64,6 +65,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
enable ? "enable" : "disable", ret);
return ret;
}
+ priv->enabled = enable;
}
if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 81312e2..623f996 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_INT] = 1024,
};
-static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
- int asnum, struct usb_host_interface *ifp, int num_ep,
- unsigned char *buffer, int size)
+static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
+ struct usb_endpoint_descriptor *e2)
+{
+ if (e1->bEndpointAddress == e2->bEndpointAddress)
+ return true;
+
+ if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
+ if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Check for duplicate endpoint addresses in other interfaces and in the
+ * altsetting currently being parsed.
+ */
+static bool config_endpoint_is_duplicate(struct usb_host_config *config,
+ int inum, int asnum, struct usb_endpoint_descriptor *d)
+{
+ struct usb_endpoint_descriptor *epd;
+ struct usb_interface_cache *intfc;
+ struct usb_host_interface *alt;
+ int i, j, k;
+
+ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+ intfc = config->intf_cache[i];
+
+ for (j = 0; j < intfc->num_altsetting; ++j) {
+ alt = &intfc->altsetting[j];
+
+ if (alt->desc.bInterfaceNumber == inum &&
+ alt->desc.bAlternateSetting != asnum)
+ continue;
+
+ for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
+ epd = &alt->endpoint[k].desc;
+
+ if (endpoint_is_duplicate(epd, d))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ struct usb_host_config *config, int inum, int asnum,
+ struct usb_host_interface *ifp, int num_ep,
+ unsigned char *buffer, int size)
{
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
@@ -242,13 +291,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
goto skip_to_next_endpoint_or_interface_descriptor;
/* Check for duplicate endpoint addresses */
- for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
- if (ifp->endpoint[i].desc.bEndpointAddress ==
- d->bEndpointAddress) {
- dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
- cfgno, inum, asnum, d->bEndpointAddress);
- goto skip_to_next_endpoint_or_interface_descriptor;
- }
+ if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
}
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
@@ -346,12 +392,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
- /* Validate the wMaxPacketSize field */
+ /*
+ * Validate the wMaxPacketSize field.
+ * Some devices have isochronous endpoints in altsetting 0;
+ * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
+ * (see the end of section 5.6.3), so don't warn about them.
+ */
maxp = usb_endpoint_maxp(&endpoint->desc);
- if (maxp == 0) {
- dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n",
+ if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
cfgno, inum, asnum, d->bEndpointAddress);
- goto skip_to_next_endpoint_or_interface_descriptor;
}
/* Find the highest legal maxpacket size for this endpoint */
@@ -522,8 +572,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
if (((struct usb_descriptor_header *) buffer)->bDescriptorType
== USB_DT_INTERFACE)
break;
- retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
- num_ep, buffer, size);
+ retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
+ alt, num_ep, buffer, size);
if (retval < 0)
return retval;
++n;
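
As a reading aid for the hunks above: the new duplicate-endpoint rule treats two descriptors as colliding when their bEndpointAddress values match, or, if either endpoint is a control endpoint, when just the endpoint number matches, because control endpoints use both directions. A minimal standalone sketch of that rule (plain userspace C with hypothetical struct and helper names; the kernel code itself uses usb_endpoint_num() and usb_endpoint_xfer_control()):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the usb_endpoint_descriptor fields used above. */
struct ep {
	unsigned char addr;	/* bEndpointAddress: direction bit 0x80 | number */
	bool is_control;	/* transfer type is control */
};

/* Mirrors endpoint_is_duplicate(): identical addresses always collide; for
 * control endpoints the 4-bit endpoint number alone collides. */
static bool is_duplicate(struct ep a, struct ep b)
{
	if (a.addr == b.addr)
		return true;
	if (a.is_control || b.is_control)
		return (a.addr & 0x0f) == (b.addr & 0x0f);
	return false;
}

int main(void)
{
	struct ep bulk_out = { 0x01, false }, bulk_in = { 0x81, false };
	struct ep ctrl1    = { 0x01, true  };

	printf("%d\n", is_duplicate(bulk_out, bulk_in));	/* 0: same number, opposite directions */
	printf("%d\n", is_duplicate(ctrl1, bulk_in));		/* 1: control ep 1 collides with 0x81 */
	return 0;
}
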
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 62b2a71..4fb4cf8 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -755,8 +755,15 @@ static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
intf = usb_ifnum_to_if(dev, ifnum);
if (!intf)
err = -ENOENT;
- else
+ else {
+ unsigned int old_suppress;
+
+ /* suppress uevents while claiming interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
+ }
if (err == 0)
set_bit(ifnum, &ps->ifclaimed);
return err;
@@ -776,7 +783,13 @@ static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum)
if (!intf)
err = -ENOENT;
else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) {
+ unsigned int old_suppress;
+
+ /* suppress uevents while releasing interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
usb_driver_release_interface(&usbfs_driver, intf);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
err = 0;
}
return err;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 6ecf9ff..73b1fc5 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -18,6 +18,7 @@
#include <linux/sched/mm.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/kcov.h>
#include <linux/ioctl.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
@@ -5210,6 +5211,8 @@ static void hub_event(struct work_struct *work)
hub_dev = hub->intfdev;
intf = to_usb_interface(hub_dev);
+ kcov_remote_start_usb((u64)hdev->bus->busnum);
+
dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
hdev->state, hdev->maxchild,
/* NOTE: expects max 15 ports... */
@@ -5316,6 +5319,8 @@ static void hub_event(struct work_struct *work)
/* Balance the stuff in kick_hub_wq() and allow autosuspend */
usb_autopm_put_interface(intf);
kref_put(&hub->kref, hub_release);
+
+ kcov_remote_stop();
}
static const struct usb_device_id hub_id_table[] = {
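
The kcov_remote_start_usb()/kcov_remote_stop() pair above annotates each hub_event() run with a per-bus handle, so a userspace process that enabled remote coverage for that bus also receives coverage from the hub workqueue. A rough sketch of such a consumer, assuming the kcov uapi backported in this tree (KCOV_INIT_TRACE, KCOV_REMOTE_ENABLE, KCOV_TRACE_PC and struct kcov_remote_arg from include/uapi/linux/kcov.h); the debugfs path, the handle encoding (subsystem 1 in the top byte, bus number in the low 32 bits, per the kcov documentation) and the use of bus 1 are assumptions of the sketch, not part of the patch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kcov.h>

#define COVER_SIZE	(64 << 10)	/* coverage buffer size, in unsigned longs */

/* Assumed handle layout: top byte = subsystem (USB is 1), low 32 bits =
 * instance id, here the USB bus number used by kcov_remote_start_usb(). */
static uint64_t usb_bus_handle(uint32_t busnum)
{
	return (1ull << 56) | busnum;
}

int main(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	struct kcov_remote_arg *arg;
	unsigned long *cover;

	if (fd == -1) {
		perror("open /sys/kernel/debug/kcov");
		return 1;
	}
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE)) {
		perror("KCOV_INIT_TRACE");
		return 1;
	}
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	arg = calloc(1, sizeof(*arg) + sizeof(uint64_t));
	if (!arg)
		return 1;
	arg->trace_mode = KCOV_TRACE_PC;
	arg->area_size = COVER_SIZE;
	arg->num_handles = 1;
	arg->handles[0] = usb_bus_handle(1);	/* collect from hub_event() on bus 1 */
	if (ioctl(fd, KCOV_REMOTE_ENABLE, arg)) {
		perror("KCOV_REMOTE_ENABLE");
		return 1;
	}
	free(arg);

	sleep(5);	/* plug/unplug a device on bus 1 meanwhile */
	printf("collected %lu coverage PCs\n", cover[0]);

	munmap(cover, COVER_SIZE * sizeof(unsigned long));
	close(fd);
	return 0;
}
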
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 4c488d1..dc99ed9 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -625,8 +625,12 @@ static void ecm_disable(struct usb_function *f)
DBG(cdev, "ecm deactivated\n");
- if (ecm->port.in_ep->enabled)
+ if (ecm->port.in_ep->enabled) {
gether_disconnect(&ecm->port);
+ } else {
+ ecm->port.in_ep->desc = NULL;
+ ecm->port.out_ep->desc = NULL;
+ }
usb_ep_disable(ecm->notify);
ecm->notify->desc = NULL;
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index c14b02a..25ce66d 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -632,6 +632,7 @@ static void rndis_disable(struct usb_function *f)
gether_disconnect(&rndis->port);
usb_ep_disable(rndis->notify);
+ rndis->notify->desc = NULL;
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 7e90f78..8ee7652 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -980,8 +980,18 @@ static int dummy_udc_start(struct usb_gadget *g,
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
- if (driver->max_speed == USB_SPEED_UNKNOWN)
+ switch (g->speed) {
+ /* All the speeds we support */
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ break;
+ default:
+ dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n",
+ driver->max_speed);
return -EINVAL;
+ }
/*
* SLAVE side init ... the layer above hardware, which
@@ -1325,7 +1335,7 @@ static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
u32 this_sg;
bool next_sg;
- to_host = usb_pipein(urb->pipe);
+ to_host = usb_urb_dir_in(urb);
rbuf = req->req.buf + req->req.actual;
if (!urb->num_sgs) {
@@ -1413,7 +1423,7 @@ static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb,
/* FIXME update emulated data toggle too */
- to_host = usb_pipein(urb->pipe);
+ to_host = usb_urb_dir_in(urb);
if (unlikely(len == 0))
is_short = 1;
else {
@@ -1770,6 +1780,7 @@ static void dummy_timer(unsigned long _dum_hcd)
int i;
/* simplistic model for one frame's bandwidth */
+ /* FIXME: account for transaction and packet overhead */
switch (dum->gadget.speed) {
case USB_SPEED_LOW:
total = 8/*bytes*/ * 12/*packets*/;
@@ -1784,9 +1795,10 @@ static void dummy_timer(unsigned long _dum_hcd)
/* Bus speed is 500000 bytes/ms, so use a little less */
total = 490000;
break;
- default:
+ default: /* Can't happen */
dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
- return;
+ total = 0;
+ break;
}
/* FIXME if HZ != 1000 this will probably misbehave ... */
@@ -1814,7 +1826,6 @@ static void dummy_timer(unsigned long _dum_hcd)
struct dummy_request *req;
u8 address;
struct dummy_ep *ep = NULL;
- int type;
int status = -EINPROGRESS;
/* stop when we reach URBs queued after the timer interrupt */
@@ -1826,18 +1837,14 @@ static void dummy_timer(unsigned long _dum_hcd)
goto return_urb;
else if (dum_hcd->rh_state != DUMMY_RH_RUNNING)
continue;
- type = usb_pipetype(urb->pipe);
- /* used up this frame's non-periodic bandwidth?
- * FIXME there's infinite bandwidth for control and
- * periodic transfers ... unrealistic.
- */
- if (total <= 0 && type == PIPE_BULK)
+ /* Used up this frame's bandwidth? */
+ if (total <= 0)
continue;
/* find the gadget's ep for this request (if configured) */
address = usb_pipeendpoint (urb->pipe);
- if (usb_pipein(urb->pipe))
+ if (usb_urb_dir_in(urb))
address |= USB_DIR_IN;
ep = find_endpoint(dum, address);
if (!ep) {
@@ -2390,7 +2397,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
s = "?";
break;
} s; }),
- ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "",
+ ep, ep ? (usb_urb_dir_in(urb) ? "in" : "out") : "",
({ char *s; \
switch (usb_pipetype(urb->pipe)) { \
case PIPE_CONTROL: \
@@ -2734,7 +2741,7 @@ static struct platform_driver dummy_hcd_driver = {
};
/*-------------------------------------------------------------------------*/
-#define MAX_NUM_UDC 2
+#define MAX_NUM_UDC 32
static struct platform_device *the_udc_pdev[MAX_NUM_UDC];
static struct platform_device *the_hcd_pdev[MAX_NUM_UDC];
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 2a166de..8ca0890 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -40,6 +40,10 @@
/*-------------------------------------------------------------------------*/
+/* PID Codes that are used here, from EHCI specification, Table 3-16. */
+#define PID_CODE_IN 1
+#define PID_CODE_SETUP 2
+
/* fill a qtd, returning how much of the buffer we were able to queue up */
static int
@@ -203,7 +207,7 @@ static int qtd_copy_status (
int status = -EINPROGRESS;
/* count IN/OUT bytes, not SETUP (even short packets) */
- if (likely (QTD_PID (token) != 2))
+ if (likely(QTD_PID(token) != PID_CODE_SETUP))
urb->actual_length += length - QTD_LENGTH (token);
/* don't modify error codes */
@@ -219,6 +223,13 @@ static int qtd_copy_status (
if (token & QTD_STS_BABBLE) {
/* FIXME "must" disable babbling device's port too */
status = -EOVERFLOW;
+ /*
+ * When MMF is active and PID Code is IN, queue is halted.
+ * EHCI Specification, Table 4-13.
+ */
+ } else if ((token & QTD_STS_MMF) &&
+ (QTD_PID(token) == PID_CODE_IN)) {
+ status = -EPROTO;
/* CERR nonzero + halt --> stall */
} else if (QTD_CERR(token)) {
status = -EPIPE;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 021a2d3..09f2282 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -497,7 +497,6 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
retval = xhci_resume(xhci, hibernated);
return retval;
}
-#endif /* CONFIG_PM */
static void xhci_pci_shutdown(struct usb_hcd *hcd)
{
@@ -510,6 +509,7 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd)
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
pci_set_power_state(pdev, PCI_D3hot);
}
+#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index ff17e94..dca39c9 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1838,6 +1838,9 @@ static const struct attribute_group musb_attr_group = {
#define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \
(2 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
+#define MUSB_QUIRK_B_DISCONNECT_99 (MUSB_DEVCTL_BDEVICE | \
+ (3 << MUSB_DEVCTL_VBUS_SHIFT) | \
+ MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
@@ -1860,6 +1863,11 @@ static void musb_pm_runtime_check_session(struct musb *musb)
s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
+ case MUSB_QUIRK_B_DISCONNECT_99:
+ musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+ break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
@@ -2320,6 +2328,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_disable_interrupts(musb);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */
+ musb_writeb(musb->mregs, MUSB_POWER, 0);
+
/* Init IRQ workqueue before request_irq */
INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 512108e..1dc35ab 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -399,7 +399,7 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
controller->controller.channel_abort = dma_channel_abort;
if (request_irq(irq, dma_controller_irq, 0,
- dev_name(musb->controller), &controller->controller)) {
+ dev_name(musb->controller), controller)) {
dev_err(dev, "request_irq %d failed!\n", irq);
musb_dma_controller_destroy(&controller->controller);
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index b8620aa..8424c16 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -163,11 +163,12 @@ struct usbhs_priv;
#define VBSTS (1 << 7) /* VBUS_0 and VBUSIN_0 Input Status */
#define VALID (1 << 3) /* USB Request Receive */
-#define DVSQ_MASK (0x3 << 4) /* Device State */
+#define DVSQ_MASK (0x7 << 4) /* Device State */
#define POWER_STATE (0 << 4)
#define DEFAULT_STATE (1 << 4)
#define ADDRESS_STATE (2 << 4)
#define CONFIGURATION_STATE (3 << 4)
+#define SUSPENDED_STATE (4 << 4)
#define CTSQ_MASK (0x7) /* Control Transfer Stage */
#define IDLE_SETUP_STAGE 0 /* Idle stage or setup stage */
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 0dedb0d..b27f213 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -465,12 +465,18 @@ static int usbhsg_irq_dev_state(struct usbhs_priv *priv,
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+ int state = usbhs_status_get_device_state(irq_state);
gpriv->gadget.speed = usbhs_bus_get_speed(priv);
- dev_dbg(dev, "state = %x : speed : %d\n",
- usbhs_status_get_device_state(irq_state),
- gpriv->gadget.speed);
+ dev_dbg(dev, "state = %x : speed : %d\n", state, gpriv->gadget.speed);
+
+ if (gpriv->gadget.speed != USB_SPEED_UNKNOWN &&
+ (state & SUSPENDED_STATE)) {
+ if (gpriv->driver && gpriv->driver->suspend)
+ gpriv->driver->suspend(&gpriv->gadget);
+ usb_gadget_set_state(&gpriv->gadget, USB_STATE_SUSPENDED);
+ }
return 0;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 8d349f2..e69e315 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -570,6 +570,9 @@ static void option_instat_callback(struct urb *urb);
/* Interface must have two endpoints */
#define NUMEP2 BIT(16)
+/* Device needs ZLP */
+#define ZLP BIT(17)
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -1175,6 +1178,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
+ .driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
@@ -1199,6 +1204,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -2107,6 +2114,9 @@ static int option_attach(struct usb_serial *serial)
if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
data->use_send_setup = 1;
+ if (device_flags & ZLP)
+ data->use_zlp = 1;
+
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index d28dab4..9879773 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -36,6 +36,7 @@ struct usb_wwan_intf_private {
spinlock_t susp_lock;
unsigned int suspended:1;
unsigned int use_send_setup:1;
+ unsigned int use_zlp:1;
int in_flight;
unsigned int open_ports;
void *private;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 59bfcb3..95e9576 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -492,6 +492,7 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
void (*callback) (struct urb *))
{
struct usb_serial *serial = port->serial;
+ struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
struct urb *urb;
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
@@ -502,6 +503,9 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
usb_sndbulkpipe(serial->dev, endpoint) | dir,
buf, len, callback, ctx);
+ if (intfdata->use_zlp && dir == USB_DIR_OUT)
+ urb->transfer_flags |= URB_ZERO_PACKET;
+
return urb;
}
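
For context on the URB_ZERO_PACKET flag set above: a bulk OUT transfer is terminated by a short packet, so a payload whose length is an exact non-zero multiple of the endpoint's wMaxPacketSize needs a trailing zero-length packet, which the flag asks the host controller driver to append; devices carrying the new ZLP quirk expect their bulk OUT transfers to end this way. A minimal illustration of when the extra packet is required (plain C, hypothetical values):

#include <stdbool.h>
#include <stdio.h>

/* A ZLP is needed exactly when the payload fills the final packet
 * completely, i.e. the length is a non-zero multiple of wMaxPacketSize. */
static bool needs_zlp(unsigned int len, unsigned int maxpacket)
{
	return len != 0 && (len % maxpacket) == 0;
}

int main(void)
{
	printf("%d\n", needs_zlp(1024, 512));	/* 1: two full packets, ZLP terminates */
	printf("%d\n", needs_zlp(1000, 512));	/* 0: final 488-byte packet is already short */
	return 0;
}
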
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index da03451..cd01f1e 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -742,6 +742,9 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
copy -= recv;
ret += recv;
+
+ if (!copy)
+ break;
}
if (ret != size)
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 3f998b6..c13dd52 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -91,16 +91,21 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0);
/* recv transfer buffer */
- if (usbip_recv_xbuff(ud, urb) < 0)
- return;
+ if (usbip_recv_xbuff(ud, urb) < 0) {
+ urb->status = -EPROTO;
+ goto error;
+ }
/* recv iso_packet_descriptor */
- if (usbip_recv_iso(ud, urb) < 0)
- return;
+ if (usbip_recv_iso(ud, urb) < 0) {
+ urb->status = -EPROTO;
+ goto error;
+ }
/* restore the padding in iso packets */
usbip_pad_iso(ud, urb);
+error:
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 3d7bea1..85edacc 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -31,6 +31,7 @@
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
+#include <linux/kcov.h>
#include "vhost.h"
@@ -361,7 +362,9 @@ static int vhost_worker(void *data)
llist_for_each_entry_safe(work, work_next, node, node) {
clear_bit(VHOST_WORK_QUEUED, &work->flags);
__set_current_state(TASK_RUNNING);
+ kcov_remote_start_common(dev->kcov_handle);
work->fn(work);
+ kcov_remote_stop();
if (need_resched())
schedule();
}
@@ -521,6 +524,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
/* No owner, become one */
dev->mm = get_task_mm(current);
+ dev->kcov_handle = kcov_common_handle();
worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
@@ -546,6 +550,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
if (dev->mm)
mmput(dev->mm);
dev->mm = NULL;
+ dev->kcov_handle = 0;
err_mm:
return err;
}
@@ -665,6 +670,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
if (dev->worker) {
kthread_stop(dev->worker);
dev->worker = NULL;
+ dev->kcov_handle = 0;
}
if (dev->mm)
mmput(dev->mm);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 950c5c4..6e8f67f 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -175,6 +175,7 @@ struct vhost_dev {
wait_queue_head_t wait;
int weight;
int byte_weight;
+ u64 kcov_handle;
};
bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 491de83..6391dc5 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -436,7 +436,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
virtio_transport_deliver_tap_pkt(pkt);
/* Only accept correctly addressed packets */
- if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
+ if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
+ le64_to_cpu(pkt->hdr.dst_cid) ==
+ vhost_transport_get_local_cid())
virtio_transport_recv_pkt(pkt);
else
virtio_transport_free_pkt(pkt);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 71a6dee..3f9260a 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -401,7 +401,8 @@ static struct notifier_block xen_memory_nb = {
#else
static enum bp_state reserve_additional_memory(void)
{
- balloon_stats.target_pages = balloon_stats.current_pages;
+ balloon_stats.target_pages = balloon_stats.current_pages +
+ balloon_stats.target_unpopulated;
return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index e00c8a9..72d7589 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -265,16 +265,17 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
}
}
-static void run_ordered_work(struct __btrfs_workqueue *wq)
+static void run_ordered_work(struct __btrfs_workqueue *wq,
+ struct btrfs_work *self)
{
struct list_head *list = &wq->ordered_list;
struct btrfs_work *work;
spinlock_t *lock = &wq->list_lock;
unsigned long flags;
+ void *wtag;
+ bool free_self = false;
while (1) {
- void *wtag;
-
spin_lock_irqsave(lock, flags);
if (list_empty(list))
break;
@@ -300,16 +301,47 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
list_del(&work->ordered_list);
spin_unlock_irqrestore(lock, flags);
- /*
- * We don't want to call the ordered free functions with the
- * lock held though. Save the work as tag for the trace event,
- * because the callback could free the structure.
- */
- wtag = work;
- work->ordered_free(work);
- trace_btrfs_all_work_done(wq->fs_info, wtag);
+ if (work == self) {
+ /*
+ * This is the work item that the worker is currently
+ * executing.
+ *
+ * The kernel workqueue code guarantees non-reentrancy
+ * of work items. I.e., if a work item with the same
+ * address and work function is queued twice, the second
+ * execution is blocked until the first one finishes. A
+ * work item may be freed and recycled with the same
+ * work function; the workqueue code assumes that the
+ * original work item cannot depend on the recycled work
+ * item in that case (see find_worker_executing_work()).
+ *
+ * Note that the work of one Btrfs filesystem may depend
+ * on the work of another Btrfs filesystem via, e.g., a
+ * loop device. Therefore, we must not allow the current
+ * work item to be recycled until we are really done,
+ * otherwise we break the above assumption and can
+ * deadlock.
+ */
+ free_self = true;
+ } else {
+ /*
+ * We don't want to call the ordered free functions with
+ * the lock held though. Save the work as tag for the
+ * trace event, because the callback could free the
+ * structure.
+ */
+ wtag = work;
+ work->ordered_free(work);
+ trace_btrfs_all_work_done(wq->fs_info, wtag);
+ }
}
spin_unlock_irqrestore(lock, flags);
+
+ if (free_self) {
+ wtag = self;
+ self->ordered_free(self);
+ trace_btrfs_all_work_done(wq->fs_info, wtag);
+ }
}
static void normal_work_helper(struct btrfs_work *work)
@@ -337,7 +369,7 @@ static void normal_work_helper(struct btrfs_work *work)
work->func(work);
if (need_order) {
set_bit(WORK_DONE_BIT, &work->flags);
- run_ordered_work(wq);
+ run_ordered_work(wq, work);
}
if (!need_order)
trace_btrfs_all_work_done(wq->fs_info, wtag);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d2263ca..740ef42 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -427,7 +427,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
for (node = rb_first(tm_root); node; node = next) {
next = rb_next(node);
tm = rb_entry(node, struct tree_mod_elem, node);
- if (tm->seq > min_seq)
+ if (tm->seq >= min_seq)
continue;
rb_erase(node, tm_root);
kfree(tm);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8138345..a8ea562 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1679,8 +1679,8 @@ static void end_workqueue_fn(struct btrfs_work *work)
bio->bi_status = end_io_wq->status;
bio->bi_private = end_io_wq->private;
bio->bi_end_io = end_io_wq->end_io;
- kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
bio_endio(bio);
+ kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
}
static int cleaner_kthread(void *arg)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2965bb4..c133106 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4938,12 +4938,14 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
return eb;
eb = alloc_dummy_extent_buffer(fs_info, start);
if (!eb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
eb->fs_info = fs_info;
again:
ret = radix_tree_preload(GFP_NOFS);
- if (ret)
+ if (ret) {
+ exists = ERR_PTR(ret);
goto free_eb;
+ }
spin_lock(&fs_info->buffer_lock);
ret = radix_tree_insert(&fs_info->buffer_radix,
start >> PAGE_SHIFT, eb);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 739f45b..f2dc517 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5729,7 +5729,6 @@ static void inode_tree_add(struct inode *inode)
static void inode_tree_del(struct inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
int empty = 0;
@@ -5742,7 +5741,6 @@ static void inode_tree_del(struct inode *inode)
spin_unlock(&root->inode_lock);
if (empty && btrfs_root_refs(&root->root_item) == 0) {
- synchronize_srcu(&fs_info->subvol_srcu);
spin_lock(&root->inode_lock);
empty = RB_EMPTY_ROOT(&root->inode_tree);
spin_unlock(&root->inode_lock);
@@ -9820,9 +9818,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
return -EXDEV;
/* close the race window with snapshot create/destroy ioctl */
- if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
- down_read(&fs_info->subvol_sem);
- if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
+ if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
+ new_ino == BTRFS_FIRST_FREE_OBJECTID)
down_read(&fs_info->subvol_sem);
/*
@@ -10014,9 +10011,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
ret2 = btrfs_end_transaction(trans);
ret = ret ? ret : ret2;
out_notrans:
- if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
- up_read(&fs_info->subvol_sem);
- if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
+ if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
+ old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
return ret;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index dd3b482..e82b4f3 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -580,12 +580,18 @@ static noinline int create_subvol(struct inode *dir,
btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2);
ret = btrfs_update_inode(trans, root, dir);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto fail;
+ }
ret = btrfs_add_root_ref(trans, fs_info,
objectid, root->root_key.objectid,
btrfs_ino(BTRFS_I(dir)), index, name, namelen);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto fail;
+ }
ret = btrfs_uuid_tree_add(trans, fs_info, root_item->uuid,
BTRFS_UUID_KEY_SUBVOL, objectid);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 3a4e15b..440c0d5 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -734,21 +734,19 @@ static int reada_start_machine_dev(struct btrfs_device *dev)
static void reada_start_machine_worker(struct btrfs_work *work)
{
struct reada_machine_work *rmw;
- struct btrfs_fs_info *fs_info;
int old_ioprio;
rmw = container_of(work, struct reada_machine_work, work);
- fs_info = rmw->fs_info;
-
- kfree(rmw);
old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
task_nice_ioprio(current));
set_task_ioprio(current, BTRFS_IOPRIO_READA);
- __reada_start_machine(fs_info);
+ __reada_start_machine(rmw->fs_info);
set_task_ioprio(current, old_ioprio);
- atomic_dec(&fs_info->reada_works_cnt);
+ atomic_dec(&rmw->fs_info->reada_works_cnt);
+
+ kfree(rmw);
}
static void __reada_start_machine(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 9fa6db6..d4c00ed 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4587,6 +4587,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
if (IS_ERR(fs_root)) {
err = PTR_ERR(fs_root);
+ list_add_tail(&reloc_root->root_list, &reloc_roots);
goto out_free;
}
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 61192c5..2ebae97 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2421,14 +2421,13 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
scrub_write_block_to_dev_replace(sblock);
}
- scrub_block_put(sblock);
-
if (sctx->is_dev_replace && sctx->flush_all_writes) {
mutex_lock(&sctx->wr_lock);
scrub_wr_submit(sctx);
mutex_unlock(&sctx->wr_lock);
}
+ scrub_block_put(sblock);
scrub_pending_bio_dec(sctx);
}
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index 8444a01..f6c783e9 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -475,9 +475,9 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
root->fs_info->tree_root = root;
root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
- if (!root->node) {
- test_msg("Couldn't allocate dummy buffer\n");
- ret = -ENOMEM;
+ if (IS_ERR(root->node)) {
+ test_msg("couldn't allocate dummy buffer\n");
+ ret = PTR_ERR(root->node);
goto out;
}
btrfs_set_header_level(root->node, 0);
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 578fd04..eb72cf2 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -487,9 +487,9 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
* *cough*backref walking code*cough*
*/
root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
- if (!root->node) {
+ if (IS_ERR(root->node)) {
test_msg("Couldn't allocate dummy buffer\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(root->node);
goto out;
}
btrfs_set_header_level(root->node, 0);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index e35301e..98c397e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -6018,9 +6018,28 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
if (IS_ERR(wc.replay_dest)) {
ret = PTR_ERR(wc.replay_dest);
+
+ /*
+ * We didn't find the subvol, likely because it was
+ * deleted. This is ok, simply skip this log and go to
+ * the next one.
+ *
+ * We need to exclude the root because we can't have
+ * other log replays overwriting this log as we'll read
+ * it back in a few more times. This will keep our
+ * block from being modified, and we'll just bail for
+ * each subsequent pass.
+ */
+ if (ret == -ENOENT)
+ ret = btrfs_pin_extent_for_log_replay(fs_info,
+ log->node->start,
+ log->node->len);
free_extent_buffer(log->node);
free_extent_buffer(log->commit_root);
kfree(log);
+
+ if (!ret)
+ goto next;
btrfs_handle_fs_error(fs_info, ret,
"Couldn't read target root for tree log recovery.");
goto error;
@@ -6052,7 +6071,6 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
&root->highest_objectid);
}
- key.offset = found_key.offset - 1;
wc.replay_dest->log_root = NULL;
free_extent_buffer(log->node);
free_extent_buffer(log->commit_root);
@@ -6060,9 +6078,10 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
if (ret)
goto error;
-
+next:
if (found_key.offset == 0)
break;
+ key.offset = found_key.offset - 1;
}
btrfs_release_path(path);
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 726f928..331f3a1 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -336,6 +336,8 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
}
if (ret < 0 && ret != -ENOENT)
goto out;
+ key.offset++;
+ goto again_search_slot;
}
item_size -= sizeof(subid_le);
offset += sizeof(subid_le);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 20ce45c..715d76b 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -361,7 +361,7 @@ static struct kobject *cdev_get(struct cdev *p)
if (owner && !try_module_get(owner))
return NULL;
- kobj = kobject_get(&p->kobj);
+ kobj = kobject_get_unless_zero(&p->kobj);
if (!kobj)
module_put(owner);
return kobj;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index a8303b1..e966a8c 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -1581,9 +1581,10 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
#endif
case FICLONE:
+ goto do_ioctl;
case FICLONERANGE:
case FIDEDUPERANGE:
- goto do_ioctl;
+ goto found_handler;
case FIBMAP:
case FIGETBSZ:
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 093add5..aa76f1e5 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -26,7 +26,7 @@
#include <linux/namei.h>
#include "fscrypt_private.h"
-static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
+void fscrypt_decrypt_bio(struct bio *bio)
{
struct bio_vec *bv;
int i;
@@ -37,37 +37,10 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
bv->bv_offset);
if (ret)
SetPageError(page);
- else if (done)
- SetPageUptodate(page);
- if (done)
- unlock_page(page);
}
}
-
-void fscrypt_decrypt_bio(struct bio *bio)
-{
- __fscrypt_decrypt_bio(bio, false);
-}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
-static void completion_pages(struct work_struct *work)
-{
- struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
- struct bio *bio = ctx->bio;
-
- __fscrypt_decrypt_bio(bio, true);
- fscrypt_release_ctx(ctx);
- bio_put(bio);
-}
-
-void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
-{
- INIT_WORK(&ctx->work, completion_pages);
- ctx->bio = bio;
- fscrypt_enqueue_decrypt_work(&ctx->work);
-}
-EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
-
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 35efeae..6e6f39e 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -26,29 +26,20 @@
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
-#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
-static unsigned int num_prealloc_crypto_ctxs = 128;
module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
"Number of crypto pages to preallocate");
-module_param(num_prealloc_crypto_ctxs, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
- "Number of crypto contexts to preallocate");
static mempool_t *fscrypt_bounce_page_pool = NULL;
-static LIST_HEAD(fscrypt_free_ctxs);
-static DEFINE_SPINLOCK(fscrypt_ctx_lock);
-
static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
-static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
@@ -57,62 +48,6 @@ void fscrypt_enqueue_decrypt_work(struct work_struct *work)
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
-/**
- * fscrypt_release_ctx() - Release a decryption context
- * @ctx: The decryption context to release.
- *
- * If the decryption context was allocated from the pre-allocated pool, return
- * it to that pool. Else, free it.
- */
-void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
- unsigned long flags;
-
- if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
- kmem_cache_free(fscrypt_ctx_cachep, ctx);
- } else {
- spin_lock_irqsave(&fscrypt_ctx_lock, flags);
- list_add(&ctx->free_list, &fscrypt_free_ctxs);
- spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
- }
-}
-EXPORT_SYMBOL(fscrypt_release_ctx);
-
-/**
- * fscrypt_get_ctx() - Get a decryption context
- * @gfp_flags: The gfp flag for memory allocation
- *
- * Allocate and initialize a decryption context.
- *
- * Return: A new decryption context on success; an ERR_PTR() otherwise.
- */
-struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
-{
- struct fscrypt_ctx *ctx;
- unsigned long flags;
-
- /*
- * First try getting a ctx from the free list so that we don't have to
- * call into the slab allocator.
- */
- spin_lock_irqsave(&fscrypt_ctx_lock, flags);
- ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
- struct fscrypt_ctx, free_list);
- if (ctx)
- list_del(&ctx->free_list);
- spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
- if (!ctx) {
- ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- } else {
- ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- }
- return ctx;
-}
-EXPORT_SYMBOL(fscrypt_get_ctx);
-
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
@@ -137,14 +72,17 @@ EXPORT_SYMBOL(fscrypt_free_bounce_page);
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
const struct fscrypt_info *ci)
{
+ u8 flags = fscrypt_policy_flags(&ci->ci_policy);
+
memset(iv, 0, ci->ci_mode->ivsize);
- iv->lblk_num = cpu_to_le64(lblk_num);
- if (fscrypt_is_direct_key_policy(&ci->ci_policy))
+ if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
+ WARN_ON_ONCE((u32)lblk_num != lblk_num);
+ lblk_num |= (u64)ci->ci_inode->i_ino << 32;
+ } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);
-
- if (ci->ci_essiv_tfm != NULL)
- crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
+ }
+ iv->lblk_num = cpu_to_le64(lblk_num);
}
/* Encrypt or decrypt a single filesystem block of file contents */
@@ -395,17 +333,6 @@ const struct dentry_operations fscrypt_d_ops = {
.d_revalidate = fscrypt_d_revalidate,
};
-static void fscrypt_destroy(void)
-{
- struct fscrypt_ctx *pos, *n;
-
- list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
- kmem_cache_free(fscrypt_ctx_cachep, pos);
- INIT_LIST_HEAD(&fscrypt_free_ctxs);
- mempool_destroy(fscrypt_bounce_page_pool);
- fscrypt_bounce_page_pool = NULL;
-}
-
/**
* fscrypt_initialize() - allocate major buffers for fs encryption.
* @cop_flags: fscrypt operations flags
@@ -413,11 +340,11 @@ static void fscrypt_destroy(void)
* We only call this when we start accessing encrypted files, since it
* results in memory getting allocated that wouldn't otherwise be used.
*
- * Return: Zero on success, non-zero otherwise.
+ * Return: 0 on success; -errno on failure
*/
int fscrypt_initialize(unsigned int cop_flags)
{
- int i, res = -ENOMEM;
+ int err = 0;
/* No need to allocate a bounce page pool if this FS won't use it. */
if (cop_flags & FS_CFLG_OWN_PAGES)
@@ -425,29 +352,18 @@ int fscrypt_initialize(unsigned int cop_flags)
mutex_lock(&fscrypt_init_mutex);
if (fscrypt_bounce_page_pool)
- goto already_initialized;
+ goto out_unlock;
- for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
- struct fscrypt_ctx *ctx;
-
- ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
- if (!ctx)
- goto fail;
- list_add(&ctx->free_list, &fscrypt_free_ctxs);
- }
-
+ err = -ENOMEM;
fscrypt_bounce_page_pool =
mempool_create_page_pool(num_prealloc_crypto_pages, 0);
if (!fscrypt_bounce_page_pool)
- goto fail;
+ goto out_unlock;
-already_initialized:
+ err = 0;
+out_unlock:
mutex_unlock(&fscrypt_init_mutex);
- return 0;
-fail:
- fscrypt_destroy();
- mutex_unlock(&fscrypt_init_mutex);
- return res;
+ return err;
}
void fscrypt_msg(const struct inode *inode, const char *level,
@@ -493,13 +409,9 @@ static int __init fscrypt_init(void)
if (!fscrypt_read_workqueue)
goto fail;
- fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
- if (!fscrypt_ctx_cachep)
- goto fail_free_queue;
-
fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
if (!fscrypt_info_cachep)
- goto fail_free_ctx;
+ goto fail_free_queue;
err = fscrypt_init_keyring();
if (err)
@@ -509,8 +421,6 @@ static int __init fscrypt_init(void)
fail_free_info:
kmem_cache_destroy(fscrypt_info_cachep);
-fail_free_ctx:
- kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
destroy_workqueue(fscrypt_read_workqueue);
fail:
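
A worked example of the IV_INO_LBLK_64 layout produced by the fscrypt_generate_iv() change above: the logical block number is limited to 32 bits, the inode number is packed into the upper 32 bits, and the combined value is stored little-endian while the rest of the IV stays zero. A standalone sketch with hypothetical inode and block values:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t ino = 0x1234;	/* hypothetical inode number (must fit in 32 bits) */
	uint64_t lblk = 7;	/* hypothetical logical block number (32-bit) */

	/* Mirrors: lblk_num |= (u64)ci->ci_inode->i_ino << 32; then cpu_to_le64(). */
	uint64_t iv64 = (ino << 32) | lblk;

	printf("iv->lblk_num = 0x%016" PRIx64 "\n", iv64);	/* 0x0000123400000007 */
	return 0;
}
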
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 81134d34..92372b1 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -165,11 +165,8 @@ struct fscrypt_info {
/* The actual crypto transform used for encryption and decryption */
struct crypto_skcipher *ci_ctfm;
- /*
- * Cipher for ESSIV IV generation. Only set for CBC contents
- * encryption, otherwise is NULL.
- */
- struct crypto_cipher *ci_essiv_tfm;
+ /* True if the key should be freed when this fscrypt_info is freed */
+ bool ci_owns_key;
/*
* Encryption mode used for this inode. It corresponds to either the
@@ -214,8 +211,6 @@ typedef enum {
FS_ENCRYPT,
} fscrypt_direction_t;
-#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
-
static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
u32 filenames_mode)
{
@@ -298,7 +293,8 @@ extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key,
*/
#define HKDF_CONTEXT_KEY_IDENTIFIER 1
#define HKDF_CONTEXT_PER_FILE_KEY 2
-#define HKDF_CONTEXT_PER_MODE_KEY 3
+#define HKDF_CONTEXT_DIRECT_KEY 3
+#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4
extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context,
const u8 *info, unsigned int infolen,
@@ -395,8 +391,14 @@ struct fscrypt_master_key {
struct list_head mk_decrypted_inodes;
spinlock_t mk_decrypted_inodes_lock;
- /* Per-mode tfms for DIRECT_KEY policies, allocated on-demand */
- struct crypto_skcipher *mk_mode_keys[__FSCRYPT_MODE_MAX + 1];
+ /* Crypto API transforms for DIRECT_KEY policies, allocated on-demand */
+ struct crypto_skcipher *mk_direct_tfms[__FSCRYPT_MODE_MAX + 1];
+
+ /*
+ * Crypto API transforms for filesystem-layer implementation of
+ * IV_INO_LBLK_64 policies, allocated on-demand.
+ */
+ struct crypto_skcipher *mk_iv_ino_lblk_64_tfms[__FSCRYPT_MODE_MAX + 1];
} __randomize_layout;
@@ -452,8 +454,7 @@ struct fscrypt_mode {
const char *cipher_str;
int keysize;
int ivsize;
- bool logged_impl_name;
- bool needs_essiv;
+ int logged_impl_name;
};
static inline bool is_private_mode(const struct fscrypt_mode *mode)
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index 5882224..cdc9c0c 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -43,8 +43,10 @@ static void free_master_key(struct fscrypt_master_key *mk)
wipe_master_key_secret(&mk->mk_secret);
- for (i = 0; i < ARRAY_SIZE(mk->mk_mode_keys); i++)
- crypto_free_skcipher(mk->mk_mode_keys[i]);
+ for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) {
+ crypto_free_skcipher(mk->mk_direct_tfms[i]);
+ crypto_free_skcipher(mk->mk_iv_ino_lblk_64_tfms[i]);
+ }
key_put(mk->mk_users);
kzfree(mk);
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index 22a4afc..b45edbd 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -8,15 +8,11 @@
* Heavily modified since then.
*/
-#include <crypto/aes.h>
-#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <linux/key.h>
#include "fscrypt_private.h"
-static struct crypto_shash *essiv_hash_tfm;
-
static struct fscrypt_mode available_modes[] = {
[FSCRYPT_MODE_AES_256_XTS] = {
.friendly_name = "AES-256-XTS",
@@ -31,11 +27,10 @@ static struct fscrypt_mode available_modes[] = {
.ivsize = 16,
},
[FSCRYPT_MODE_AES_128_CBC] = {
- .friendly_name = "AES-128-CBC",
- .cipher_str = "cbc(aes)",
+ .friendly_name = "AES-128-CBC-ESSIV",
+ .cipher_str = "essiv(cbc(aes),sha256)",
.keysize = 16,
.ivsize = 16,
- .needs_essiv = true,
},
[FSCRYPT_MODE_AES_128_CTS] = {
.friendly_name = "AES-128-CTS-CBC",
@@ -92,15 +87,13 @@ struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode,
mode->cipher_str, PTR_ERR(tfm));
return tfm;
}
- if (unlikely(!mode->logged_impl_name)) {
+ if (!xchg(&mode->logged_impl_name, 1)) {
/*
* fscrypt performance can vary greatly depending on which
* crypto algorithm implementation is used. Help people debug
* performance problems by logging the ->cra_driver_name the
- * first time a mode is used. Note that multiple threads can
- * race here, but it doesn't really matter.
+ * first time a mode is used.
*/
- mode->logged_impl_name = true;
pr_info("fscrypt: %s using implementation \"%s\"\n",
mode->friendly_name,
crypto_skcipher_alg(tfm)->base.cra_driver_name);
@@ -117,132 +110,64 @@ struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode,
return ERR_PTR(err);
}
-static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
-{
- struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm);
-
- /* init hash transform on demand */
- if (unlikely(!tfm)) {
- struct crypto_shash *prev_tfm;
-
- tfm = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(tfm)) {
- if (PTR_ERR(tfm) == -ENOENT) {
- fscrypt_warn(NULL,
- "Missing crypto API support for SHA-256");
- return -ENOPKG;
- }
- fscrypt_err(NULL,
- "Error allocating SHA-256 transform: %ld",
- PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
- prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm);
- if (prev_tfm) {
- crypto_free_shash(tfm);
- tfm = prev_tfm;
- }
- }
-
- {
- SHASH_DESC_ON_STACK(desc, tfm);
- desc->tfm = tfm;
- desc->flags = 0;
-
- return crypto_shash_digest(desc, key, keysize, salt);
- }
-}
-
-static int init_essiv_generator(struct fscrypt_info *ci, const u8 *raw_key,
- int keysize)
-{
- int err;
- struct crypto_cipher *essiv_tfm;
- u8 salt[SHA256_DIGEST_SIZE];
-
- if (WARN_ON(ci->ci_mode->ivsize != AES_BLOCK_SIZE))
- return -EINVAL;
-
- essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(essiv_tfm))
- return PTR_ERR(essiv_tfm);
-
- ci->ci_essiv_tfm = essiv_tfm;
-
- err = derive_essiv_salt(raw_key, keysize, salt);
- if (err)
- goto out;
-
- /*
- * Using SHA256 to derive the salt/key will result in AES-256 being
- * used for IV generation. File contents encryption will still use the
- * configured keysize (AES-128) nevertheless.
- */
- err = crypto_cipher_setkey(essiv_tfm, salt, sizeof(salt));
- if (err)
- goto out;
-
-out:
- memzero_explicit(salt, sizeof(salt));
- return err;
-}
-
-/* Given the per-file key, set up the file's crypto transform object(s) */
+/* Given the per-file key, set up the file's crypto transform object */
int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key)
{
- struct fscrypt_mode *mode = ci->ci_mode;
- struct crypto_skcipher *ctfm;
- int err;
+ struct crypto_skcipher *tfm;
- ctfm = fscrypt_allocate_skcipher(mode, derived_key, ci->ci_inode);
- if (IS_ERR(ctfm))
- return PTR_ERR(ctfm);
+ tfm = fscrypt_allocate_skcipher(ci->ci_mode, derived_key, ci->ci_inode);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
- ci->ci_ctfm = ctfm;
-
- if (mode->needs_essiv) {
- err = init_essiv_generator(ci, derived_key, mode->keysize);
- if (err) {
- fscrypt_warn(ci->ci_inode,
- "Error initializing ESSIV generator: %d",
- err);
- return err;
- }
- }
+ ci->ci_ctfm = tfm;
+ ci->ci_owns_key = true;
return 0;
}
static int setup_per_mode_key(struct fscrypt_info *ci,
- struct fscrypt_master_key *mk)
+ struct fscrypt_master_key *mk,
+ struct crypto_skcipher **tfms,
+ u8 hkdf_context, bool include_fs_uuid)
{
+ const struct inode *inode = ci->ci_inode;
+ const struct super_block *sb = inode->i_sb;
struct fscrypt_mode *mode = ci->ci_mode;
u8 mode_num = mode - available_modes;
struct crypto_skcipher *tfm, *prev_tfm;
u8 mode_key[FSCRYPT_MAX_KEY_SIZE];
+ u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)];
+ unsigned int hkdf_infolen = 0;
int err;
- if (WARN_ON(mode_num >= ARRAY_SIZE(mk->mk_mode_keys)))
+ if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX))
return -EINVAL;
/* pairs with cmpxchg() below */
- tfm = READ_ONCE(mk->mk_mode_keys[mode_num]);
+ tfm = READ_ONCE(tfms[mode_num]);
if (likely(tfm != NULL))
goto done;
BUILD_BUG_ON(sizeof(mode_num) != 1);
+ BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
+ BUILD_BUG_ON(sizeof(hkdf_info) != 17);
+ hkdf_info[hkdf_infolen++] = mode_num;
+ if (include_fs_uuid) {
+ memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid,
+ sizeof(sb->s_uuid));
+ hkdf_infolen += sizeof(sb->s_uuid);
+ }
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
- HKDF_CONTEXT_PER_MODE_KEY,
- &mode_num, sizeof(mode_num),
+ hkdf_context, hkdf_info, hkdf_infolen,
mode_key, mode->keysize);
if (err)
return err;
- tfm = fscrypt_allocate_skcipher(mode, mode_key, ci->ci_inode);
+ tfm = fscrypt_allocate_skcipher(mode, mode_key, inode);
memzero_explicit(mode_key, mode->keysize);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
/* pairs with READ_ONCE() above */
- prev_tfm = cmpxchg(&mk->mk_mode_keys[mode_num], NULL, tfm);
+ prev_tfm = cmpxchg(&tfms[mode_num], NULL, tfm);
if (prev_tfm != NULL) {
crypto_free_skcipher(tfm);
tfm = prev_tfm;
@@ -273,7 +198,19 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
ci->ci_mode->friendly_name);
return -EINVAL;
}
- return setup_per_mode_key(ci, mk);
+ return setup_per_mode_key(ci, mk, mk->mk_direct_tfms,
+ HKDF_CONTEXT_DIRECT_KEY, false);
+ } else if (ci->ci_policy.v2.flags &
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
+ /*
+ * IV_INO_LBLK_64: encryption keys are derived from (master_key,
+ * mode_num, filesystem_uuid), and inode number is included in
+ * the IVs. This format is optimized for use with inline
+ * encryption hardware compliant with the UFS or eMMC standards.
+ */
+ return setup_per_mode_key(ci, mk, mk->mk_iv_ino_lblk_64_tfms,
+ HKDF_CONTEXT_IV_INO_LBLK_64_KEY,
+ true);
}
err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
@@ -395,13 +332,10 @@ static void put_crypt_info(struct fscrypt_info *ci)
if (!ci)
return;
- if (ci->ci_direct_key) {
+ if (ci->ci_direct_key)
fscrypt_put_direct_key(ci->ci_direct_key);
- } else if ((ci->ci_ctfm != NULL || ci->ci_essiv_tfm != NULL) &&
- !fscrypt_is_direct_key_policy(&ci->ci_policy)) {
+ else if (ci->ci_owns_key)
crypto_free_skcipher(ci->ci_ctfm);
- crypto_free_cipher(ci->ci_essiv_tfm);
- }
key = ci->ci_master_key;
if (key) {
@@ -422,7 +356,7 @@ static void put_crypt_info(struct fscrypt_info *ci)
key_invalidate(key);
key_put(key);
}
- memset(ci, 0, sizeof(*ci)); /* sanitizes ->ci_raw_key */
+ memzero_explicit(ci, sizeof(*ci));
kmem_cache_free(fscrypt_info_cachep, ci);
}
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
index e5552d0..8a685fd 100644
--- a/fs/crypto/keysetup_v1.c
+++ b/fs/crypto/keysetup_v1.c
@@ -271,10 +271,6 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci,
return -EINVAL;
}
- /* ESSIV implies 16-byte IVs which implies !DIRECT_KEY */
- if (WARN_ON(mode->needs_essiv))
- return -EINVAL;
-
dk = fscrypt_get_direct_key(ci, raw_master_key);
if (IS_ERR(dk))
return PTR_ERR(dk);
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 91b22c8..bc92f1e 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -29,6 +29,40 @@ bool fscrypt_policies_equal(const union fscrypt_policy *policy1,
return !memcmp(policy1, policy2, fscrypt_policy_size(policy1));
}
+static bool supported_iv_ino_lblk_64_policy(
+ const struct fscrypt_policy_v2 *policy,
+ const struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ int ino_bits = 64, lblk_bits = 64;
+
+ if (policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
+ fscrypt_warn(inode,
+ "The DIRECT_KEY and IV_INO_LBLK_64 flags are mutually exclusive");
+ return false;
+ }
+ /*
+ * It's unsafe to include inode numbers in the IVs if the filesystem can
+ * potentially renumber inodes, e.g. via filesystem shrinking.
+ */
+ if (!sb->s_cop->has_stable_inodes ||
+ !sb->s_cop->has_stable_inodes(sb)) {
+ fscrypt_warn(inode,
+ "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't have stable inode numbers",
+ sb->s_id);
+ return false;
+ }
+ if (sb->s_cop->get_ino_and_lblk_bits)
+ sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
+ if (ino_bits > 32 || lblk_bits > 32) {
+ fscrypt_warn(inode,
+ "Can't use IV_INO_LBLK_64 policy on filesystem '%s' because it doesn't use 32-bit inode and block numbers",
+ sb->s_id);
+ return false;
+ }
+ return true;
+}
+
/**
* fscrypt_supported_policy - check whether an encryption policy is supported
*
@@ -55,7 +89,8 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
return false;
}
- if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) {
+ if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK |
+ FSCRYPT_POLICY_FLAG_DIRECT_KEY)) {
fscrypt_warn(inode,
"Unsupported encryption flags (0x%02x)",
policy->flags);
@@ -89,6 +124,10 @@ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
return false;
}
+ if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
+ !supported_iv_ino_lblk_64_policy(policy, inode))
+ return false;
+
if (memchr_inv(policy->__reserved, 0,
sizeof(policy->__reserved))) {
fscrypt_warn(inode,
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index d31b6c7..dc1a1d5 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -35,11 +35,11 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
spin_unlock(&inode->i_lock);
spin_unlock(&sb->s_inode_list_lock);
- cond_resched();
invalidate_mapping_pages(inode->i_mapping, 0, -1);
iput(toput_inode);
toput_inode = inode;
+ cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
spin_unlock(&sb->s_inode_list_lock);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 4b65b8d..1014369 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -77,6 +77,11 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
error_msg = "rec_len is too small for name_len";
else if (unlikely(((char *) de - buf) + rlen > size))
error_msg = "directory entry overrun";
+ else if (unlikely(((char *) de - buf) + rlen >
+ size - EXT4_DIR_REC_LEN(1) &&
+ ((char *) de - buf) + rlen != size)) {
+ error_msg = "directory entry too close to block end";
+ }
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 523e6d7f1..3bab4b5 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1688,6 +1688,7 @@ static inline bool ext4_verity_in_progress(struct inode *inode)
#define EXT4_FEATURE_COMPAT_RESIZE_INODE 0x0010
#define EXT4_FEATURE_COMPAT_DIR_INDEX 0x0020
#define EXT4_FEATURE_COMPAT_SPARSE_SUPER2 0x0200
+#define EXT4_FEATURE_COMPAT_STABLE_INODES 0x0800
#define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
#define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
@@ -1784,6 +1785,7 @@ EXT4_FEATURE_COMPAT_FUNCS(xattr, EXT_ATTR)
EXT4_FEATURE_COMPAT_FUNCS(resize_inode, RESIZE_INODE)
EXT4_FEATURE_COMPAT_FUNCS(dir_index, DIR_INDEX)
EXT4_FEATURE_COMPAT_FUNCS(sparse_super2, SPARSE_SUPER2)
+EXT4_FEATURE_COMPAT_FUNCS(stable_inodes, STABLE_INODES)
EXT4_FEATURE_RO_COMPAT_FUNCS(sparse_super, SPARSE_SUPER)
EXT4_FEATURE_RO_COMPAT_FUNCS(large_file, LARGE_FILE)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e7ee1fb..e579509 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3827,7 +3827,13 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
* writes & truncates and since we take care of writing back page cache,
* we are protected against page writeback as well.
*/
- inode_lock_shared(inode);
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock_shared(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock_shared(inode);
+ }
+
ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
iocb->ki_pos + count - 1);
if (ret)
@@ -5653,12 +5659,15 @@ int ext4_getattr(const struct path *path, struct kstat *stat,
stat->attributes |= STATX_ATTR_IMMUTABLE;
if (flags & EXT4_NODUMP_FL)
stat->attributes |= STATX_ATTR_NODUMP;
+ if (flags & EXT4_VERITY_FL)
+ stat->attributes |= STATX_ATTR_VERITY;
stat->attributes_mask |= (STATX_ATTR_APPEND |
STATX_ATTR_COMPRESSED |
STATX_ATTR_ENCRYPTED |
STATX_ATTR_IMMUTABLE |
- STATX_ATTR_NODUMP);
+ STATX_ATTR_NODUMP |
+ STATX_ATTR_VERITY);
generic_fillattr(inode, stat);
return 0;
@@ -5958,7 +5967,7 @@ int ext4_expand_extra_isize(struct inode *inode,
error = ext4_journal_get_write_access(handle, iloc->bh);
if (error) {
brelse(iloc->bh);
- goto out_stop;
+ goto out_unlock;
}
error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
@@ -5968,8 +5977,8 @@ int ext4_expand_extra_isize(struct inode *inode,
if (!error)
error = rc;
+out_unlock:
ext4_write_unlock_xattr(inode, &no_expand);
-out_stop:
ext4_journal_stop(handle);
return error;
}
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index a25b4d2..42177a4 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2821,7 +2821,7 @@ bool ext4_empty_dir(struct inode *inode)
{
unsigned int offset;
struct buffer_head *bh;
- struct ext4_dir_entry_2 *de, *de1;
+ struct ext4_dir_entry_2 *de;
struct super_block *sb;
if (ext4_has_inline_data(inode)) {
@@ -2846,19 +2846,25 @@ bool ext4_empty_dir(struct inode *inode)
return true;
de = (struct ext4_dir_entry_2 *) bh->b_data;
- de1 = ext4_next_entry(de, sb->s_blocksize);
- if (le32_to_cpu(de->inode) != inode->i_ino ||
- le32_to_cpu(de1->inode) == 0 ||
- strcmp(".", de->name) || strcmp("..", de1->name)) {
- ext4_warning_inode(inode, "directory missing '.' and/or '..'");
+ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
+ 0) ||
+ le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) {
+ ext4_warning_inode(inode, "directory missing '.'");
brelse(bh);
return true;
}
- offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
- ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
- de = ext4_next_entry(de1, sb->s_blocksize);
+ offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
+ de = ext4_next_entry(de, sb->s_blocksize);
+ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
+ offset) ||
+ le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
+ ext4_warning_inode(inode, "directory missing '..'");
+ brelse(bh);
+ return true;
+ }
+ offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
while (offset < inode->i_size) {
- if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
+ if (!(offset & (sb->s_blocksize - 1))) {
unsigned int lblock;
brelse(bh);
lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
@@ -2869,12 +2875,11 @@ bool ext4_empty_dir(struct inode *inode)
}
if (IS_ERR(bh))
return true;
- de = (struct ext4_dir_entry_2 *) bh->b_data;
}
+ de = (struct ext4_dir_entry_2 *) (bh->b_data +
+ (offset & (sb->s_blocksize - 1)));
if (ext4_check_dir_entry(inode, NULL, de, bh,
bh->b_data, bh->b_size, offset)) {
- de = (struct ext4_dir_entry_2 *)(bh->b_data +
- sb->s_blocksize);
offset = (offset | (sb->s_blocksize - 1)) + 1;
continue;
}
@@ -2883,7 +2888,6 @@ bool ext4_empty_dir(struct inode *inode)
return false;
}
offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
- de = ext4_next_entry(de, sb->s_blocksize);
}
brelse(bh);
return true;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0d5c1fb..681654e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1286,6 +1286,18 @@ static bool ext4_dummy_context(struct inode *inode)
return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}
+static bool ext4_has_stable_inodes(struct super_block *sb)
+{
+ return ext4_has_feature_stable_inodes(sb);
+}
+
+static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
+ int *ino_bits_ret, int *lblk_bits_ret)
+{
+ *ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count);
+ *lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
+}
+
static const struct fscrypt_operations ext4_cryptops = {
.key_prefix = "ext4:",
.get_context = ext4_get_context,
@@ -1293,6 +1305,8 @@ static const struct fscrypt_operations ext4_cryptops = {
.dummy_context = ext4_dummy_context,
.empty_dir = ext4_empty_dir,
.max_namelen = EXT4_NAME_LEN,
+ .has_stable_inodes = ext4_has_stable_inodes,
+ .get_ino_and_lblk_bits = ext4_get_ino_and_lblk_bits,
};
#endif
diff --git a/fs/inode.c b/fs/inode.c
index 68e6eeb..c7e1406 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -656,6 +656,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
struct inode *inode, *next;
LIST_HEAD(dispose);
+again:
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock);
@@ -678,6 +679,12 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
list_add(&inode->i_lru, &dispose);
+ if (need_resched()) {
+ spin_unlock(&sb->s_inode_list_lock);
+ cond_resched();
+ dispose_list(&dispose);
+ goto again;
+ }
}
spin_unlock(&sb->s_inode_list_lock);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 0567b17..7dd6133 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -726,7 +726,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
}
cond_resched();
- stats.run.rs_blocks_logged += bufs;
/* Force a new descriptor to be generated next
time round the loop. */
@@ -813,6 +812,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
jbd2_unfile_log_bh(bh);
+ stats.run.rs_blocks_logged++;
/*
* The list contains temporary buffer heads created by
@@ -858,6 +858,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
clear_buffer_jwrite(bh);
jbd2_unfile_log_bh(bh);
+ stats.run.rs_blocks_logged++;
__brelse(bh); /* One for getblk */
/* AKPM: bforget here */
}
@@ -879,6 +880,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
}
if (cbh)
err = journal_wait_on_commit_record(journal, cbh);
+ stats.run.rs_blocks_logged++;
if (jbd2_has_feature_async_commit(journal) &&
journal->j_flags & JBD2_BARRIER) {
blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
diff --git a/fs/locks.c b/fs/locks.c
index 665e3ce..1a40e27 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2691,7 +2691,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
}
if (inode) {
/* userspace relies on this representation of dev_t */
- seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
+ seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino);
} else {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 87ee9cb..fc13236 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3058,12 +3058,17 @@ static bool replay_matches_cache(struct svc_rqst *rqstp,
(bool)seq->cachethis)
return false;
/*
- * If there's an error than the reply can have fewer ops than
- * the call. But if we cached a reply with *more* ops than the
- * call you're sending us now, then this new call is clearly not
- * really a replay of the old one:
+ * If there's an error then the reply can have fewer ops than
+ * the call.
*/
- if (slot->sl_opcnt < argp->opcnt)
+ if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
+ return false;
+ /*
+ * But if we cached a reply with *more* ops than the call you're
+ * sending us now, then this new call is clearly not really a
+ * replay of the old one:
+ */
+ if (slot->sl_opcnt > argp->opcnt)
return false;
/* This is the only check explicitly called by spec: */
if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 506da82..a308f7a 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -90,6 +90,7 @@ void fsnotify_unmount_inodes(struct super_block *sb)
iput_inode = inode;
+ cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
spin_unlock(&sb->s_inode_list_lock);
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 917fadca..b73b787 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -335,8 +335,8 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
down_read(&OCFS2_I(inode)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
up_read(&OCFS2_I(inode)->ip_xattr_sem);
- if (IS_ERR(acl) || !acl)
- return PTR_ERR(acl);
+ if (IS_ERR_OR_NULL(acl))
+ return PTR_ERR_OR_ZERO(acl);
ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (ret)
return ret;
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index ef9b389..43c0d41 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -582,6 +582,17 @@ static int notrace ramoops_pstore_write(struct pstore_record *record)
prz = cxt->dprzs[cxt->dump_write_cnt];
+ /*
+ * Since this is a new crash dump, we need to reset the buffer in
+ * case it still has an old dump present. Without this, the new dump
+ * will get appended, which would seriously confuse anything trying
+ * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
+ * expects to find a dump header in the beginning of buffer data, so
+ * we must reset the buffer values, in order to ensure that the
+ * header will be written to the beginning of the buffer.
+ */
+ persistent_ram_zap(prz, 0);
+
/* Build header and append record contents. */
hlen = ramoops_write_kmsg_hdr(prz, record);
size = record->size;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 3254c90..30f5da8 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -976,6 +976,7 @@ static int add_dquot_ref(struct super_block *sb, int type)
* later.
*/
old_inode = inode;
+ cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
spin_unlock(&sb->s_inode_list_lock);
@@ -2849,68 +2850,73 @@ EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
static int do_proc_dqstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int type = (int *)table->data - dqstats.stat;
+ unsigned int type = (unsigned long *)table->data - dqstats.stat;
+ s64 value = percpu_counter_sum(&dqstats.counter[type]);
+
+ /* Filter negative values for non-monotonic counters */
+ if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
+ type == DQST_FREE_DQUOTS))
+ value = 0;
/* Update global table */
- dqstats.stat[type] =
- percpu_counter_sum_positive(&dqstats.counter[type]);
- return proc_dointvec(table, write, buffer, lenp, ppos);
+ dqstats.stat[type] = value;
+ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
static struct ctl_table fs_dqstats_table[] = {
{
.procname = "lookups",
.data = &dqstats.stat[DQST_LOOKUPS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "drops",
.data = &dqstats.stat[DQST_DROPS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "reads",
.data = &dqstats.stat[DQST_READS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "writes",
.data = &dqstats.stat[DQST_WRITES],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "cache_hits",
.data = &dqstats.stat[DQST_CACHE_HITS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "allocated_dquots",
.data = &dqstats.stat[DQST_ALLOC_DQUOTS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "free_dquots",
.data = &dqstats.stat[DQST_FREE_DQUOTS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "syncs",
.data = &dqstats.stat[DQST_SYNCS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
diff --git a/fs/readdir.c b/fs/readdir.c
index d336db6..0c35766 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -66,6 +66,40 @@ int iterate_dir(struct file *file, struct dir_context *ctx)
EXPORT_SYMBOL(iterate_dir);
/*
+ * POSIX says that a dirent name cannot contain NULL or a '/'.
+ *
+ * It's not 100% clear what we should really do in this case.
+ * The filesystem is clearly corrupted, but returning a hard
+ * error means that you now don't see any of the other names
+ * either, so that isn't a perfect alternative.
+ *
+ * And if you return an error, what error do you use? Several
+ * filesystems seem to have decided on EUCLEAN being the error
+ * code for EFSCORRUPTED, and that may be the error to use. Or
+ * just EIO, which is perhaps more obvious to users.
+ *
+ * In order to see the other file names in the directory, the
+ * caller might want to make this a "soft" error: skip the
+ * entry, and return the error at the end instead.
+ *
+ * Note that this should likely do a "memchr(name, 0, len)"
+ * check too, since that would be filesystem corruption as
+ * well. However, that case can't actually confuse user space,
+ * which has to do a strlen() on the name anyway to find the
+ * filename length, and the above "soft error" worry means
+ * that it's probably better left alone until we have that
+ * issue clarified.
+ */
+static int verify_dirent_name(const char *name, int len)
+{
+ if (!len)
+ return -EIO;
+ if (memchr(name, '/', len))
+ return -EIO;
+ return 0;
+}
+
+/*
* Traditional linux readdir() handling..
*
* "count=1" is a special case, meaning that the buffer is one
@@ -174,6 +208,9 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
sizeof(long));
+ buf->error = verify_dirent_name(name, namlen);
+ if (unlikely(buf->error))
+ return buf->error;
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
@@ -260,6 +297,9 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
sizeof(u64));
+ buf->error = verify_dirent_name(name, namlen);
+ if (unlikely(buf->error))
+ return buf->error;
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index c2d524b..839a914 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1812,13 +1812,12 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
goto out;
features = uffdio_api.features;
- if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
- memset(&uffdio_api, 0, sizeof(uffdio_api));
- if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
- goto out;
- ret = -EINVAL;
- goto out;
- }
+ ret = -EINVAL;
+ if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
+ goto err_out;
+ ret = -EPERM;
+ if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+ goto err_out;
/* report all available features and ioctls to userland */
uffdio_api.features = UFFD_API_FEATURES;
uffdio_api.ioctls = UFFD_API_IOCTLS;
@@ -1831,6 +1830,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
ret = 0;
out:
return ret;
+err_out:
+ memset(&uffdio_api, 0, sizeof(uffdio_api));
+ if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
+ ret = -EFAULT;
+ goto out;
}
static long userfaultfd_ioctl(struct file *file, unsigned cmd,
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 7b25a88..84245d2 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -5556,7 +5556,7 @@ __xfs_bunmapi(
* Make sure we don't touch multiple AGF headers out of order
* in a single transaction, as that could cause AB-BA deadlocks.
*/
- if (!wasdel) {
+ if (!wasdel && !isrt) {
agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
if (prev_agno != NULLAGNUMBER && prev_agno > agno)
break;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index dc95a49..4e768e6 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1539,6 +1539,8 @@ xlog_alloc_log(
if (iclog->ic_bp)
xfs_buf_free(iclog->ic_bp);
kmem_free(iclog);
+ if (prev_iclog == log->l_iclog)
+ break;
}
spinlock_destroy(&log->l_icloglock);
xfs_buf_free(log->l_xbuf);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index a72efa0..553b61c 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -313,7 +313,7 @@ struct drm_dp_resource_status_notify {
struct drm_dp_query_payload_ack_reply {
u8 port_number;
- u8 allocated_pbn;
+ u16 allocated_pbn;
};
struct drm_dp_sideband_msg_req_body {
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 1b0a17b..d560580 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -23,6 +23,8 @@ struct ahci_host_priv;
struct platform_device;
struct scsi_host_template;
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index c0c0b99..995903c 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -18,6 +18,7 @@
#include <linux/can/error.h>
#include <linux/can/led.h>
#include <linux/can/netlink.h>
+#include <linux/can/skb.h>
#include <linux/netdevice.h>
/*
@@ -90,6 +91,36 @@ struct can_priv {
#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static inline bool can_skb_headroom_valid(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ }
+
+ return true;
+}
+
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
static inline bool can_dropped_invalid_skb(struct net_device *dev,
struct sk_buff *skb)
@@ -107,6 +138,9 @@ static inline bool can_dropped_invalid_skb(struct net_device *dev,
} else
goto inval_skb;
+ if (!can_skb_headroom_valid(dev, skb))
+ goto inval_skb;
+
return false;
inval_skb:
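
A note on how the new helper above is reached: can_dropped_invalid_skb() is the check a CAN driver performs at the top of its xmit routine, so frames injected through af_packet now get their headroom and metadata prepared (or the frame dropped) before the driver touches them. A minimal sketch of such a call site, with a hypothetical driver name:

static netdev_tx_t mycan_start_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
{
        /* Rejects frames with a bad DLC and, with the change above, also
         * frames whose headroom was not prepared by the CAN core. */
        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;

        /* ... map skb->data to a struct can_frame and queue it to HW ... */
        return NETDEV_TX_OK;
}
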
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8319101..087cbe7 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1362,8 +1362,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
struct dma_slave_caps caps;
+ int ret;
- dma_get_slave_caps(tx->chan, &caps);
+ ret = dma_get_slave_caps(tx->chan, &caps);
+ if (ret)
+ return ret;
if (caps.descriptor_reuse) {
tx->flags |= DMA_CTRL_REUSE;
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index ad1d6e5..c6c801c 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -20,7 +20,6 @@
#define FS_CRYPTO_BLOCK_SIZE 16
-struct fscrypt_ctx;
struct fscrypt_info;
struct fscrypt_str {
@@ -62,18 +61,9 @@ struct fscrypt_operations {
bool (*dummy_context)(struct inode *);
bool (*empty_dir)(struct inode *);
unsigned int max_namelen;
-};
-
-/* Decryption work */
-struct fscrypt_ctx {
- union {
- struct {
- struct bio *bio;
- struct work_struct work;
- };
- struct list_head free_list; /* Free list */
- };
- u8 flags; /* Flags */
+ bool (*has_stable_inodes)(struct super_block *sb);
+ void (*get_ino_and_lblk_bits)(struct super_block *sb,
+ int *ino_bits_ret, int *lblk_bits_ret);
};
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
@@ -102,8 +92,6 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
/* crypto.c */
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
-extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
-extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
unsigned int len,
@@ -246,8 +234,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
/* bio.c */
extern void fscrypt_decrypt_bio(struct bio *);
-extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
- struct bio *bio);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
@@ -292,16 +278,6 @@ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
}
-static inline struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
- return;
-}
-
static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
unsigned int len,
unsigned int offs,
@@ -493,11 +469,6 @@ static inline void fscrypt_decrypt_bio(struct bio *bio)
{
}
-static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
- struct bio *bio)
-{
-}
-
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index ceb4ac23..822256f 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -415,12 +415,18 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
-/*
- * Helper function to check, whether the timer is on one of the queues
+/**
+ * hrtimer_is_queued - check, whether the timer is on one of the queues
+ * @timer: Timer to check
+ *
+ * Returns: True if the timer is queued, false otherwise
+ *
+ * The function can be used lockless, but it gives only a current snapshot.
*/
-static inline int hrtimer_is_queued(struct hrtimer *timer)
+static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
- return timer->state & HRTIMER_STATE_ENQUEUED;
+ /* The READ_ONCE pairs with the update functions of timer->state */
+ return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}
/*
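
The hrtimer hunk above only annotates an already lockless pattern: callers may peek at the queued state without holding the base lock, but the result is just a snapshot. A short illustrative use, with a hypothetical structure name:

        /* Cheap lockless hint; anything that must be exact still has to be
         * re-validated under the lock that serialises arming the timer. */
        if (hrtimer_is_queued(&ctx->retry_timer))
                hrtimer_cancel(&ctx->retry_timer);
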
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 548fd53..d433f5e 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -28,6 +28,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
return (struct ethhdr *)skb_mac_header(skb);
}
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + eth_hdr()
+ */
+static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct ethhdr *)skb->data;
+}
+
static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_inner_mac_header(skb);
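
skb_eth_hdr() is a small convenience for transmit paths where the Ethernet header sits at skb->data but the MAC header offset has not been recorded yet. A hedged sketch:

        /* TX path: frame just built, skb->mac_header possibly unset. */
        struct ethhdr *eth = skb_eth_hdr(skb);

        if (eth->h_proto == htons(ETH_P_8021Q)) {
                /* ... handle the VLAN tag without touching mac_header ... */
        }
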
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index f5d8ce4..a10e847 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -8,23 +8,64 @@ struct task_struct;
#ifdef CONFIG_KCOV
-void kcov_task_init(struct task_struct *t);
-void kcov_task_exit(struct task_struct *t);
-
enum kcov_mode {
/* Coverage collection is not enabled yet. */
KCOV_MODE_DISABLED = 0,
+ /* KCOV was initialized, but tracing mode hasn't been chosen yet. */
+ KCOV_MODE_INIT = 1,
/*
* Tracing coverage collection mode.
* Covered PCs are collected in a per-task buffer.
*/
- KCOV_MODE_TRACE = 1,
+ KCOV_MODE_TRACE_PC = 2,
+ /* Collecting comparison operands mode. */
+ KCOV_MODE_TRACE_CMP = 3,
};
+#define KCOV_IN_CTXSW (1 << 30)
+
+void kcov_task_init(struct task_struct *t);
+void kcov_task_exit(struct task_struct *t);
+
+#define kcov_prepare_switch(t) \
+do { \
+ (t)->kcov_mode |= KCOV_IN_CTXSW; \
+} while (0)
+
+#define kcov_finish_switch(t) \
+do { \
+ (t)->kcov_mode &= ~KCOV_IN_CTXSW; \
+} while (0)
+
+/* See Documentation/dev-tools/kcov.rst for usage details. */
+void kcov_remote_start(u64 handle);
+void kcov_remote_stop(void);
+u64 kcov_common_handle(void);
+
+static inline void kcov_remote_start_common(u64 id)
+{
+ kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, id));
+}
+
+static inline void kcov_remote_start_usb(u64 id)
+{
+ kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id));
+}
+
#else
static inline void kcov_task_init(struct task_struct *t) {}
static inline void kcov_task_exit(struct task_struct *t) {}
+static inline void kcov_prepare_switch(struct task_struct *t) {}
+static inline void kcov_finish_switch(struct task_struct *t) {}
+static inline void kcov_remote_start(u64 handle) {}
+static inline void kcov_remote_stop(void) {}
+static inline u64 kcov_common_handle(void)
+{
+ return 0;
+}
+static inline void kcov_remote_start_common(u64 id) {}
+static inline void kcov_remote_start_usb(u64 id) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
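
The kernel-side half of the remote interface is kcov_remote_start()/kcov_remote_stop() plus the kcov_remote_start_common()/_usb() wrappers above. A background worker that wants its coverage attributed to the task which enabled KCOV typically saves that task's common handle and brackets each work item with the pair. A rough sketch with invented names; only the kcov calls are real:

struct my_worker {
        u64     kcov_handle;    /* saved from kcov_common_handle() at creation */
        /* ... work queue, wait queue, etc. ... */
};

static int my_worker_fn(void *data)
{
        struct my_worker *w = data;

        while (!kthread_should_stop()) {
                struct my_work *work = my_dequeue(w);   /* hypothetical */

                if (!work)
                        continue;
                kcov_remote_start_common(w->kcov_handle);
                work->fn(work);
                kcov_remote_stop();
        }
        return 0;
}
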
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 1aa707a..8b54c59 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -7,6 +7,9 @@
#include <asm/byteorder.h>
+#define INT32_MAX S32_MAX
+#define UINT32_MAX U32_MAX
+
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
typedef __be64 fdt64_t;
diff --git a/include/linux/log2.h b/include/linux/log2.h
index c373295f..df50139a 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -67,16 +67,13 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
}
/**
- * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
- * @n - parameter
+ * const_ilog2 - log base 2 of 32-bit or a 64-bit constant unsigned value
+ * @n: parameter
*
- * constant-capable log of base 2 calculation
- * - this can be used to initialise global variables from constant data, hence
- * the massive ternary operator construction
- *
- * selects the appropriately-sized optimised version depending on sizeof(n)
+ * Use this where sparse expects a true constant expression, e.g. for array
+ * indices.
*/
-#define ilog2(n) \
+#define const_ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
(n) < 2 ? 0 : \
@@ -142,10 +139,26 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
- 1 ) : \
- (sizeof(n) <= 4) ? \
- __ilog2_u32(n) : \
- __ilog2_u64(n) \
+ 1) : \
+ -1)
+
+/**
+ * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * @n: parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+ __builtin_constant_p(n) ? \
+ const_ilog2(n) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \
)
/**
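
For context, a hedged sketch of how the const_ilog2()/ilog2() split above is meant to be used (the array and function names are invented):

#include <linux/log2.h>

#define MY_RING_SIZE 64                         /* must be a power of two */

/* const_ilog2() is a true integer constant expression, so it can size
 * arrays and appear in static initialisers without upsetting sparse. */
static unsigned long my_depth_hist[const_ilog2(MY_RING_SIZE) + 1];

static unsigned int my_depth_bucket(unsigned long depth)
{
        /* ilog2() still folds constants via const_ilog2() and picks
         * __ilog2_u32()/__ilog2_u64() for runtime values. */
        return depth ? ilog2(depth) : 0;
}
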
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 4de703d..5e1e50b 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -56,6 +56,7 @@
#define UHID_MINOR 239
#define USERIO_MINOR 240
#define VHOST_VSOCK_MINOR 241
+#define RFKILL_MINOR 242
#define MISC_DYNAMIC_MINOR 255
struct device;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 84362c3..d4a8c41 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -537,9 +537,9 @@ struct platform_device_id {
#define MDIO_NAME_SIZE 32
#define MDIO_MODULE_PREFIX "mdio:"
-#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
+#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u"
#define MDIO_ID_ARGS(_id) \
- (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
+ ((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index a726f96..e9c3b98 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -279,6 +279,8 @@ struct nvme_fc_remote_port {
*
* Host/Initiator Transport Entrypoints/Parameters:
*
+ * @module: The LLDD module using the interface
+ *
* @localport_delete: The LLDD initiates deletion of a localport via
* nvme_fc_deregister_localport(). However, the teardown is
* asynchronous. This routine is called upon the completion of the
@@ -392,6 +394,8 @@ struct nvme_fc_remote_port {
* Value is Mandatory. Allowed to be zero.
*/
struct nvme_fc_port_template {
+ struct module *module;
+
/* initiator-based functions */
void (*localport_delete)(struct nvme_fc_local_port *);
void (*remoteport_delete)(struct nvme_fc_remote_port *);
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 38d8225..3097b08 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -82,29 +82,32 @@ struct posix_clock_operations {
*
* @ops: Functional interface to the clock
* @cdev: Character device instance for this clock
- * @kref: Reference count.
+ * @dev: Pointer to the clock's device.
* @rwsem: Protects the 'zombie' field from concurrent access.
* @zombie: If 'zombie' is true, then the hardware has disappeared.
- * @release: A function to free the structure when the reference count reaches
- * zero. May be NULL if structure is statically allocated.
*
* Drivers should embed their struct posix_clock within a private
* structure, obtaining a reference to it during callbacks using
* container_of().
+ *
+ * Drivers should supply an initialized but not exposed struct device
+ * to posix_clock_register(). It is used to manage lifetime of the
+ * driver's private structure. Its 'release' field should be set to
+ * a release function for this private structure.
*/
struct posix_clock {
struct posix_clock_operations ops;
struct cdev cdev;
- struct kref kref;
+ struct device *dev;
struct rw_semaphore rwsem;
bool zombie;
- void (*release)(struct posix_clock *clk);
};
/**
* posix_clock_register() - register a new clock
- * @clk: Pointer to the clock. Caller must provide 'ops' and 'release'
- * @devid: Allocated device id
+ * @clk: Pointer to the clock. Caller must provide 'ops' field
+ * @dev: Pointer to the initialized device. Caller must provide
+ * 'release' field
*
* A clock driver calls this function to register itself with the
* clock device subsystem. If 'clk' points to dynamically allocated
@@ -113,7 +116,7 @@ struct posix_clock {
*
* Returns zero on success, non-zero otherwise.
*/
-int posix_clock_register(struct posix_clock *clk, dev_t devid);
+int posix_clock_register(struct posix_clock *clk, struct device *dev);
/**
* posix_clock_unregister() - unregister a clock
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 5ac9de4..aa9a42e 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -263,7 +263,7 @@ enum {
};
struct dqstats {
- int stat[_DQST_DQSTAT_LAST];
+ unsigned long stat[_DQST_DQSTAT_LAST];
struct percpu_counter counter[_DQST_DQSTAT_LAST];
};
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index e4b257f..a10da54 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -101,6 +101,43 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
}
/**
+ * hlist_nulls_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist_nulls,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+ struct hlist_nulls_head *h)
+{
+ struct hlist_nulls_node *i, *last = NULL;
+
+ /* Note: write side code, so rcu accessors are not needed. */
+ for (i = h->first; !is_a_nulls(i); i = i->next)
+ last = i;
+
+ if (last) {
+ n->next = last->next;
+ n->pprev = &last->next;
+ rcu_assign_pointer(hlist_next_rcu(last), n);
+ } else {
+ hlist_nulls_add_head_rcu(n, h);
+ }
+}
+
+/**
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
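
The new tail-insert helper follows the usual nulls-list contract: writers serialise against each other with a lock, readers traverse under RCU and restart when the terminating nulls value is not the one they expected for that chain. A rough sketch, with the entry, lock and marker names invented for illustration:

struct my_entry *e;
struct hlist_nulls_node *node;

/* Writer: all mutations of this list are serialised by my_lock. */
spin_lock(&my_lock);
hlist_nulls_add_tail_rcu(&e->nulls_node, &my_head);
spin_unlock(&my_lock);

/* Reader: lockless traversal; restart if the chain ended on an
 * unexpected nulls marker (the entry may have moved to another chain). */
rcu_read_lock();
begin:
hlist_nulls_for_each_entry_rcu(e, node, &my_head, nulls_node) {
        /* ... examine e ... */
}
if (get_nulls_value(node) != MY_NULLS_MARKER)
        goto begin;
rcu_read_unlock();
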
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index d8ecefa..260c4aa 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -38,7 +38,6 @@ enum ab8505_regulator_id {
AB8505_LDO_AUX6,
AB8505_LDO_INTCORE,
AB8505_LDO_ADC,
- AB8505_LDO_USB,
AB8505_LDO_AUDIO,
AB8505_LDO_ANAMIC1,
AB8505_LDO_ANAMIC2,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0448efd..9e209dd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1304,8 +1304,10 @@ struct task_struct {
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
+ /* See kernel/kcov.c for more details. */
+
/* Coverage collection mode enabled for this task (0 if disabled): */
- enum kcov_mode kcov_mode;
+ unsigned int kcov_mode;
/* Size of the kcov_area: */
unsigned int kcov_size;
@@ -1315,6 +1317,12 @@ struct task_struct {
/* KCOV descriptor wired with this task or NULL: */
struct kcov *kcov;
+
+ /* KCOV common handle for remote coverage collection: */
+ u64 kcov_handle;
+
+ /* KCOV sequence number: */
+ int kcov_sequence;
#endif
#ifdef CONFIG_MEMCG
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 09c6e28..ab437dd 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -140,6 +140,20 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
+#define DEFINE_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
#ifdef CONFIG_USER_NS
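
The DEFINE_SHOW_ATTRIBUTE() helper added above generates the open routine and file_operations for a single-record seq_file. A small illustrative example of the typical usage (the driver names are hypothetical):

static int mydrv_stats_show(struct seq_file *s, void *unused)
{
        struct mydrv *drv = s->private;         /* passed via i_private */

        seq_printf(s, "tx_packets: %llu\n", drv->tx_packets);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(mydrv_stats);             /* emits mydrv_stats_fops */

/* e.g. in probe(): */
debugfs_create_file("stats", 0444, drv->debugfs_dir, drv,
                    &mydrv_stats_fops);
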
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ef55fd7..9578eb2 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1661,7 +1661,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
*/
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
- struct sk_buff *skb = list_->prev;
+ struct sk_buff *skb = READ_ONCE(list_->prev);
if (skb == (struct sk_buff *)list_)
skb = NULL;
@@ -1729,7 +1729,9 @@ static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
- /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+ /* See skb_queue_empty_lockless() and skb_peek_tail()
+ * for the opposite READ_ONCE()
+ */
WRITE_ONCE(newsk->next, next);
WRITE_ONCE(newsk->prev, prev);
WRITE_ONCE(next->prev, newsk);
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 22484e4..0729584 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -33,7 +33,8 @@ struct kstat {
STATX_ATTR_IMMUTABLE | \
STATX_ATTR_APPEND | \
STATX_ATTR_NODUMP | \
- STATX_ATTR_ENCRYPTED \
+ STATX_ATTR_ENCRYPTED | \
+ STATX_ATTR_VERITY \
)/* Attrs corresponding to FS_*_FL flags */
u64 ino;
dev_t dev;
diff --git a/include/net/dst.h b/include/net/dst.h
index cbd5f09..8c714eb 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -110,7 +110,7 @@ struct dst_entry {
struct dst_metrics {
u32 metrics[RTAX_MAX];
refcount_t refcnt;
-};
+} __aligned(4); /* Low pointer bits contain DST_METRICS_FLAGS */
extern const struct dst_metrics dst_default_metrics;
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
@@ -542,7 +542,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
struct dst_entry *dst = skb_dst(skb);
if (dst && dst->ops->update_pmtu)
- dst->ops->update_pmtu(dst, NULL, skb, mtu);
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
+}
+
+/* update dst pmtu but not do neighbor confirm */
+static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (dst && dst->ops->update_pmtu)
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
#endif /* _NET_DST_H */
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 5ec645f..443863c 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -27,7 +27,8 @@ struct dst_ops {
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu);
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh);
void (*redirect)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 2dbbbff..573ab11 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -106,12 +106,18 @@ struct inet_bind_hashbucket {
struct hlist_head chain;
};
-/*
- * Sockets can be hashed in established or listening table
+/* Sockets can be hashed in established or listening table.
+ * We must use different 'nulls' end-of-chain value for all hash buckets :
+ * A socket might transition from ESTABLISH to LISTEN state without
+ * RCU grace period. A lookup in ehash table needs to handle this case.
*/
+#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
spinlock_t lock;
- struct hlist_head head;
+ union {
+ struct hlist_head head;
+ struct hlist_nulls_head nulls_head;
+ };
};
/* This is for listening sockets, thus all sockets which possess wildcards. */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 92ff588..3652051 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -459,7 +459,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
do {
seq = read_seqbegin(&hh->hh_lock);
- hh_len = hh->hh_len;
+ hh_len = READ_ONCE(hh->hh_len);
if (likely(hh_len <= HH_DATA_MOD)) {
hh_alen = HH_DATA_MOD;
diff --git a/include/net/sock.h b/include/net/sock.h
index 3717cd8..d2454cf 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -694,6 +694,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}
+static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+ hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+}
+
static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
sock_hold(sk);
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index df156f1..f0a01a5 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -638,6 +638,7 @@ struct iscsi_reject {
#define ISCSI_REASON_BOOKMARK_INVALID 9
#define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
#define ISCSI_REASON_NEGOTIATION_RESET 11
+#define ISCSI_REASON_WAITING_FOR_LOGOUT 12
/* Max. number of Key=Value pairs in a text message */
#define MAX_KEY_VALUE_PAIRS 8192
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
index b048694..37342a1 100644
--- a/include/trace/events/wbt.h
+++ b/include/trace/events/wbt.h
@@ -33,7 +33,8 @@ TRACE_EVENT(wbt_stat,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->rmean = stat[0].mean;
__entry->rmin = stat[0].min;
__entry->rmax = stat[0].max;
@@ -67,7 +68,8 @@ TRACE_EVENT(wbt_lat,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->lat = div_u64(lat, 1000);
),
@@ -103,7 +105,8 @@ TRACE_EVENT(wbt_step,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->msg = msg;
__entry->step = step;
__entry->window = div_u64(window, 1000);
@@ -138,7 +141,8 @@ TRACE_EVENT(wbt_timer,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->status = status;
__entry->step = step;
__entry->inflight = inflight;
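
The strncpy() to strlcpy() conversion in these tracepoints is about termination, not length: strncpy() leaves the destination without a trailing NUL whenever the source is at least as long as the buffer, whereas strlcpy() always terminates, truncating if needed. In short:

        char name[32];

        /* With a 40-character device name:
         *   strncpy(name, src, 32)               -> no terminating NUL
         *   strlcpy(name, src, ARRAY_SIZE(name)) -> 31 chars copied + NUL */
        strlcpy(name, dev_name(bdi->dev), ARRAY_SIZE(name));
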
diff --git a/include/uapi/linux/android/binderfs.h b/include/uapi/linux/android/binderfs.h
new file mode 100644
index 0000000..65b2efd
--- /dev/null
+++ b/include/uapi/linux/android/binderfs.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2018 Canonical Ltd.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_CTL_H
+#define _UAPI_LINUX_BINDER_CTL_H
+
+#include <linux/android/binder.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define BINDERFS_MAX_NAME 255
+
+/**
+ * struct binderfs_device - retrieve information about a new binder device
+ * @name: the name to use for the new binderfs binder device
+ * @major: major number allocated for binderfs binder devices
+ * @minor: minor number allocated for the new binderfs binder device
+ *
+ */
+struct binderfs_device {
+ char name[BINDERFS_MAX_NAME + 1];
+ __u8 major;
+ __u8 minor;
+};
+
+/**
+ * Allocate a new binder device.
+ */
+#define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device)
+
+#endif /* _UAPI_LINUX_BINDER_CTL_H */
+
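
The new binderfs UAPI header is consumed from user space through the binder-control node. A hedged sketch of the intended flow, assuming binderfs is mounted at /dev/binderfs and omitting detailed error handling:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binderfs.h>

int add_binder_device(const char *name)
{
        struct binderfs_device device = { 0 };
        int fd, ret;

        strncpy(device.name, name, BINDERFS_MAX_NAME);
        fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
        if (fd < 0)
                return -1;
        ret = ioctl(fd, BINDER_CTL_ADD, &device);
        /* On success a new /dev/binderfs/<name> node exists and
         * device.major/device.minor report the allocated numbers. */
        close(fd);
        return ret;
}
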
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 28e8a2a..2a114f7 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -952,7 +952,8 @@ static inline void cec_msg_give_deck_status(struct cec_msg *msg,
msg->len = 3;
msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
msg->msg[2] = status_req;
- msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
+ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
+ CEC_MSG_DECK_STATUS : 0;
}
static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
@@ -1056,7 +1057,8 @@ static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
msg->len = 3;
msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
msg->msg[2] = status_req;
- msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
+ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
+ CEC_MSG_TUNER_DEVICE_STATUS : 0;
}
static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index ec740f8..b41296b 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -17,7 +17,8 @@
#define FSCRYPT_POLICY_FLAGS_PAD_32 0x03
#define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03
#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04
-#define FSCRYPT_POLICY_FLAGS_VALID 0x07
+#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 0x08
+#define FSCRYPT_POLICY_FLAGS_VALID 0x0F
/* Encryption algorithms */
#define FSCRYPT_MODE_AES_256_XTS 1
diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h
index 33eabbb..1d0350e 100644
--- a/include/uapi/linux/kcov.h
+++ b/include/uapi/linux/kcov.h
@@ -4,8 +4,60 @@
#include <linux/types.h>
+/*
+ * Argument for KCOV_REMOTE_ENABLE ioctl, see Documentation/dev-tools/kcov.rst
+ * and the comment before kcov_remote_start() for usage details.
+ */
+struct kcov_remote_arg {
+ __u32 trace_mode; /* KCOV_TRACE_PC or KCOV_TRACE_CMP */
+ __u32 area_size; /* Length of coverage buffer in words */
+ __u32 num_handles; /* Size of handles array */
+ __aligned_u64 common_handle;
+ __aligned_u64 handles[0];
+};
+
+#define KCOV_REMOTE_MAX_HANDLES 0x100
+
#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
#define KCOV_ENABLE _IO('c', 100)
#define KCOV_DISABLE _IO('c', 101)
+#define KCOV_REMOTE_ENABLE _IOW('c', 102, struct kcov_remote_arg)
+
+enum {
+ /*
+ * Tracing coverage collection mode.
+ * Covered PCs are collected in a per-task buffer.
+ * In new KCOV version the mode is chosen by calling
+ * ioctl(fd, KCOV_ENABLE, mode). In older versions the mode argument
+ * was supposed to be 0 in such a call. So, for reasons of backward
+ * compatibility, we have chosen the value KCOV_TRACE_PC to be 0.
+ */
+ KCOV_TRACE_PC = 0,
+ /* Collecting comparison operands mode. */
+ KCOV_TRACE_CMP = 1,
+};
+
+/*
+ * The format for the types of collected comparisons.
+ *
+ * Bit 0 shows whether one of the arguments is a compile-time constant.
+ * Bits 1 & 2 contain log2 of the argument size, up to 8 bytes.
+ */
+#define KCOV_CMP_CONST (1 << 0)
+#define KCOV_CMP_SIZE(n) ((n) << 1)
+#define KCOV_CMP_MASK KCOV_CMP_SIZE(3)
+
+#define KCOV_SUBSYSTEM_COMMON (0x00ull << 56)
+#define KCOV_SUBSYSTEM_USB (0x01ull << 56)
+
+#define KCOV_SUBSYSTEM_MASK (0xffull << 56)
+#define KCOV_INSTANCE_MASK (0xffffffffull)
+
+static inline __u64 kcov_remote_handle(__u64 subsys, __u64 inst)
+{
+ if (subsys & ~KCOV_SUBSYSTEM_MASK || inst & ~KCOV_INSTANCE_MASK)
+ return 0;
+ return subsys | inst;
+}
#endif /* _LINUX_KCOV_IOCTLS_H */
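
The KCOV_REMOTE_ENABLE interface above is driven from user space much like plain KCOV, except that coverage is attributed via handles built with kcov_remote_handle(). A sketch modelled on the kcov documentation; the instance IDs are arbitrary example values and error handling is omitted:

#include <stdint.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kcov.h>

#define COVER_SIZE      (64 << 10)      /* in 64-bit words */

int main(void)
{
        int fd = open("/sys/kernel/debug/kcov", O_RDWR);
        struct kcov_remote_arg *arg;
        uint64_t *cover;

        ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
        cover = mmap(NULL, COVER_SIZE * sizeof(uint64_t),
                     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        arg = calloc(1, sizeof(*arg) + sizeof(uint64_t));
        arg->trace_mode    = KCOV_TRACE_PC;
        arg->area_size     = COVER_SIZE;
        arg->num_handles   = 1;
        arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
        arg->handles[0]    = kcov_remote_handle(KCOV_SUBSYSTEM_USB, 1);
        ioctl(fd, KCOV_REMOTE_ENABLE, arg);

        /* Coverage from kernel sections bracketed by kcov_remote_start()/
         * kcov_remote_stop() with matching handles now accumulates in
         * 'cover'; cover[0] is the number of PCs collected so far. */
        return 0;
}
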
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index d94c457..de394b1 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -73,6 +73,7 @@
#define DAXFS_MAGIC 0x64646178
#define BINFMTFS_MAGIC 0x42494e4d
#define DEVPTS_SUPER_MAGIC 0x1cd1
+#define BINDERFS_SUPER_MAGIC 0x6c6f6f70
#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA
#define PIPEFS_MAGIC 0x50495045
#define PROC_SUPER_MAGIC 0x9fa0
diff --git a/include/uapi/linux/netfilter/xt_sctp.h b/include/uapi/linux/netfilter/xt_sctp.h
index 4bc6d1a..b4d804a 100644
--- a/include/uapi/linux/netfilter/xt_sctp.h
+++ b/include/uapi/linux/netfilter/xt_sctp.h
@@ -41,19 +41,19 @@ struct xt_sctp_info {
#define SCTP_CHUNKMAP_SET(chunkmap, type) \
do { \
(chunkmap)[type / bytes(__u32)] |= \
- 1 << (type % bytes(__u32)); \
+ 1u << (type % bytes(__u32)); \
} while (0)
#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
do { \
(chunkmap)[type / bytes(__u32)] &= \
- ~(1 << (type % bytes(__u32))); \
+ ~(1u << (type % bytes(__u32))); \
} while (0)
#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
({ \
((chunkmap)[type / bytes (__u32)] & \
- (1 << (type % bytes (__u32)))) ? 1: 0; \
+ (1u << (type % bytes (__u32)))) ? 1: 0; \
})
#define SCTP_CHUNKMAP_RESET(chunkmap) \
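
The 1 to 1u change in these macros is a correctness fix for the highest bit positions: shifting a signed int constant 1 into bit 31 is undefined behaviour (and trips UBSAN), while the unsigned shift is well defined. Since bytes(__u32) in this header expands to the bit width, 32, chunk type 31 is enough to hit it:

        __u32 chunkmap[256 / 32] = { 0 };

        /* Old macro: chunkmap[0] |= 1 << 31   -> UB, sign bit of an int.
         * New macro: chunkmap[0] |= 1u << 31  -> well-defined 0x80000000. */
        SCTP_CHUNKMAP_SET(chunkmap, 31);
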
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 7b35e98..ad80a5c 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -167,8 +167,8 @@ struct statx {
#define STATX_ATTR_APPEND 0x00000020 /* [I] File is append-only */
#define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */
#define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */
-
#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */
+#define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */
#endif /* _UAPI_LINUX_STAT_H */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a4875ff..615a2e4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1251,6 +1251,30 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
return check_generic_ptr_alignment(reg, pointer_desc, off, size, strict);
}
+static int check_ctx_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno)
+{
+ /* Access to ctx or passing it to a helper is only allowed in
+ * its original, unmodified form.
+ */
+
+ if (reg->off) {
+ verbose("dereference of modified ctx ptr R%d off=%d disallowed\n",
+ regno, reg->off);
+ return -EACCES;
+ }
+
+ if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+ verbose("variable ctx access var_off=%s disallowed\n", tn_buf);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
/* truncate register to smaller size (in bytes)
* must be called with size < BPF_REG_SIZE
*/
@@ -1320,22 +1344,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
verbose("R%d leaks addr into ctx\n", value_regno);
return -EACCES;
}
- /* ctx accesses must be at a fixed offset, so that we can
- * determine what type of data were returned.
- */
- if (reg->off) {
- verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
- regno, reg->off, off - reg->off);
- return -EACCES;
- }
- if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
- char tn_buf[48];
+ err = check_ctx_reg(env, reg, regno);
+ if (err < 0)
+ return err;
- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose("variable ctx access var_off=%s off=%d size=%d",
- tn_buf, off, size);
- return -EACCES;
- }
err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
if (!err && t == BPF_READ && value_regno >= 0) {
/* ctx access returns either a scalar, or a
@@ -1573,6 +1585,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
expected_type = PTR_TO_CTX;
if (type != expected_type)
goto err_type;
+ err = check_ctx_reg(env, reg, regno);
+ if (err < 0)
+ return err;
} else if (arg_type == ARG_PTR_TO_MEM ||
arg_type == ARG_PTR_TO_UNINIT_MEM) {
expected_type = PTR_TO_STACK;
@@ -3442,6 +3457,7 @@ static bool may_access_skb(enum bpf_prog_type type)
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_reg_state *regs = cur_regs(env);
+ static const int ctx_reg = BPF_REG_6;
u8 mode = BPF_MODE(insn->code);
int i, err;
@@ -3458,11 +3474,11 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
/* check whether implicit source operand (register R6) is readable */
- err = check_reg_arg(env, BPF_REG_6, SRC_OP);
+ err = check_reg_arg(env, ctx_reg, SRC_OP);
if (err)
return err;
- if (regs[BPF_REG_6].type != PTR_TO_CTX) {
+ if (regs[ctx_reg].type != PTR_TO_CTX) {
verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
return -EINVAL;
}
@@ -3474,6 +3490,10 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return err;
}
+ err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
+ if (err < 0)
+ return err;
+
/* reset caller saved regs to unreadable */
for (i = 0; i < CALLER_SAVED_REGS; i++) {
mark_reg_not_init(regs, caller_saved[i]);
diff --git a/kernel/cred.c b/kernel/cred.c
index 5ab1f7e..a9f0f8b 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -220,7 +220,7 @@ struct cred *cred_alloc_blank(void)
new->magic = CRED_MAGIC;
#endif
- if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
+ if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
goto error;
return new;
@@ -279,7 +279,7 @@ struct cred *prepare_creds(void)
new->security = NULL;
#endif
- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
validate_creds(new);
return new;
@@ -654,7 +654,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
#ifdef CONFIG_SECURITY
new->security = NULL;
#endif
- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
put_cred(old);
diff --git a/kernel/exit.c b/kernel/exit.c
index acf5c6f..5a25e76 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -581,10 +581,6 @@ static struct task_struct *find_child_reaper(struct task_struct *father,
}
write_unlock_irq(&tasklist_lock);
- if (unlikely(pid_ns == &init_pid_ns)) {
- panic("Attempted to kill init! exitcode=0x%08x\n",
- father->signal->group_exit_code ?: father->exit_code);
- }
list_for_each_entry_safe(p, n, dead, ptrace_entry) {
list_del_init(&p->ptrace_entry);
@@ -838,6 +834,14 @@ void __noreturn do_exit(long code)
acct_update_integrals(tsk);
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
+ /*
+ * If the last thread of global init has exited, panic
+ * immediately to get a useable coredump.
+ */
+ if (unlikely(is_global_init(tsk)))
+ panic("Attempted to kill init! exitcode=0x%08x\n",
+ tsk->signal->group_exit_code ?: (int)code);
+
#ifdef CONFIG_POSIX_TIMERS
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
diff --git a/kernel/kcov.c b/kernel/kcov.c
index f1e060b..2f0048e 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
@@ -20,34 +21,157 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
+#include <linux/refcount.h>
+#include <linux/log2.h>
#include <asm/setup.h>
+#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
+
+/* Number of 64-bit words written per one comparison: */
+#define KCOV_WORDS_PER_CMP 4
+
/*
* kcov descriptor (one per opened debugfs file).
* State transitions of the descriptor:
* - initial state after open()
* - then there must be a single ioctl(KCOV_INIT_TRACE) call
* - then, mmap() call (several calls are allowed but not useful)
- * - then, repeated enable/disable for a task (only one task a time allowed)
+ * - then, ioctl(KCOV_ENABLE, arg), where arg is
+ * KCOV_TRACE_PC - to trace only the PCs
+ * or
+ * KCOV_TRACE_CMP - to trace only the comparison operands
+ * - then, ioctl(KCOV_DISABLE) to disable the task.
+ * Enabling/disabling ioctls can be repeated (only one task a time allowed).
*/
struct kcov {
/*
* Reference counter. We keep one for:
* - opened file descriptor
* - task with enabled coverage (we can't unwire it from another task)
+ * - each code section for remote coverage collection
*/
- atomic_t refcount;
+ refcount_t refcount;
/* The lock protects mode, size, area and t. */
spinlock_t lock;
enum kcov_mode mode;
- /* Size of arena (in long's for KCOV_MODE_TRACE). */
- unsigned size;
+ /* Size of arena (in long's). */
+ unsigned int size;
/* Coverage buffer shared with user space. */
void *area;
/* Task for which we collect coverage, or NULL. */
struct task_struct *t;
+ /* Collecting coverage from remote (background) threads. */
+ bool remote;
+ /* Size of remote area (in long's). */
+ unsigned int remote_size;
+ /*
+ * Sequence is incremented each time kcov is reenabled, used by
+ * kcov_remote_stop(), see the comment there.
+ */
+ int sequence;
};
+struct kcov_remote_area {
+ struct list_head list;
+ unsigned int size;
+};
+
+struct kcov_remote {
+ u64 handle;
+ struct kcov *kcov;
+ struct hlist_node hnode;
+};
+
+static DEFINE_SPINLOCK(kcov_remote_lock);
+static DEFINE_HASHTABLE(kcov_remote_map, 4);
+static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);
+
+/* Must be called with kcov_remote_lock locked. */
+static struct kcov_remote *kcov_remote_find(u64 handle)
+{
+ struct kcov_remote *remote;
+
+ hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
+ if (remote->handle == handle)
+ return remote;
+ }
+ return NULL;
+}
+
+static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
+{
+ struct kcov_remote *remote;
+
+ if (kcov_remote_find(handle))
+ return ERR_PTR(-EEXIST);
+ remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
+ if (!remote)
+ return ERR_PTR(-ENOMEM);
+ remote->handle = handle;
+ remote->kcov = kcov;
+ hash_add(kcov_remote_map, &remote->hnode, handle);
+ return remote;
+}
+
+/* Must be called with kcov_remote_lock locked. */
+static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
+{
+ struct kcov_remote_area *area;
+ struct list_head *pos;
+
+ kcov_debug("size = %u\n", size);
+ list_for_each(pos, &kcov_remote_areas) {
+ area = list_entry(pos, struct kcov_remote_area, list);
+ if (area->size == size) {
+ list_del(&area->list);
+ kcov_debug("rv = %px\n", area);
+ return area;
+ }
+ }
+ kcov_debug("rv = NULL\n");
+ return NULL;
+}
+
+/* Must be called with kcov_remote_lock locked. */
+static void kcov_remote_area_put(struct kcov_remote_area *area,
+ unsigned int size)
+{
+ kcov_debug("area = %px, size = %u\n", area, size);
+ INIT_LIST_HEAD(&area->list);
+ area->size = size;
+ list_add(&area->list, &kcov_remote_areas);
+}
+
+static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+{
+ unsigned int mode;
+
+ /*
+ * We are interested in code coverage as a function of a syscall inputs,
+ * so we ignore code executed in interrupts.
+ */
+ if (!in_task())
+ return false;
+ mode = READ_ONCE(t->kcov_mode);
+ /*
+ * There is some code that runs in interrupts but for which
+ * in_interrupt() returns false (e.g. preempt_schedule_irq()).
+ * READ_ONCE()/barrier() effectively provides load-acquire wrt
+ * interrupts, there are paired barrier()/WRITE_ONCE() in
+ * kcov_start().
+ */
+ barrier();
+ return mode == needed_mode;
+}
+
+static notrace unsigned long canonicalize_ip(unsigned long ip)
+{
+#ifdef CONFIG_RANDOMIZE_BASE
+ ip -= kaslr_offset();
+#endif
+ return ip;
+}
+
/*
* Entry point from instrumented code.
* This is called once per basic-block/edge.
@@ -55,64 +179,223 @@ struct kcov {
void notrace __sanitizer_cov_trace_pc(void)
{
struct task_struct *t;
- enum kcov_mode mode;
+ unsigned long *area;
+ unsigned long ip = canonicalize_ip(_RET_IP_);
+ unsigned long pos;
t = current;
- /*
- * We are interested in code coverage as a function of a syscall inputs,
- * so we ignore code executed in interrupts.
- */
- if (!t || !in_task())
+ if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
return;
- mode = READ_ONCE(t->kcov_mode);
- if (mode == KCOV_MODE_TRACE) {
- unsigned long *area;
- unsigned long pos;
- unsigned long ip = _RET_IP_;
-#ifdef CONFIG_RANDOMIZE_BASE
- ip -= kaslr_offset();
-#endif
-
- /*
- * There is some code that runs in interrupts but for which
- * in_interrupt() returns false (e.g. preempt_schedule_irq()).
- * READ_ONCE()/barrier() effectively provides load-acquire wrt
- * interrupts, there are paired barrier()/WRITE_ONCE() in
- * kcov_ioctl_locked().
- */
- barrier();
- area = t->kcov_area;
- /* The first word is number of subsequent PCs. */
- pos = READ_ONCE(area[0]) + 1;
- if (likely(pos < t->kcov_size)) {
- area[pos] = ip;
- WRITE_ONCE(area[0], pos);
- }
+ area = t->kcov_area;
+ /* The first 64-bit word is the number of subsequent PCs. */
+ pos = READ_ONCE(area[0]) + 1;
+ if (likely(pos < t->kcov_size)) {
+ area[pos] = ip;
+ WRITE_ONCE(area[0], pos);
}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
-static void kcov_get(struct kcov *kcov)
+#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
+static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
- atomic_inc(&kcov->refcount);
-}
+ struct task_struct *t;
+ u64 *area;
+ u64 count, start_index, end_pos, max_pos;
-static void kcov_put(struct kcov *kcov)
-{
- if (atomic_dec_and_test(&kcov->refcount)) {
- vfree(kcov->area);
- kfree(kcov);
+ t = current;
+ if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
+ return;
+
+ ip = canonicalize_ip(ip);
+
+ /*
+ * We write all comparison arguments and types as u64.
+ * The buffer was allocated for t->kcov_size unsigned longs.
+ */
+ area = (u64 *)t->kcov_area;
+ max_pos = t->kcov_size * sizeof(unsigned long);
+
+ count = READ_ONCE(area[0]);
+
+ /* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
+ start_index = 1 + count * KCOV_WORDS_PER_CMP;
+ end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
+ if (likely(end_pos <= max_pos)) {
+ area[start_index] = type;
+ area[start_index + 1] = arg1;
+ area[start_index + 2] = arg2;
+ area[start_index + 3] = ip;
+ WRITE_ONCE(area[0], count + 1);
}
}
-void kcov_task_init(struct task_struct *t)
+void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
+{
+ write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);
+
+void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
+{
+ write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
+
+void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
+{
+ write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);
+
+void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
+{
+ write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);
+
+void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
+{
+ write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
+ _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);
+
+void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
+{
+ write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
+ _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
+
+void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
+{
+ write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
+ _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);
+
+void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
+{
+ write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
+ _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);
+
+void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
+{
+ u64 i;
+ u64 count = cases[0];
+ u64 size = cases[1];
+ u64 type = KCOV_CMP_CONST;
+
+ switch (size) {
+ case 8:
+ type |= KCOV_CMP_SIZE(0);
+ break;
+ case 16:
+ type |= KCOV_CMP_SIZE(1);
+ break;
+ case 32:
+ type |= KCOV_CMP_SIZE(2);
+ break;
+ case 64:
+ type |= KCOV_CMP_SIZE(3);
+ break;
+ default:
+ return;
+ }
+ for (i = 0; i < count; i++)
+ write_comp_data(type, cases[i + 2], val, _RET_IP_);
+}
+EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
+#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
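write_comp_data() above packs each comparison into KCOV_WORDS_PER_CMP (four) consecutive 64-bit words after the record counter in slot 0: type, arg1, arg2, then the caller's ip. Below is a minimal userspace sketch of decoding such a buffer once KCOV_TRACE_CMP has been enabled; dump_cmps() and the mmap'd cover pointer are illustrative assumptions, only the record layout and the KCOV_CMP_* encoding come from the code above.

/* Sketch: decode a KCOV_TRACE_CMP buffer mapped from the kcov fd. */
#include <stdint.h>
#include <stdio.h>

#define KCOV_WORDS_PER_CMP	4
#define KCOV_CMP_CONST		(1 << 0)
#define KCOV_CMP_SIZE(n)	((n) << 1)
#define KCOV_CMP_MASK		KCOV_CMP_SIZE(3)

static void dump_cmps(const uint64_t *cover)
{
	uint64_t n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	uint64_t i, type, arg1, arg2, ip;

	for (i = 0; i < n; i++) {
		type = cover[i * KCOV_WORDS_PER_CMP + 1];
		arg1 = cover[i * KCOV_WORDS_PER_CMP + 2];
		arg2 = cover[i * KCOV_WORDS_PER_CMP + 3];
		ip   = cover[i * KCOV_WORDS_PER_CMP + 4];
		printf("ip 0x%llx: %u-byte cmp 0x%llx vs 0x%llx%s\n",
		       (unsigned long long)ip,
		       1u << ((unsigned)(type & KCOV_CMP_MASK) >> 1),
		       (unsigned long long)arg1, (unsigned long long)arg2,
		       (type & KCOV_CMP_CONST) ? " (const operand)" : "");
	}
}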
+
+static void kcov_start(struct task_struct *t, unsigned int size,
+ void *area, enum kcov_mode mode, int sequence)
+{
+ kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
+ /* Cache in task struct for performance. */
+ t->kcov_size = size;
+ t->kcov_area = area;
+ /* See comment in check_kcov_mode(). */
+ barrier();
+ WRITE_ONCE(t->kcov_mode, mode);
+ t->kcov_sequence = sequence;
+}
+
+static void kcov_stop(struct task_struct *t)
{
WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
barrier();
t->kcov_size = 0;
t->kcov_area = NULL;
+}
+
+static void kcov_task_reset(struct task_struct *t)
+{
+ kcov_stop(t);
t->kcov = NULL;
+ t->kcov_sequence = 0;
+ t->kcov_handle = 0;
+}
+
+void kcov_task_init(struct task_struct *t)
+{
+ kcov_task_reset(t);
+ t->kcov_handle = current->kcov_handle;
+}
+
+static void kcov_reset(struct kcov *kcov)
+{
+ kcov->t = NULL;
+ kcov->mode = KCOV_MODE_INIT;
+ kcov->remote = false;
+ kcov->remote_size = 0;
+ kcov->sequence++;
+}
+
+static void kcov_remote_reset(struct kcov *kcov)
+{
+ int bkt;
+ struct kcov_remote *remote;
+ struct hlist_node *tmp;
+
+ spin_lock(&kcov_remote_lock);
+ hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
+ if (remote->kcov != kcov)
+ continue;
+ kcov_debug("removing handle %llx\n", remote->handle);
+ hash_del(&remote->hnode);
+ kfree(remote);
+ }
+ /* Do reset before unlock to prevent races with kcov_remote_start(). */
+ kcov_reset(kcov);
+ spin_unlock(&kcov_remote_lock);
+}
+
+static void kcov_disable(struct task_struct *t, struct kcov *kcov)
+{
+ kcov_task_reset(t);
+ if (kcov->remote)
+ kcov_remote_reset(kcov);
+ else
+ kcov_reset(kcov);
+}
+
+static void kcov_get(struct kcov *kcov)
+{
+ refcount_inc(&kcov->refcount);
+}
+
+static void kcov_put(struct kcov *kcov)
+{
+ if (refcount_dec_and_test(&kcov->refcount)) {
+ kcov_remote_reset(kcov);
+ vfree(kcov->area);
+ kfree(kcov);
+ }
}
void kcov_task_exit(struct task_struct *t)
@@ -122,14 +405,36 @@ void kcov_task_exit(struct task_struct *t)
kcov = t->kcov;
if (kcov == NULL)
return;
+
spin_lock(&kcov->lock);
+ kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
+ /*
+ * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
+ * which comes down to:
+ * WARN_ON(!kcov->remote && kcov->t != t);
+ *
+ * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
+	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
+ * In this case we should print a warning right away, since a task
+ * shouldn't be exiting when it's in a kcov coverage collection
+ * section. Here t points to the task that is collecting remote
+ * coverage, and t->kcov->t points to the thread that created the
+ * kcov device. Which means that to detect this case we need to
+ * check that t != t->kcov->t, and this gives us the following:
+ * WARN_ON(kcov->remote && kcov->t != t);
+ *
+ * 2. The task that created kcov exiting without calling KCOV_DISABLE,
+ * and then again we can make sure that t->kcov->t == t:
+ * WARN_ON(kcov->remote && kcov->t != t);
+ *
+ * By combining all three checks into one we get:
+ */
if (WARN_ON(kcov->t != t)) {
spin_unlock(&kcov->lock);
return;
}
/* Just to not leave dangling references behind. */
- kcov_task_init(t);
- kcov->t = NULL;
+ kcov_disable(t, kcov);
spin_unlock(&kcov->lock);
kcov_put(kcov);
}
@@ -148,7 +453,7 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
spin_lock(&kcov->lock);
size = kcov->size * sizeof(unsigned long);
- if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
+ if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
vma->vm_end - vma->vm_start != size) {
res = -EINVAL;
goto exit;
@@ -177,7 +482,9 @@ static int kcov_open(struct inode *inode, struct file *filep)
kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
if (!kcov)
return -ENOMEM;
- atomic_set(&kcov->refcount, 1);
+ kcov->mode = KCOV_MODE_DISABLED;
+ kcov->sequence = 1;
+ refcount_set(&kcov->refcount, 1);
spin_lock_init(&kcov->lock);
filep->private_data = kcov;
return nonseekable_open(inode, filep);
@@ -189,14 +496,64 @@ static int kcov_close(struct inode *inode, struct file *filep)
return 0;
}
+static int kcov_get_mode(unsigned long arg)
+{
+ if (arg == KCOV_TRACE_PC)
+ return KCOV_MODE_TRACE_PC;
+ else if (arg == KCOV_TRACE_CMP)
+#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
+ return KCOV_MODE_TRACE_CMP;
+#else
+ return -ENOTSUPP;
+#endif
+ else
+ return -EINVAL;
+}
+
+/*
+ * Fault in a lazily-faulted vmalloc area before it can be used by
+ * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
+ * vmalloc fault handling path is instrumented.
+ */
+static void kcov_fault_in_area(struct kcov *kcov)
+{
+ unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
+ unsigned long *area = kcov->area;
+ unsigned long offset;
+
+ for (offset = 0; offset < kcov->size; offset += stride)
+ READ_ONCE(area[offset]);
+}
+
+static inline bool kcov_check_handle(u64 handle, bool common_valid,
+ bool uncommon_valid, bool zero_valid)
+{
+ if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
+ return false;
+ switch (handle & KCOV_SUBSYSTEM_MASK) {
+ case KCOV_SUBSYSTEM_COMMON:
+ return (handle & KCOV_INSTANCE_MASK) ?
+ common_valid : zero_valid;
+ case KCOV_SUBSYSTEM_USB:
+ return uncommon_valid;
+ default:
+ return false;
+ }
+ return false;
+}
+
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
unsigned long arg)
{
struct task_struct *t;
unsigned long size, unused;
+ int mode, i;
+ struct kcov_remote_arg *remote_arg;
+ struct kcov_remote *remote;
switch (cmd) {
case KCOV_INIT_TRACE:
+ kcov_debug("KCOV_INIT_TRACE\n");
/*
* Enable kcov in trace mode and setup buffer size.
* Must happen before anything else.
@@ -212,9 +569,10 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
if (size < 2 || size > INT_MAX / sizeof(unsigned long))
return -EINVAL;
kcov->size = size;
- kcov->mode = KCOV_MODE_TRACE;
+ kcov->mode = KCOV_MODE_INIT;
return 0;
case KCOV_ENABLE:
+ kcov_debug("KCOV_ENABLE\n");
/*
* Enable coverage for the current task.
* At this point user must have been enabled trace mode,
@@ -222,25 +580,25 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
* at task exit or voluntary by KCOV_DISABLE. After that it can
* be enabled for another task.
*/
- unused = arg;
- if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
- kcov->area == NULL)
+ if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
return -EINVAL;
t = current;
if (kcov->t != NULL || t->kcov != NULL)
return -EBUSY;
- /* Cache in task struct for performance. */
- t->kcov_size = kcov->size;
- t->kcov_area = kcov->area;
- /* See comment in __sanitizer_cov_trace_pc(). */
- barrier();
- WRITE_ONCE(t->kcov_mode, kcov->mode);
+ mode = kcov_get_mode(arg);
+ if (mode < 0)
+ return mode;
+ kcov_fault_in_area(kcov);
+ kcov->mode = mode;
+ kcov_start(t, kcov->size, kcov->area, kcov->mode,
+ kcov->sequence);
t->kcov = kcov;
kcov->t = t;
- /* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
+ /* Put either in kcov_task_exit() or in KCOV_DISABLE. */
kcov_get(kcov);
return 0;
case KCOV_DISABLE:
+ kcov_debug("KCOV_DISABLE\n");
/* Disable coverage for the current task. */
unused = arg;
if (unused != 0 || current->kcov != kcov)
@@ -248,10 +606,65 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
t = current;
if (WARN_ON(kcov->t != t))
return -EINVAL;
- kcov_task_init(t);
- kcov->t = NULL;
+ kcov_disable(t, kcov);
kcov_put(kcov);
return 0;
+ case KCOV_REMOTE_ENABLE:
+ kcov_debug("KCOV_REMOTE_ENABLE\n");
+ if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
+ return -EINVAL;
+ t = current;
+ if (kcov->t != NULL || t->kcov != NULL)
+ return -EBUSY;
+ remote_arg = (struct kcov_remote_arg *)arg;
+ mode = kcov_get_mode(remote_arg->trace_mode);
+ if (mode < 0)
+ return mode;
+ if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
+ return -EINVAL;
+ kcov->mode = mode;
+ t->kcov = kcov;
+ kcov->t = t;
+ kcov->remote = true;
+ kcov->remote_size = remote_arg->area_size;
+ spin_lock(&kcov_remote_lock);
+ for (i = 0; i < remote_arg->num_handles; i++) {
+ kcov_debug("handle %llx\n", remote_arg->handles[i]);
+ if (!kcov_check_handle(remote_arg->handles[i],
+ false, true, false)) {
+ spin_unlock(&kcov_remote_lock);
+ kcov_disable(t, kcov);
+ return -EINVAL;
+ }
+ remote = kcov_remote_add(kcov, remote_arg->handles[i]);
+ if (IS_ERR(remote)) {
+ spin_unlock(&kcov_remote_lock);
+ kcov_disable(t, kcov);
+ return PTR_ERR(remote);
+ }
+ }
+ if (remote_arg->common_handle) {
+ kcov_debug("common handle %llx\n",
+ remote_arg->common_handle);
+ if (!kcov_check_handle(remote_arg->common_handle,
+ true, false, false)) {
+ spin_unlock(&kcov_remote_lock);
+ kcov_disable(t, kcov);
+ return -EINVAL;
+ }
+ remote = kcov_remote_add(kcov,
+ remote_arg->common_handle);
+ if (IS_ERR(remote)) {
+ spin_unlock(&kcov_remote_lock);
+ kcov_disable(t, kcov);
+ return PTR_ERR(remote);
+ }
+ t->kcov_handle = remote_arg->common_handle;
+ }
+ spin_unlock(&kcov_remote_lock);
+ /* Put either in kcov_task_exit() or in KCOV_DISABLE. */
+ kcov_get(kcov);
+ return 0;
default:
return -ENOTTY;
}
@@ -261,11 +674,35 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct kcov *kcov;
int res;
+ struct kcov_remote_arg *remote_arg = NULL;
+ unsigned int remote_num_handles;
+ unsigned long remote_arg_size;
+
+ if (cmd == KCOV_REMOTE_ENABLE) {
+ if (get_user(remote_num_handles, (unsigned __user *)(arg +
+ offsetof(struct kcov_remote_arg, num_handles))))
+ return -EFAULT;
+ if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
+ return -EINVAL;
+ remote_arg_size = sizeof(*remote_arg) +
+ sizeof(remote_arg->handles[0]) * remote_num_handles;
+ remote_arg = memdup_user((void __user *)arg, remote_arg_size);
+ if (IS_ERR(remote_arg))
+ return PTR_ERR(remote_arg);
+ if (remote_arg->num_handles != remote_num_handles) {
+ kfree(remote_arg);
+ return -EINVAL;
+ }
+ arg = (unsigned long)remote_arg;
+ }
kcov = filep->private_data;
spin_lock(&kcov->lock);
res = kcov_ioctl_locked(kcov, cmd, arg);
spin_unlock(&kcov->lock);
+
+ kfree(remote_arg);
+
return res;
}
@@ -277,6 +714,207 @@ static const struct file_operations kcov_fops = {
.release = kcov_close,
};
+/*
+ * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
+ * of code in a kernel background thread to allow kcov to be used to collect
+ * coverage from that part of code.
+ *
+ * The handle argument of kcov_remote_start() identifies a code section that is
+ * used for coverage collection. A userspace process passes this handle to
+ * KCOV_REMOTE_ENABLE ioctl to make the used kcov device start collecting
+ * coverage for the code section identified by this handle.
+ *
+ * The usage of these annotations in the kernel code is different depending on
+ * the type of the kernel thread whose code is being annotated.
+ *
+ * For global kernel threads that are spawned in a limited number of instances
+ * (e.g. one USB hub_event() worker thread is spawned per USB HCD), each
+ * instance must be assigned a unique 4-byte instance id. The instance id is
+ * then combined with a 1-byte subsystem id to get a handle via
+ * kcov_remote_handle(subsystem_id, instance_id).
+ *
+ * For local kernel threads that are spawned from a system call handler when a
+ * user interacts with some kernel interface (e.g. vhost workers), a handle is
+ * passed from a userspace process as the common_handle field of the
+ * kcov_remote_arg struct (note that the user must generate a handle by using
+ * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
+ * arbitrary 4-byte non-zero number as the instance id). This common handle
+ * then gets saved into the task_struct of the process that issued the
+ * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
+ * kernel threads, the common handle must be retrieved via kcov_common_handle()
+ * and passed to the spawned threads via custom annotations. Those kernel
+ * threads must in turn be annotated with kcov_remote_start(common_handle) and
+ * kcov_remote_stop(). All of the threads that are spawned by the same process
+ * obtain the same handle, hence the name "common".
+ *
+ * See Documentation/dev-tools/kcov.rst for more details.
+ *
+ * Internally, this function looks up the kcov device associated with the
+ * provided handle, allocates an area for coverage collection, and saves the
+ * pointers to kcov and area into the current task_struct to allow coverage to
+ * be collected via __sanitizer_cov_trace_pc().
+ * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
+ * collecting coverage and copies all collected coverage into the kcov area.
+ */
+void kcov_remote_start(u64 handle)
+{
+ struct kcov_remote *remote;
+ void *area;
+ struct task_struct *t;
+ unsigned int size;
+ enum kcov_mode mode;
+ int sequence;
+
+ if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
+ return;
+ if (WARN_ON(!in_task()))
+ return;
+ t = current;
+ /*
+ * Check that kcov_remote_start is not called twice
+ * nor called by user tasks (with enabled kcov).
+ */
+ if (WARN_ON(t->kcov))
+ return;
+
+ kcov_debug("handle = %llx\n", handle);
+
+ spin_lock(&kcov_remote_lock);
+ remote = kcov_remote_find(handle);
+ if (!remote) {
+ kcov_debug("no remote found");
+ spin_unlock(&kcov_remote_lock);
+ return;
+ }
+ /* Put in kcov_remote_stop(). */
+ kcov_get(remote->kcov);
+ t->kcov = remote->kcov;
+ /*
+ * Read kcov fields before unlock to prevent races with
+ * KCOV_DISABLE / kcov_remote_reset().
+ */
+ size = remote->kcov->remote_size;
+ mode = remote->kcov->mode;
+ sequence = remote->kcov->sequence;
+ area = kcov_remote_area_get(size);
+ spin_unlock(&kcov_remote_lock);
+
+ if (!area) {
+ area = vmalloc(size * sizeof(unsigned long));
+ if (!area) {
+ t->kcov = NULL;
+ kcov_put(remote->kcov);
+ return;
+ }
+ }
+ /* Reset coverage size. */
+ *(u64 *)area = 0;
+
+ kcov_debug("area = %px, size = %u", area, size);
+
+ kcov_start(t, size, area, mode, sequence);
+
+}
+EXPORT_SYMBOL(kcov_remote_start);
+
+static void kcov_move_area(enum kcov_mode mode, void *dst_area,
+ unsigned int dst_area_size, void *src_area)
+{
+ u64 word_size = sizeof(unsigned long);
+ u64 count_size, entry_size_log;
+ u64 dst_len, src_len;
+ void *dst_entries, *src_entries;
+ u64 dst_occupied, dst_free, bytes_to_move, entries_moved;
+
+ kcov_debug("%px %u <= %px %lu\n",
+ dst_area, dst_area_size, src_area, *(unsigned long *)src_area);
+
+ switch (mode) {
+ case KCOV_MODE_TRACE_PC:
+ dst_len = READ_ONCE(*(unsigned long *)dst_area);
+ src_len = *(unsigned long *)src_area;
+ count_size = sizeof(unsigned long);
+ entry_size_log = __ilog2_u64(sizeof(unsigned long));
+ break;
+ case KCOV_MODE_TRACE_CMP:
+ dst_len = READ_ONCE(*(u64 *)dst_area);
+ src_len = *(u64 *)src_area;
+ count_size = sizeof(u64);
+ BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
+ entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ /* As arm can't divide u64 integers use log of entry size. */
+ if (dst_len > ((dst_area_size * word_size - count_size) >>
+ entry_size_log))
+ return;
+ dst_occupied = count_size + (dst_len << entry_size_log);
+ dst_free = dst_area_size * word_size - dst_occupied;
+ bytes_to_move = min(dst_free, src_len << entry_size_log);
+ dst_entries = dst_area + dst_occupied;
+ src_entries = src_area + count_size;
+ memcpy(dst_entries, src_entries, bytes_to_move);
+ entries_moved = bytes_to_move >> entry_size_log;
+
+ switch (mode) {
+ case KCOV_MODE_TRACE_PC:
+ WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
+ break;
+ case KCOV_MODE_TRACE_CMP:
+ WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
+ break;
+ default:
+ break;
+ }
+}
+
+/* See the comment before kcov_remote_start() for usage details. */
+void kcov_remote_stop(void)
+{
+ struct task_struct *t = current;
+ struct kcov *kcov = t->kcov;
+ void *area = t->kcov_area;
+ unsigned int size = t->kcov_size;
+ int sequence = t->kcov_sequence;
+
+ if (!kcov) {
+ kcov_debug("no kcov found\n");
+ return;
+ }
+
+ kcov_stop(t);
+ t->kcov = NULL;
+
+ spin_lock(&kcov->lock);
+ /*
+ * KCOV_DISABLE could have been called between kcov_remote_start()
+ * and kcov_remote_stop(), hence the check.
+ */
+ kcov_debug("move if: %d == %d && %d\n",
+ sequence, kcov->sequence, (int)kcov->remote);
+ if (sequence == kcov->sequence && kcov->remote)
+ kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
+ spin_unlock(&kcov->lock);
+
+ spin_lock(&kcov_remote_lock);
+ kcov_remote_area_put(area, size);
+ spin_unlock(&kcov_remote_lock);
+
+ kcov_put(kcov);
+}
+EXPORT_SYMBOL(kcov_remote_stop);
+
+/* See the comment before kcov_remote_start() for usage details. */
+u64 kcov_common_handle(void)
+{
+ return current->kcov_handle;
+}
+EXPORT_SYMBOL(kcov_common_handle);
+
static int __init kcov_init(void)
{
/*
@@ -284,10 +922,8 @@ static int __init kcov_init(void)
* there is no need to protect it against removal races. The
* use of debugfs_create_file_unsafe() is actually safe here.
*/
- if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
- pr_err("failed to create kcov in debugfs\n");
- return -ENOMEM;
- }
+ debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);
+
return 0;
}
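Taken together, the KCOV_REMOTE_ENABLE path above is driven from userspace roughly like the remote-coverage example in Documentation/dev-tools/kcov.rst. The sketch below is a hedged approximation, not part of the patch: the ioctl numbers, kcov_remote_arg layout and subsystem macros mirror the uapi header this series adds, while COVER_SIZE and the handle instance ids (0x42, 0x4242) are arbitrary choices.

/* Sketch: collect remote coverage for USB bus #1 plus a common handle. */
#include <fcntl.h>
#include <linux/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define KCOV_INIT_TRACE		_IOR('c', 1, unsigned long)
#define KCOV_DISABLE		_IO('c', 101)
#define KCOV_REMOTE_ENABLE	_IOW('c', 102, struct kcov_remote_arg)

#define KCOV_TRACE_PC		0
#define KCOV_SUBSYSTEM_COMMON	(0x00ull << 56)
#define KCOV_SUBSYSTEM_USB	(0x01ull << 56)

#define COVER_SIZE		(64 << 10)	/* in unsigned longs */

struct kcov_remote_arg {
	__u32		trace_mode;	/* KCOV_TRACE_PC or KCOV_TRACE_CMP */
	__u32		area_size;	/* in unsigned longs */
	__u32		num_handles;
	__aligned_u64	common_handle;
	__aligned_u64	handles[];
};

static __u64 kcov_remote_handle(__u64 subsys, __u64 inst)
{
	return subsys | inst;	/* instance id must fit in the low 4 bytes */
}

int main(void)
{
	struct kcov_remote_arg *arg;
	unsigned long *cover, n, i;
	int fd;

	fd = open("/sys/kernel/debug/kcov", O_RDWR);
	if (fd == -1)
		perror("open"), exit(1);
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		perror("KCOV_INIT_TRACE"), exit(1);
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if ((void *)cover == MAP_FAILED)
		perror("mmap"), exit(1);

	arg = calloc(1, sizeof(*arg) + sizeof(__u64));
	if (!arg)
		exit(1);
	arg->trace_mode = KCOV_TRACE_PC;
	arg->area_size = COVER_SIZE;
	arg->num_handles = 1;
	arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 0x42);
	arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB, 0x4242);
	if (ioctl(fd, KCOV_REMOTE_ENABLE, arg))
		perror("KCOV_REMOTE_ENABLE"), exit(1);
	free(arg);

	/* Trigger activity in the annotated code, e.g. on USB bus #1. */
	sleep(2);

	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);

	ioctl(fd, KCOV_DISABLE, 0);
	munmap(cover, COVER_SIZE * sizeof(unsigned long));
	close(fd);
	return 0;
}

On the kernel side the annotated sections simply bracket their work with kcov_remote_start(handle) / kcov_remote_stop(), with spawned local threads picking the handle up via kcov_common_handle(), exactly as the comment block before kcov_remote_start() describes.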
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index c18dadd..8ab57cc 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -53,19 +53,19 @@ EXPORT_SYMBOL(__rwlock_init);
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
- struct task_struct *owner = NULL;
+ struct task_struct *owner = READ_ONCE(lock->owner);
- if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
- owner = lock->owner;
+ if (owner == SPINLOCK_OWNER_INIT)
+ owner = NULL;
printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
msg, raw_smp_processor_id(),
current->comm, task_pid_nr(current));
printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
".owner_cpu: %d\n",
- lock, lock->magic,
+ lock, READ_ONCE(lock->magic),
owner ? owner->comm : "<none>",
owner ? task_pid_nr(owner) : -1,
- lock->owner_cpu);
+ READ_ONCE(lock->owner_cpu));
#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
msm_trigger_wdog_bite();
#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
@@ -87,16 +87,16 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
- SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
- SPIN_BUG_ON(lock->owner == current, lock, "recursion");
- SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+ SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
+ SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
+ SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
lock, "cpu recursion");
}
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
- lock->owner_cpu = raw_smp_processor_id();
- lock->owner = current;
+ WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
+ WRITE_ONCE(lock->owner, current);
}
static inline void debug_spin_unlock(raw_spinlock_t *lock)
@@ -106,8 +106,8 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
- lock->owner = SPINLOCK_OWNER_INIT;
- lock->owner_cpu = -1;
+ WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
+ WRITE_ONCE(lock->owner_cpu, -1);
}
/*
@@ -195,8 +195,8 @@ static inline void debug_write_lock_before(rwlock_t *lock)
static inline void debug_write_lock_after(rwlock_t *lock)
{
- lock->owner_cpu = raw_smp_processor_id();
- lock->owner = current;
+ WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
+ WRITE_ONCE(lock->owner, current);
}
static inline void debug_write_unlock(rwlock_t *lock)
@@ -205,8 +205,8 @@ static inline void debug_write_unlock(rwlock_t *lock)
RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
- lock->owner = SPINLOCK_OWNER_INIT;
- lock->owner_cpu = -1;
+ WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
+ WRITE_ONCE(lock->owner_cpu, -1);
}
void do_raw_write_lock(rwlock_t *lock)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index b7a6889..d4dbe4a 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -734,8 +734,15 @@ static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
* We have found the zone. Now walk the radix tree to find the leaf node
* for our PFN.
*/
+
+ /*
+	 * If the zone we wish to scan is the current zone and the
+ * pfn falls into the current node then we do not need to walk
+ * the tree.
+ */
node = bm->cur.node;
- if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
+ if (zone == bm->cur.zone &&
+ ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
goto node_found;
node = zone->rtree;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9902bf0..624bd5c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -18,6 +18,7 @@
#include <linux/rcupdate_wait.h>
#include <linux/blkdev.h>
+#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
@@ -2723,6 +2724,7 @@ static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
+ kcov_prepare_switch(prev);
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
@@ -2800,6 +2802,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
smp_mb__after_unlock_lock();
finish_lock_switch(rq, prev);
finish_arch_post_lock_switch();
+ kcov_finish_switch(current);
fire_sched_in_preempt_notifiers(current);
if (mm)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0035c46..345258e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1583,7 +1583,7 @@ static struct ctl_table vm_table[] = {
.procname = "drop_caches",
.data = &sysctl_drop_caches,
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0200,
.proc_handler = drop_caches_sysctl_handler,
.extra1 = &one,
.extra2 = &four,
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 4559e91..390c76d 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -568,25 +568,33 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
- struct taskstats *stats;
+ struct taskstats *stats_new, *stats;
- if (sig->stats || thread_group_empty(tsk))
- goto ret;
+ /* Pairs with smp_store_release() below. */
+ stats = smp_load_acquire(&sig->stats);
+ if (stats || thread_group_empty(tsk))
+ return stats;
/* No problem if kmem_cache_zalloc() fails */
- stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
+ stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
spin_lock_irq(&tsk->sighand->siglock);
- if (!sig->stats) {
- sig->stats = stats;
- stats = NULL;
+ stats = sig->stats;
+ if (!stats) {
+ /*
+		 * Pairs with the smp_load_acquire() above and orders the
+		 * kmem_cache_zalloc().
+ */
+ smp_store_release(&sig->stats, stats_new);
+ stats = stats_new;
+ stats_new = NULL;
}
spin_unlock_irq(&tsk->sighand->siglock);
- if (stats)
- kmem_cache_free(taskstats_cache, stats);
-ret:
- return sig->stats;
+ if (stats_new)
+ kmem_cache_free(taskstats_cache, stats_new);
+
+ return stats;
}
/* Send pid data out on exit */
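The taskstats hunk above is one instance of a general publish-once pattern: readers take the fast path with a lockless smp_load_acquire(), the slow path allocates outside the lock and publishes with smp_store_release() under the lock, and whoever loses the race frees its spare copy. Below is a stripped-down sketch of the idiom; struct obj, make_obj() and get_obj() are made-up names, only the barrier pairing reflects the patch.

#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	int payload;
};

static struct obj *shared;		/* published pointer, NULL until first use */
static DEFINE_SPINLOCK(shared_lock);

static struct obj *make_obj(void)
{
	return kzalloc(sizeof(struct obj), GFP_KERNEL);	/* NULL on failure is fine */
}

static struct obj *get_obj(void)
{
	struct obj *o, *o_new;

	/* Fast path: pairs with the smp_store_release() below. */
	o = smp_load_acquire(&shared);
	if (o)
		return o;

	o_new = make_obj();		/* allocation done outside the lock */

	spin_lock(&shared_lock);
	o = shared;
	if (!o) {
		/* Publish: orders make_obj()'s writes before the pointer. */
		smp_store_release(&shared, o_new);
		o = o_new;
		o_new = NULL;
	}
	spin_unlock(&shared_lock);

	kfree(o_new);			/* lost the race: drop the spare copy */
	return o;
}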
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index ff64c7b..58fd231 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -868,7 +868,8 @@ static int enqueue_hrtimer(struct hrtimer *timer,
base->cpu_base->active_bases |= 1 << base->index;
- timer->state |= HRTIMER_STATE_ENQUEUED;
+ /* Pairs with the lockless read in hrtimer_is_queued() */
+ WRITE_ONCE(timer->state, timer->state | HRTIMER_STATE_ENQUEUED);
return timerqueue_add(&base->active, &timer->node);
}
@@ -888,9 +889,17 @@ static void __remove_hrtimer(struct hrtimer *timer,
u8 newstate, int reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
+ u8 state = timer->state;
- if (!(timer->state & HRTIMER_STATE_ENQUEUED))
- goto out;
+ /*
+ * Pairs with the lockless read in hrtimer_is_queued()
+ *
+ * We need to preserve PINNED state here, otherwise we may end up
+ * migrating pinned hrtimers as well.
+ */
+ WRITE_ONCE(timer->state, newstate | (state & HRTIMER_STATE_PINNED));
+ if (!(state & HRTIMER_STATE_ENQUEUED))
+ return;
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
@@ -907,13 +916,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
if (reprogram && timer == cpu_base->next_timer)
hrtimer_force_reprogram(cpu_base, 1);
#endif
-
-out:
- /*
- * We need to preserve PINNED state here, otherwise we may end up
- * migrating pinned hrtimers as well.
- */
- timer->state = newstate | (timer->state & HRTIMER_STATE_PINNED);
}
/*
@@ -922,8 +924,9 @@ static void __remove_hrtimer(struct hrtimer *timer,
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
- if (hrtimer_is_queued(timer)) {
- u8 state = timer->state;
+ u8 state = timer->state;
+
+ if (state & HRTIMER_STATE_ENQUEUED) {
int reprogram;
/*
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 17cdc55..e5706a8 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -27,8 +27,6 @@
#include "posix-timers.h"
-static void delete_clock(struct kref *kref);
-
/*
* Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
*/
@@ -138,7 +136,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
err = 0;
if (!err) {
- kref_get(&clk->kref);
+ get_device(clk->dev);
fp->private_data = clk;
}
out:
@@ -154,7 +152,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp)
if (clk->ops.release)
err = clk->ops.release(clk);
- kref_put(&clk->kref, delete_clock);
+ put_device(clk->dev);
fp->private_data = NULL;
@@ -174,38 +172,35 @@ static const struct file_operations posix_clock_file_operations = {
#endif
};
-int posix_clock_register(struct posix_clock *clk, dev_t devid)
+int posix_clock_register(struct posix_clock *clk, struct device *dev)
{
int err;
- kref_init(&clk->kref);
init_rwsem(&clk->rwsem);
cdev_init(&clk->cdev, &posix_clock_file_operations);
+ err = cdev_device_add(&clk->cdev, dev);
+ if (err) {
+ pr_err("%s unable to add device %d:%d\n",
+ dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
+ return err;
+ }
clk->cdev.owner = clk->ops.owner;
- err = cdev_add(&clk->cdev, devid, 1);
+ clk->dev = dev;
- return err;
+ return 0;
}
EXPORT_SYMBOL_GPL(posix_clock_register);
-static void delete_clock(struct kref *kref)
-{
- struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
-
- if (clk->release)
- clk->release(clk);
-}
-
void posix_clock_unregister(struct posix_clock *clk)
{
- cdev_del(&clk->cdev);
+ cdev_device_del(&clk->cdev, clk->dev);
down_write(&clk->rwsem);
clk->zombie = true;
up_write(&clk->rwsem);
- kref_put(&clk->kref, delete_clock);
+ put_device(clk->dev);
}
EXPORT_SYMBOL_GPL(posix_clock_unregister);
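With the private kref gone, a posix_clock user now hands posix_clock_register() a refcounted struct device and frees its state from the device release callback, so the clock cannot disappear while a character-device fd is still open. Below is a hedged sketch of what a caller looks like after this change; struct my_clock, my_clock_release() and my_clock_create() are invented for illustration, only posix_clock_register()/posix_clock_unregister() and the struct device plumbing come from the patch.

#include <linux/device.h>
#include <linux/posix-clock.h>
#include <linux/slab.h>

struct my_clock {
	struct posix_clock pclock;
	struct device dev;
};

static void my_clock_release(struct device *dev)
{
	struct my_clock *c = container_of(dev, struct my_clock, dev);

	kfree(c);	/* runs only once the last open fd drops its reference */
}

static int my_clock_create(dev_t devt, const struct posix_clock_operations *ops)
{
	struct my_clock *c;
	int err;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	device_initialize(&c->dev);
	c->dev.devt = devt;
	c->dev.release = my_clock_release;
	dev_set_name(&c->dev, "myclk");

	c->pclock.ops = *ops;
	err = posix_clock_register(&c->pclock, &c->dev);
	if (err)
		put_device(&c->dev);	/* release callback frees c */
	return err;
}

Teardown is then a single posix_clock_unregister(&c->pclock): its put_device() drops the reference taken by device_initialize(), and my_clock_release() runs once any remaining readers close their fds.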
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8905860..551fdd4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -623,8 +623,7 @@ static int function_stat_show(struct seq_file *m, void *v)
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- avg = rec->time;
- do_div(avg, rec->counter);
+ avg = div64_ul(rec->time, rec->counter);
if (tracing_thresh && (avg < tracing_thresh))
goto out;
#endif
@@ -650,7 +649,8 @@ static int function_stat_show(struct seq_file *m, void *v)
* Divide only 1000 for ns^2 -> us^2 conversion.
* trace_print_graph_duration will divide 1000 again.
*/
- do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
+ stddev = div64_ul(stddev,
+ rec->counter * (rec->counter - 1) * 1000);
}
trace_seq_init(&s);
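The do_div() to div64_ul() switch above matters because rec->counter is an unsigned long: do_div() truncates its divisor to 32 bits, so a counter that tests as non-zero can still end up dividing by zero (or by a wrong value), while div64_ul() keeps the full divisor. A tiny illustrative helper follows, with avg_ns() as a hypothetical name.

#include <asm/div64.h>		/* do_div() */
#include <linux/math64.h>	/* div64_ul() */

static u64 avg_ns(u64 total_ns, unsigned long count)
{
	/*
	 * do_div(total_ns, count) would truncate 'count' to 32 bits, so a
	 * value such as 0x100000000 passes a non-zero check yet divides by
	 * zero; div64_ul() divides by the full unsigned long instead.
	 */
	if (!count)
		return 0;
	return div64_ul(total_ns, count);
}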
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ddf348e..aea0f44 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4374,6 +4374,10 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
+ if ((mask == TRACE_ITER_RECORD_TGID) ||
+ (mask == TRACE_ITER_RECORD_CMD))
+ lockdep_assert_held(&event_mutex);
+
/* do nothing if flag is already set */
if (!!(tr->trace_flags & mask) == !!enabled)
return 0;
@@ -4439,6 +4443,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
cmp += 2;
}
+ mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
for (i = 0; trace_options[i]; i++) {
@@ -4453,6 +4458,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
ret = set_tracer_option(tr, cmp, neg);
mutex_unlock(&trace_types_lock);
+ mutex_unlock(&event_mutex);
/*
* If the first trailing whitespace is replaced with '\0' by strstrip,
@@ -7384,9 +7390,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (val != 0 && val != 1)
return -EINVAL;
+ mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
ret = set_tracer_flag(tr, 1 << index, val);
mutex_unlock(&trace_types_lock);
+ mutex_unlock(&event_mutex);
if (ret < 0)
return ret;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 654a158..2b0a01b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -326,7 +326,8 @@ void trace_event_enable_cmd_record(bool enable)
struct trace_event_file *file;
struct trace_array *tr;
- mutex_lock(&event_mutex);
+ lockdep_assert_held(&event_mutex);
+
do_for_each_event_file(tr, file) {
if (!(file->flags & EVENT_FILE_FL_ENABLED))
@@ -340,7 +341,6 @@ void trace_event_enable_cmd_record(bool enable)
clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
}
} while_for_each_event_file();
- mutex_unlock(&event_mutex);
}
void trace_event_enable_tgid_record(bool enable)
@@ -348,7 +348,8 @@ void trace_event_enable_tgid_record(bool enable)
struct trace_event_file *file;
struct trace_array *tr;
- mutex_lock(&event_mutex);
+ lockdep_assert_held(&event_mutex);
+
do_for_each_event_file(tr, file) {
if (!(file->flags & EVENT_FILE_FL_ENABLED))
continue;
@@ -362,7 +363,6 @@ void trace_event_enable_tgid_record(bool enable)
&file->flags);
}
} while_for_each_event_file();
- mutex_unlock(&event_mutex);
}
static int __ftrace_event_enable_disable(struct trace_event_file *file,
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 0fa9dad..a5a4b56 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -640,7 +640,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_migrate_task\n");
- return;
+ goto fail_deprobe_sched_switch;
}
wakeup_reset(tr);
@@ -658,6 +658,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
printk(KERN_ERR "failed to start wakeup tracer\n");
return;
+fail_deprobe_sched_switch:
+ unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 719a52a..6f9091f 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -196,6 +196,11 @@ check_stack(unsigned long ip, unsigned long *stack)
local_irq_restore(flags);
}
+/* Some archs may not define MCOUNT_INSN_SIZE */
+#ifndef MCOUNT_INSN_SIZE
+# define MCOUNT_INSN_SIZE 0
+#endif
+
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 305039b..35b2ba0 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -90,8 +90,8 @@ static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
#define DEFINE_TRACING_MAP_CMP_FN(type) \
static int tracing_map_cmp_##type(void *val_a, void *val_b) \
{ \
- type a = *(type *)val_a; \
- type b = *(type *)val_b; \
+ type a = (type)(*(u64 *)val_a); \
+ type b = (type)(*(u64 *)val_b); \
\
return (a > b) ? 1 : ((a < b) ? -1 : 0); \
}
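tracing_map stores every histogram key and value widened into a u64 slot, so dereferencing the slot through a narrower pointer type reads the wrong half of the slot on big-endian machines; the fix above loads the whole u64 and then narrows the value. A tiny userspace sketch of the failure mode, purely for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 4-byte value stored widened into an 8-byte tracing_map slot. */
	uint64_t slot = (uint64_t)(uint32_t)0x12345678;

	/* Old style: on big-endian this reads the slot's upper half, 0. */
	uint32_t narrow_deref = *(uint32_t *)&slot;

	/* Fixed style: load the whole 64-bit slot, then narrow the value. */
	uint32_t widened_read = (uint32_t)(*(uint64_t *)&slot);

	printf("narrow deref 0x%08x, widened read 0x%08x\n",
	       narrow_deref, widened_read);
	return 0;
}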
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8d0f493..750982e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -774,16 +774,21 @@
config ARCH_HAS_KCOV
bool
help
- KCOV does not have any arch-specific code, but currently it is enabled
- only for x86_64. KCOV requires testing on other archs, and most likely
- disabling of instrumentation for some early boot code.
+ An architecture should select this when it can successfully
+ build and run with CONFIG_KCOV. This typically requires
+ disabling instrumentation for some early boot code.
+
+# Upstream uses $(cc-option, -fsanitize-coverage=trace-pc), which requires
+# cc-option support. Here we instead check CC in scripts/Makefile.kcov.
+config CC_HAS_SANCOV_TRACE_PC
+ def_bool ARCH_HAS_KCOV
config KCOV
bool "Code coverage for fuzzing"
depends on ARCH_HAS_KCOV
+ depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
select DEBUG_FS
- select GCC_PLUGINS if !COMPILE_TEST
- select GCC_PLUGIN_SANCOV if !COMPILE_TEST
+ select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
help
KCOV exposes kernel code coverage information in a form suitable
for coverage-guided fuzzing (randomized testing).
@@ -794,10 +799,21 @@
For more details, see Documentation/dev-tools/kcov.rst.
+# Upstream uses $(cc-option, -fsanitize-coverage=trace-cmp), which requires
+# cc-option support. Here we instead check CC in scripts/Makefile.kcov.
+config KCOV_ENABLE_COMPARISONS
+ bool "Enable comparison operands collection by KCOV"
+ depends on KCOV
+ help
+ KCOV also exposes operands of every comparison in the instrumented
+ code along with operand sizes and PCs of the comparison instructions.
+ These operands can be used by fuzzing engines to improve the quality
+ of fuzzing coverage.
+
config KCOV_INSTRUMENT_ALL
bool "Instrument all code by default"
depends on KCOV
- default y if KCOV
+ default y
help
If you are doing generic system call fuzzing (like e.g. syzkaller),
then you will want to instrument the whole kernel and you should
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ea4cc3d..61e7240 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -437,6 +437,7 @@ void debug_dma_dump_mappings(struct device *dev)
}
spin_unlock_irqrestore(&bucket->lock, flags);
+ cond_resched();
}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);
diff --git a/mm/mmap.c b/mm/mmap.c
index d18bc9d..8edf95d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -89,12 +89,6 @@ static void unmap_region(struct mm_struct *mm,
* MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
* w: (no) no w: (no) no w: (copy) copy w: (no) no
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE:
- * r: (no) no
- * w: (no) no
- * x: (yes) yes
*/
pgprot_t protection_map[16] __ro_after_init = {
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 878f3ba..84cedf4 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -2108,6 +2108,11 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
zs_pool_dec_isolated(pool);
}
+ if (page_zone(newpage) != page_zone(page)) {
+ dec_zone_page_state(page, NR_ZSPAGES);
+ inc_zone_page_state(newpage, NR_ZSPAGES);
+ }
+
reset_page(page);
put_page(page);
page = newpage;
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 0e7afdf..235bed8 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -110,6 +110,7 @@ int vlan_check_real_dev(struct net_device *real_dev,
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+void vlan_dev_uninit(struct net_device *dev);
bool vlan_dev_inherit_address(struct net_device *dev,
struct net_device *real_dev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ac4c93c..ed3717d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -610,7 +610,8 @@ static int vlan_dev_init(struct net_device *dev)
return 0;
}
-static void vlan_dev_uninit(struct net_device *dev)
+/* Note: this function might be called multiple times for the same device. */
+void vlan_dev_uninit(struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 5e831de..fdf39dd 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -95,11 +95,13 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct ifla_vlan_flags *flags;
struct ifla_vlan_qos_mapping *m;
struct nlattr *attr;
- int rem;
+ int rem, err;
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
- vlan_dev_change_flags(dev, flags->flags, flags->mask);
+ err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
+ if (err)
+ return err;
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
@@ -110,7 +112,9 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (data[IFLA_VLAN_EGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
m = nla_data(attr);
- vlan_dev_set_egress_priority(dev, m->from, m->to);
+ err = vlan_dev_set_egress_priority(dev, m->from, m->to);
+ if (err)
+ return err;
}
}
return 0;
@@ -157,10 +161,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
err = vlan_changelink(dev, tb, data, extack);
- if (err < 0)
- return err;
-
- return register_vlan_dev(dev);
+ if (!err)
+ err = register_vlan_dev(dev);
+ if (err)
+ vlan_dev_uninit(dev);
+ return err;
}
static inline size_t vlan_qos_map_size(unsigned int n)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index bd41b78..1d085ee 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1054,8 +1054,10 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
if (!conn)
return ERR_PTR(-ENOMEM);
- if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0)
+ if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
+ hci_conn_del(conn);
return ERR_PTR(-EBUSY);
+ }
conn->state = BT_CONNECT;
set_bit(HCI_CONN_SCANNING, &conn->flags);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 6bc679c..ff80a9d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -802,8 +802,8 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
struct hci_cp_le_write_def_data_len cp;
- cp.tx_len = hdev->le_max_tx_len;
- cp.tx_time = hdev->le_max_tx_time;
+ cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
+ cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
}
@@ -4215,7 +4215,14 @@ static void hci_rx_work(struct work_struct *work)
hci_send_to_sock(hdev, skb);
}
- if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+ /* If the device has been opened in HCI_USER_CHANNEL,
+	 * userspace has exclusive access to the device.
+	 * When the device is in HCI_INIT, we still need to process
+ * the data packets to the driver in order
+ * to complete its setup().
+ */
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ !test_bit(HCI_INIT, &hdev->flags)) {
kfree_skb(skb);
continue;
}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index b73ac14..759329b 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -1095,6 +1095,14 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
instance_flags = get_adv_instance_flags(hdev, instance);
+ /* If instance already has the flags set skip adding it once
+ * again.
+ */
+ if (adv_instance && eir_get_data(adv_instance->adv_data,
+ adv_instance->adv_data_len, EIR_FLAGS,
+ NULL))
+ goto skip_flags;
+
/* The Add Advertising command allows userspace to set both the general
* and limited discoverable flags.
*/
@@ -1127,6 +1135,7 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
}
}
+skip_flags:
if (adv_instance) {
memcpy(ptr, adv_instance->adv_data,
adv_instance->adv_data_len);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index f63d991..ebdf1b0 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4918,10 +4918,8 @@ void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
chan, result, local_amp_id, remote_amp_id);
- if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
- l2cap_chan_unlock(chan);
+ if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
return;
- }
if (chan->state != BT_CONNECTED) {
l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 89936e0..6feab227 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -643,6 +643,9 @@ static unsigned int br_nf_forward_arp(void *priv,
nf_bridge_pull_encap_header(skb);
}
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
+ return NF_DROP;
+
if (arp_hdr(skb)->ar_pln != 4) {
if (IS_VLAN_ARP(skb))
nf_bridge_push_encap_header(skb);
diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c
index 20cbb72..c217276 100644
--- a/net/bridge/br_nf_core.c
+++ b/net/bridge/br_nf_core.c
@@ -26,7 +26,8 @@
#endif
static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
}
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 100b4f8..35a670ec 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1876,7 +1876,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
}
static int ebt_buf_add(struct ebt_entries_buf_state *state,
- void *data, unsigned int sz)
+ const void *data, unsigned int sz)
{
if (state->buf_kern_start == NULL)
goto count_only;
@@ -1910,7 +1910,7 @@ enum compat_mwt {
EBT_COMPAT_TARGET,
};
-static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
+static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
enum compat_mwt compat_mwt,
struct ebt_entries_buf_state *state,
const unsigned char *base)
@@ -1986,22 +1986,23 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
/* return size of all matches, watchers or target, including necessary
* alignment and padding.
*/
-static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
unsigned int size_left, enum compat_mwt type,
struct ebt_entries_buf_state *state, const void *base)
{
+ const char *buf = (const char *)match32;
int growth = 0;
- char *buf;
if (size_left == 0)
return 0;
- buf = (char *) match32;
-
- while (size_left >= sizeof(*match32)) {
+ do {
struct ebt_entry_match *match_kern;
int ret;
+ if (size_left < sizeof(*match32))
+ return -EINVAL;
+
match_kern = (struct ebt_entry_match *) state->buf_kern_start;
if (match_kern) {
char *tmp;
@@ -2038,22 +2039,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
if (match_kern)
match_kern->match_size = ret;
- /* rule should have no remaining data after target */
- if (type == EBT_COMPAT_TARGET && size_left)
- return -EINVAL;
-
match32 = (struct compat_ebt_entry_mwt *) buf;
- }
+ } while (size_left);
return growth;
}
/* called for all ebt_entry structures. */
-static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
+static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
unsigned int *total,
struct ebt_entries_buf_state *state)
{
- unsigned int i, j, startoff, new_offset = 0;
+ unsigned int i, j, startoff, next_expected_off, new_offset = 0;
/* stores match/watchers/targets & offset of next struct ebt_entry: */
unsigned int offsets[4];
unsigned int *offsets_update = NULL;
@@ -2140,11 +2137,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
return ret;
}
- startoff = state->buf_user_offset - startoff;
-
- if (WARN_ON(*total < startoff))
+ next_expected_off = state->buf_user_offset - startoff;
+ if (next_expected_off != entry->next_offset)
return -EINVAL;
- *total -= startoff;
+
+ if (*total < entry->next_offset)
+ return -EINVAL;
+ *total -= entry->next_offset;
return 0;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1ed3114..1fa68e0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1103,7 +1103,7 @@ static void neigh_update_hhs(struct neighbour *neigh)
if (update) {
hh = &neigh->hh;
- if (hh->hh_len) {
+ if (READ_ONCE(hh->hh_len)) {
write_seqlock_bh(&hh->hh_lock);
update(hh, neigh->dev, neigh->ha);
write_sequnlock_bh(&hh->hh_lock);
@@ -1376,7 +1376,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
struct net_device *dev = neigh->dev;
unsigned int seq;
- if (dev->header_ops->cache && !neigh->hh.hh_len)
+ if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
neigh_hh_init(neigh);
do {
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 144cd1a..069e3c4 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -274,6 +274,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
return ret;
}
+# ifdef CONFIG_HAVE_EBPF_JIT
static int
proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -284,6 +285,7 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
+# endif /* CONFIG_HAVE_EBPF_JIT */
static int
proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 0bd3afd0..ccc189b 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -118,7 +118,8 @@ static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb , u32 mtu);
+ struct sk_buff *skb , u32 mtu,
+ bool confirm_neigh);
static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
@@ -259,7 +260,8 @@ static int dn_dst_gc(struct dst_ops *ops)
* advertise to the other end).
*/
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct dn_route *rt = (struct dn_route *) dst;
struct neighbour *n = rt->n;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index eaeba9b..7e0e5f2 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -239,7 +239,12 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16
eth->h_proto = type;
memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
- hh->hh_len = ETH_HLEN;
+
+ /* Pairs with READ_ONCE() in neigh_resolve_output(),
+ * neigh_hh_output() and neigh_update_hhs().
+ */
+ smp_store_release(&hh->hh_len, ETH_HLEN);
+
return 0;
}
EXPORT_SYMBOL(eth_header_cache);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index f9d790b..995ef3d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -254,10 +254,11 @@ bool icmp_global_allow(void)
bool rc = false;
/* Check if token bucket is empty and cannot be refilled
- * without taking the spinlock.
+	 * without taking the spinlock. The READ_ONCE() reads are paired
+ * with the following WRITE_ONCE() in this same function.
*/
- if (!icmp_global.credit) {
- delta = min_t(u32, now - icmp_global.stamp, HZ);
+ if (!READ_ONCE(icmp_global.credit)) {
+ delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
if (delta < HZ / 50)
return false;
}
@@ -267,14 +268,14 @@ bool icmp_global_allow(void)
if (delta >= HZ / 50) {
incr = sysctl_icmp_msgs_per_sec * delta / HZ ;
if (incr)
- icmp_global.stamp = now;
+ WRITE_ONCE(icmp_global.stamp, now);
}
credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
if (credit) {
credit--;
rc = true;
}
- icmp_global.credit = credit;
+ WRITE_ONCE(icmp_global.credit, credit);
spin_unlock(&icmp_global.lock);
return rc;
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 91eceeb..6833894 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1095,7 +1095,7 @@ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
if (!dst)
goto out;
}
- dst->ops->update_pmtu(dst, sk, NULL, mtu);
+ dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
dst = __sk_dst_check(sk, 0);
if (!dst)
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 33edccf..eb158ba 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -911,11 +911,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
struct inet_listen_hashbucket *ilb;
+ struct hlist_nulls_node *node;
num = 0;
ilb = &hashinfo->listening_hash[i];
spin_lock(&ilb->lock);
- sk_for_each(sk, &ilb->head) {
+ sk_nulls_for_each(sk, node, &ilb->nulls_head) {
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 1f26627c..0af13f5 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -219,9 +219,10 @@ struct sock *__inet_lookup_listener(struct net *net,
int score, hiscore = 0, matches = 0, reuseport = 0;
bool exact_dif = inet_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
+ struct hlist_nulls_node *node;
u32 phash = 0;
- sk_for_each_rcu(sk, &ilb->head) {
+ sk_nulls_for_each_rcu(sk, node, &ilb->nulls_head) {
score = compute_score(sk, net, hnum, daddr,
dif, sdif, exact_dif);
if (score > hiscore) {
@@ -442,10 +443,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
struct inet_listen_hashbucket *ilb)
{
struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
+ const struct hlist_nulls_node *node;
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
- sk_for_each_rcu(sk2, &ilb->head) {
+ sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
if (sk2 != sk &&
sk2->sk_family == sk->sk_family &&
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
@@ -480,9 +482,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
}
if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
sk->sk_family == AF_INET6)
- hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
+ __sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
else
- hlist_add_head_rcu(&sk->sk_node, &ilb->head);
+ __sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
sock_set_flag(sk, SOCK_RCU_FREE);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
@@ -525,10 +527,7 @@ void inet_unhash(struct sock *sk)
spin_lock_bh(lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
- if (listener)
- done = __sk_del_node_init(sk);
- else
- done = __sk_nulls_del_node_init_rcu(sk);
+ done = __sk_nulls_del_node_init_rcu(sk);
if (done)
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_unlock_bh(lock);
@@ -664,7 +663,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
for (i = 0; i < INET_LHTABLE_SIZE; i++) {
spin_lock_init(&h->listening_hash[i].lock);
- INIT_HLIST_HEAD(&h->listening_hash[i].head);
+ INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
+ i + LISTENING_NULLS_BASE);
}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index f9cef27..f948814 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -159,7 +159,12 @@ static void inet_peer_gc(struct inet_peer_base *base,
base->total / inet_peer_threshold * HZ;
for (i = 0; i < gc_cnt; i++) {
p = gc_stack[i];
- delta = (__u32)jiffies - p->dtime;
+
+ /* The READ_ONCE() pairs with the WRITE_ONCE()
+ * in inet_putpeer()
+ */
+ delta = (__u32)jiffies - READ_ONCE(p->dtime);
+
if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
gc_stack[i] = NULL;
}
@@ -236,7 +241,10 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
void inet_putpeer(struct inet_peer *p)
{
- p->dtime = (__u32)jiffies;
+ /* The WRITE_ONCE() pairs with itself (we run lockless)
+ * and the READ_ONCE() in inet_peer_gc()
+ */
+ WRITE_ONCE(p->dtime, (__u32)jiffies);
if (refcount_dec_and_test(&p->refcnt))
call_rcu(&p->rcu, inetpeer_free_rcu);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 7a31287..f178416 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -521,7 +521,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
else
mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
if (!skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index c07065b..08c15dd 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -244,7 +244,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(dst);
if (skb->len > mtu) {
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 356ae7d..e288489 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -394,10 +394,11 @@ next: ;
return 1;
}
-static inline int check_target(struct arpt_entry *e, const char *name)
+static int check_target(struct arpt_entry *e, struct net *net, const char *name)
{
struct xt_entry_target *t = arpt_get_target(e);
struct xt_tgchk_param par = {
+ .net = net,
.table = name,
.entryinfo = e,
.target = t->u.kernel.target,
@@ -409,8 +410,9 @@ static inline int check_target(struct arpt_entry *e, const char *name)
return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
}
-static inline int
-find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
+static int
+find_check_entry(struct arpt_entry *e, struct net *net, const char *name,
+ unsigned int size,
struct xt_percpu_counter_alloc_state *alloc_state)
{
struct xt_entry_target *t;
@@ -429,7 +431,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
}
t->u.kernel.target = target;
- ret = check_target(e, name);
+ ret = check_target(e, net, name);
if (ret)
goto err;
return 0;
@@ -522,7 +524,9 @@ static inline void cleanup_entry(struct arpt_entry *e)
/* Checks and translates the user-supplied table segment (held in
* newinfo).
*/
-static int translate_table(struct xt_table_info *newinfo, void *entry0,
+static int translate_table(struct net *net,
+ struct xt_table_info *newinfo,
+ void *entry0,
const struct arpt_replace *repl)
{
struct xt_percpu_counter_alloc_state alloc_state = { 0 };
@@ -586,7 +590,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
/* Finally, each sanity check must pass */
i = 0;
xt_entry_foreach(iter, entry0, newinfo->size) {
- ret = find_check_entry(iter, repl->name, repl->size,
+ ret = find_check_entry(iter, net, repl->name, repl->size,
&alloc_state);
if (ret != 0)
break;
@@ -974,7 +978,7 @@ static int do_replace(struct net *net, const void __user *user,
goto free_newinfo;
}
- ret = translate_table(newinfo, loc_cpu_entry, &tmp);
+ ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
@@ -1149,7 +1153,8 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
}
}
-static int translate_compat_table(struct xt_table_info **pinfo,
+static int translate_compat_table(struct net *net,
+ struct xt_table_info **pinfo,
void **pentry0,
const struct compat_arpt_replace *compatr)
{
@@ -1217,7 +1222,7 @@ static int translate_compat_table(struct xt_table_info **pinfo,
repl.num_counters = 0;
repl.counters = NULL;
repl.size = newinfo->size;
- ret = translate_table(newinfo, entry1, &repl);
+ ret = translate_table(net, newinfo, entry1, &repl);
if (ret)
goto free_newinfo;
@@ -1270,7 +1275,7 @@ static int compat_do_replace(struct net *net, void __user *user,
goto free_newinfo;
}
- ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
+ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
@@ -1546,7 +1551,7 @@ int arpt_register_table(struct net *net,
loc_cpu_entry = newinfo->entries;
memcpy(loc_cpu_entry, repl->entries, repl->size);
- ret = translate_table(newinfo, loc_cpu_entry, repl);
+ ret = translate_table(net, newinfo, loc_cpu_entry, repl);
if (ret != 0)
goto out_free;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index de7f955..8b855d3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -145,7 +145,8 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu);
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);
@@ -1042,7 +1043,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
}
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct rtable *rt = (struct rtable *) dst;
struct flowi4 fl4;
@@ -2529,7 +2531,8 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
}
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
}
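The PMTU hunks in this series thread a new confirm_neigh flag through the dst_ops->update_pmtu() callback and route the tunnel/error paths through a *_no_confirm variant. A rough, self-contained sketch of that shape (types and names below are illustrative, not the kernel's dst API):

#include <stdbool.h>
#include <stdio.h>

struct dst_entry;

struct dst_ops {
	/* the callback now carries an explicit confirm_neigh decision */
	void (*update_pmtu)(struct dst_entry *dst, unsigned int mtu,
			    bool confirm_neigh);
};

struct dst_entry {
	const struct dst_ops *ops;
	unsigned int mtu;
};

static void generic_update_pmtu(struct dst_entry *dst, unsigned int mtu,
				bool confirm_neigh)
{
	dst->mtu = mtu;
	if (confirm_neigh)
		printf("confirming neighbour entry\n");
}

/* tunnel and ICMP error paths use a wrapper like this so a forged or
 * forwarded packet cannot confirm an unrelated neighbour entry */
static void dst_update_pmtu_no_confirm(struct dst_entry *dst, unsigned int mtu)
{
	dst->ops->update_pmtu(dst, mtu, false);
}

int main(void)
{
	const struct dst_ops ops = { .update_pmtu = generic_update_pmtu };
	struct dst_entry dst = { .ops = &ops, .mtu = 1500 };

	dst_update_pmtu_no_confirm(&dst, 1400);
	printf("mtu=%u\n", dst.mtu);
	return 0;
}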
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6988df0..5327fa6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1751,8 +1751,11 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
}
/* Ignore very old stuff early */
- if (!after(sp[used_sacks].end_seq, prior_snd_una))
+ if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
+ if (i == 0)
+ first_sack_index = -1;
continue;
+ }
used_sacks++;
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 74299c0..4ee8edb 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1936,13 +1936,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
struct inet_listen_hashbucket *ilb;
+ struct hlist_nulls_node *node;
struct sock *sk = cur;
if (!sk) {
get_head:
ilb = &tcp_hashinfo.listening_hash[st->bucket];
spin_lock(&ilb->lock);
- sk = sk_head(&ilb->head);
+ sk = sk_nulls_head(&ilb->nulls_head);
st->offset = 0;
goto get_sk;
}
@@ -1950,9 +1951,9 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
++st->num;
++st->offset;
- sk = sk_next(sk);
+ sk = sk_nulls_next(sk);
get_sk:
- sk_for_each_from(sk) {
+ sk_nulls_for_each_from(sk, node) {
if (!net_eq(sock_net(sk), net))
continue;
if (sk->sk_family == st->family)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6ab68e8..2506960 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2380,6 +2380,14 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (tcp_small_queue_check(sk, skb, 0))
break;
+ /* Argh, we hit an empty skb(), presumably a thread
+ * is sleeping in sendmsg()/sk_stream_wait_memory().
+ * We do not want to send a pure-ack packet and have
+ * a strange looking rtx queue with empty packet(s).
+ */
+ if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
+ break;
+
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b2d2117..422744a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1402,7 +1402,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
* queue contains some other skb
*/
rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
- if (rmem > (size + sk->sk_rcvbuf))
+ if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
goto uncharge_drop;
spin_lock(&list->lock);
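A small sketch of the arithmetic the (unsigned int) cast above guards, using made-up values: when sk_rcvbuf is near INT_MAX, a signed size + rcvbuf sum would overflow and break the limit check, so the sum and comparison are kept in unsigned arithmetic.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int size = 4096;		/* incoming skb charge */
	int rcvbuf = INT_MAX;		/* an extreme SO_RCVBUF setting */
	int rmem = 100000;		/* current receive-queue charge */

	/* a plain "size + rcvbuf" in int would overflow (undefined
	 * behaviour); doing it in unsigned keeps the limit meaningful */
	unsigned int limit = size + (unsigned int)rcvbuf;

	printf("over limit: %d\n", (unsigned int)rmem > limit);
	return 0;
}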
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 5952dca..08f0022 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -222,12 +222,13 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
}
static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
- path->ops->update_pmtu(path, sk, skb, mtu);
+ path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
}
static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 9a31d13..890adad 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -150,7 +150,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
if (IS_ERR(dst))
return NULL;
- dst->ops->update_pmtu(dst, sk, NULL, mtu);
+ dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
dst = inet6_csk_route_socket(sk, &fl6);
return IS_ERR(dst) ? NULL : dst;
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 228983a..24a2197 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -137,9 +137,10 @@ struct sock *inet6_lookup_listener(struct net *net,
int score, hiscore = 0, matches = 0, reuseport = 0;
bool exact_dif = inet6_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
+ struct hlist_nulls_node *node;
u32 phash = 0;
- sk_for_each(sk, &ilb->head) {
+ sk_nulls_for_each(sk, node, &ilb->nulls_head) {
score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif);
if (score > hiscore) {
reuseport = sk->sk_reuseport;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4228f3b..726ba41 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -527,7 +527,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
/* TooBig packet may have updated dst->dev's mtu */
if (dst && dst_mtu(dst) > dst->dev->mtu)
- dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+ dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
NEXTHDR_GRE);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 067fc78..5bc2788 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -652,7 +652,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (rel_info > dst_mtu(skb_dst(skb2)))
goto out;
- skb_dst_update_pmtu(skb2, rel_info);
+ skb_dst_update_pmtu_no_confirm(skb2, rel_info);
}
if (rel_type == ICMP_REDIRECT)
skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
@@ -1138,7 +1138,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
IPV6_MIN_MTU : IPV4_MIN_MTU);
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
err = -EMSGSIZE;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 6b2416b..557fe38 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -483,7 +483,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
mtu = dst_mtu(dst);
if (skb->len > mtu) {
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IPV6)) {
if (mtu < IPV6_MIN_MTU)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 56624b9..f98c37a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -93,7 +93,8 @@ static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu);
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static void rt6_dst_from_metrics_check(struct rt6_info *rt);
@@ -264,7 +265,8 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
}
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
}
@@ -1471,7 +1473,8 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
}
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
- const struct ipv6hdr *iph, u32 mtu)
+ const struct ipv6hdr *iph, u32 mtu,
+ bool confirm_neigh)
{
const struct in6_addr *daddr, *saddr;
struct rt6_info *rt6 = (struct rt6_info *)dst;
@@ -1489,7 +1492,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
daddr = NULL;
saddr = NULL;
}
- dst_confirm_neigh(dst, daddr);
+
+ if (confirm_neigh)
+ dst_confirm_neigh(dst, daddr);
+
mtu = max_t(u32, mtu, IPV6_MIN_MTU);
if (mtu >= dst_mtu(dst))
return;
@@ -1518,9 +1524,11 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
}
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
- __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
+ __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
+ confirm_neigh);
}
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
@@ -1540,7 +1548,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
dst = ip6_route_output(net, NULL, &fl6);
if (!dst->error)
- __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
+ __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index d2529c3..fb3f917 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -932,7 +932,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
}
if (tunnel->parms.iph.daddr)
- skb_dst_update_pmtu(skb, mtu);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->len > mtu && !skb_is_gso(skb)) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index d6b0122..b0d80ce 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -219,12 +219,13 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
}
static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
- path->ops->update_pmtu(path, sk, skb, mtu);
+ path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
}
static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 204a835..c29170e 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -32,7 +32,7 @@ static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
return LLC_PDU_IS_CMD(pdu) && /* command PDU */
LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID &&
- !pdu->dsap ? 0 : 1; /* NULL DSAP value */
+ !pdu->dsap; /* NULL DSAP value */
}
static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
@@ -42,7 +42,7 @@ static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
return LLC_PDU_IS_CMD(pdu) && /* command PDU */
LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST &&
- !pdu->dsap ? 0 : 1; /* NULL DSAP */
+ !pdu->dsap; /* NULL DSAP */
}
static int llc_station_ac_send_xid_r(struct sk_buff *skb)
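The two llc_station hunks above fix an operator-precedence bug: && binds tighter than ?:, so the old "conds && !dsap ? 0 : 1" returned 0 exactly when the PDU matched. A tiny stand-alone demonstration with dummy flags:

#include <stdio.h>

int main(void)
{
	int is_cmd = 1, is_u_type = 1, dsap = 0;	/* a matching PDU */

	/* old form: the whole && chain feeds the ternary, inverting it */
	int old_style = is_cmd && is_u_type && !dsap ? 0 : 1;

	/* fixed form: return the condition itself */
	int fixed = is_cmd && is_u_type && !dsap;

	printf("old=%d fixed=%d\n", old_style, fixed);	/* old=0 fixed=1 */
	return 0;
}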
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index b18466c..fbe7354 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -856,7 +856,8 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
I802_DEBUG_INC(local->dot11FailedCount);
}
- if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
+ if ((ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+ ieee80211_has_pm(fc) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
local->ps_sdata && !(local->scanning)) {
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 94d74ec..c2b21c9 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1639,6 +1639,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
struct ip_set *set;
struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
int ret = 0;
+ u32 lineno;
if (unlikely(protocol_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
@@ -1655,7 +1656,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
return -IPSET_ERR_PROTOCOL;
rcu_read_lock_bh();
- ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0);
+ ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
rcu_read_unlock_bh();
/* Userspace can't trigger element to be re-added */
if (ret == -EAGAIN)
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 4527921..97d4110 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -209,7 +209,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
struct rtable *ort = skb_rtable(skb);
if (!skb->dev && sk && sk_fullsock(sk))
- ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
+ ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true);
}
static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 08b31b1..829b897 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -3437,6 +3437,9 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
list_for_each_entry(net, net_exit_list, exit_list)
ctnetlink_net_exit(net);
+
+ /* wait for other cpus until they are done with ctnl_notifiers */
+ synchronize_rcu();
}
static struct pernet_operations ctnetlink_net_ops = {
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 37efcc1..b06ef4c 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -138,7 +138,7 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
goto err;
}
- if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
+ if (skb_dst(skb) && !skb_dst_force(skb)) {
status = -ENETDOWN;
goto err;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 7ef1264..91490446 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3917,14 +3917,20 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (nla[NFTA_SET_ELEM_DATA] == NULL &&
!(flags & NFT_SET_ELEM_INTERVAL_END))
return -EINVAL;
- if (nla[NFTA_SET_ELEM_DATA] != NULL &&
- flags & NFT_SET_ELEM_INTERVAL_END)
- return -EINVAL;
} else {
if (nla[NFTA_SET_ELEM_DATA] != NULL)
return -EINVAL;
}
+ if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
+ (nla[NFTA_SET_ELEM_DATA] ||
+ nla[NFTA_SET_ELEM_OBJREF] ||
+ nla[NFTA_SET_ELEM_TIMEOUT] ||
+ nla[NFTA_SET_ELEM_EXPIRATION] ||
+ nla[NFTA_SET_ELEM_USERDATA] ||
+ nla[NFTA_SET_ELEM_EXPR]))
+ return -EINVAL;
+
timeout = 0;
if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
index 8d104c1..6f5addb 100644
--- a/net/nfc/nci/uart.c
+++ b/net/nfc/nci/uart.c
@@ -348,7 +348,7 @@ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data,
nu->rx_packet_len = -1;
nu->rx_skb = nci_skb_alloc(nu->ndev,
NCI_MAX_PACKET_SIZE,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!nu->rx_skb)
return -ENOMEM;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4799e37..5aa11d1 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -593,7 +593,8 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
msec = 1;
div = ecmd.base.speed / 1000;
}
- }
+ } else
+ return DEFAULT_PRB_RETIRE_TOV;
mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index d223a86e..fdca887 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -993,10 +993,13 @@ static void rfkill_sync_work(struct work_struct *work)
int __must_check rfkill_register(struct rfkill *rfkill)
{
static unsigned long rfkill_no;
- struct device *dev = &rfkill->dev;
+ struct device *dev;
int error;
- BUG_ON(!rfkill);
+ if (!rfkill)
+ return -EINVAL;
+
+ dev = &rfkill->dev;
mutex_lock(&rfkill_global_mutex);
@@ -1307,10 +1310,12 @@ static const struct file_operations rfkill_fops = {
.llseek = no_llseek,
};
+#define RFKILL_NAME "rfkill"
+
static struct miscdevice rfkill_miscdev = {
- .name = "rfkill",
.fops = &rfkill_fops,
- .minor = MISC_DYNAMIC_MINOR,
+ .name = RFKILL_NAME,
+ .minor = RFKILL_MINOR,
};
static int __init rfkill_init(void)
@@ -1362,3 +1367,6 @@ static void __exit rfkill_exit(void)
class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);
+
+MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
+MODULE_ALIAS("devname:" RFKILL_NAME);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 7f74950..7d73e8c 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -150,6 +150,9 @@ void rxrpc_error_report(struct sock *sk)
struct rxrpc_peer *peer;
struct sk_buff *skb;
+ if (unlikely(!local))
+ return;
+
_enter("%p{%d}", sk, local->debug_id);
skb = sock_dequeue_err_skb(sk);
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index f50eb87..7a944f5 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -734,7 +734,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_FQ_QUANTUM]) {
u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
- if (quantum > 0)
+ if (quantum > 0 && quantum <= (1 << 20))
q->quantum = quantum;
else
err = -EINVAL;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index cc997ac..a8fc70a 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -265,8 +265,14 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct prio_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
- if (new == NULL)
- new = &noop_qdisc;
+ if (!new) {
+ new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ TC_H_MAKE(sch->handle, arg));
+ if (!new)
+ new = &noop_qdisc;
+ else
+ qdisc_hash_add(new, true);
+ }
*old = qdisc_replace(sch, new, &q->queues[band]);
return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 01f88e9..bf39f31 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -253,6 +253,7 @@ static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
sa->sin_port = sh->dest;
sa->sin_addr.s_addr = ip_hdr(skb)->daddr;
}
+ memset(sa->sin_zero, 0, sizeof(sa->sin_zero));
}
/* Initialize an sctp_addr from a socket. */
@@ -261,6 +262,7 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
addr->v4.sin_family = AF_INET;
addr->v4.sin_port = 0;
addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
}
/* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -283,6 +285,7 @@ static void sctp_v4_from_addr_param(union sctp_addr *addr,
addr->v4.sin_family = AF_INET;
addr->v4.sin_port = port;
addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
}
/* Initialize an address parameter from a sctp_addr and return the length
@@ -307,6 +310,7 @@ static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
saddr->v4.sin_family = AF_INET;
saddr->v4.sin_port = port;
saddr->v4.sin_addr.s_addr = fl4->saddr;
+ memset(saddr->v4.sin_zero, 0, sizeof(saddr->v4.sin_zero));
}
/* Compare two addresses exactly. */
@@ -329,6 +333,7 @@ static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
addr->v4.sin_family = AF_INET;
addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
addr->v4.sin_port = port;
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
}
/* Is this a wildcard address? */
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index bf0c61a..482bb0a 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1359,8 +1359,10 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
/* Generate an INIT ACK chunk. */
new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
0);
- if (!new_obj)
- goto nomem;
+ if (!new_obj) {
+ error = -ENOMEM;
+ break;
+ }
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
@@ -1382,7 +1384,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
if (!new_obj) {
if (cmd->obj.chunk)
sctp_chunk_free(cmd->obj.chunk);
- goto nomem;
+ error = -ENOMEM;
+ break;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
@@ -1429,8 +1432,10 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
/* Generate a SHUTDOWN chunk. */
new_obj = sctp_make_shutdown(asoc, chunk);
- if (!new_obj)
- goto nomem;
+ if (!new_obj) {
+ error = -ENOMEM;
+ break;
+ }
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
break;
@@ -1760,11 +1765,17 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
break;
}
- if (error)
+ if (error) {
+ cmd = sctp_next_cmd(commands);
+ while (cmd) {
+ if (cmd->verb == SCTP_CMD_REPLY)
+ sctp_chunk_free(cmd->obj.chunk);
+ cmd = sctp_next_cmd(commands);
+ }
break;
+ }
}
-out:
/* If this is in response to a received chunk, wait until
* we are done with the packet to open the queue so that we don't
* send multiple packets in response to a single request.
@@ -1779,8 +1790,5 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
sp->data_ready_signalled = 0;
return error;
-nomem:
- error = -ENOMEM;
- goto out;
}
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 274df89..4c55b75 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -272,7 +272,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
pf->af->from_sk(&addr, sk);
pf->to_sk_daddr(&t->ipaddr, sk);
- dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+ dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
pf->to_sk_daddr(&addr, sk);
dst = sctp_transport_dst_check(t);
diff --git a/net/socket.c b/net/socket.c
index a29179e..454033e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -903,7 +903,7 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to)
.msg_iocb = iocb};
ssize_t res;
- if (file->f_flags & O_NONBLOCK)
+ if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT))
msg.msg_flags = MSG_DONTWAIT;
if (iocb->ki_pos != 0)
@@ -928,7 +928,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (iocb->ki_pos != 0)
return -ESPIPE;
- if (file->f_flags & O_NONBLOCK)
+ if (file->f_flags & O_NONBLOCK || (iocb->ki_flags & IOCB_NOWAIT))
msg.msg_flags = MSG_DONTWAIT;
if (sock->type == SOCK_SEQPACKET)
diff --git a/samples/bpf/cgroup_helpers.c b/samples/bpf/cgroup_helpers.c
index 09afadd..b5c09cd 100644
--- a/samples/bpf/cgroup_helpers.c
+++ b/samples/bpf/cgroup_helpers.c
@@ -43,7 +43,7 @@
*/
int setup_cgroup_environment(void)
{
- char cgroup_workdir[PATH_MAX + 1];
+ char cgroup_workdir[PATH_MAX - 24];
format_cgroup_path(cgroup_workdir, "");
diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c
index 9149c52..8833aac 100644
--- a/samples/bpf/syscall_tp_kern.c
+++ b/samples/bpf/syscall_tp_kern.c
@@ -50,13 +50,27 @@ static __always_inline void count(void *map)
SEC("tracepoint/syscalls/sys_enter_open")
int trace_enter_open(struct syscalls_enter_open_args *ctx)
{
- count((void *)&enter_open_map);
+ count(&enter_open_map);
+ return 0;
+}
+
+SEC("tracepoint/syscalls/sys_enter_openat")
+int trace_enter_open_at(struct syscalls_enter_open_args *ctx)
+{
+ count(&enter_open_map);
return 0;
}
SEC("tracepoint/syscalls/sys_exit_open")
int trace_enter_exit(struct syscalls_exit_open_args *ctx)
{
- count((void *)&exit_open_map);
+ count(&exit_open_map);
+ return 0;
+}
+
+SEC("tracepoint/syscalls/sys_exit_openat")
+int trace_enter_exit_at(struct syscalls_exit_open_args *ctx)
+{
+ count(&exit_open_map);
return 0;
}
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
index c7d525e..8c74458 100644
--- a/samples/bpf/trace_event_user.c
+++ b/samples/bpf/trace_event_user.c
@@ -34,9 +34,9 @@ static void print_ksym(__u64 addr)
return;
sym = ksym_search(addr);
printf("%s;", sym->name);
- if (!strcmp(sym->name, "sys_read"))
+ if (!strstr(sym->name, "sys_read"))
sys_read_seen = true;
- else if (!strcmp(sym->name, "sys_write"))
+ else if (!strstr(sym->name, "sys_write"))
sys_write_seen = true;
}
diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh
index 205e4cd..065a7e2 100644
--- a/samples/pktgen/functions.sh
+++ b/samples/pktgen/functions.sh
@@ -5,6 +5,8 @@
# Author: Jesper Dangaaard Brouer
# License: GPL
+set -o errexit
+
## -- General shell logging cmds --
function err() {
local exitcode=$1
@@ -58,6 +60,7 @@
function proc_cmd() {
local result
local proc_file=$1
+ local status=0
# after shift, the remaining args are contained in $@
shift
local proc_ctrl=${PROC_DIR}/$proc_file
@@ -73,13 +76,13 @@
echo "cmd: $@ > $proc_ctrl"
fi
# Quoting of "$@" is important for space expansion
- echo "$@" > "$proc_ctrl"
- local status=$?
+ echo "$@" > "$proc_ctrl" || status=$?
- result=$(grep "Result: OK:" $proc_ctrl)
- # Due to pgctrl, cannot use exit code $? from grep
- if [[ "$result" == "" ]]; then
- grep "Result:" $proc_ctrl >&2
+ if [[ "$proc_file" != "pgctrl" ]]; then
+ result=$(grep "Result: OK:" $proc_ctrl) || true
+ if [[ "$result" == "" ]]; then
+ grep "Result:" $proc_ctrl >&2
+ fi
fi
if (( $status != 0 )); then
err 5 "Write error($status) occurred cmd: \"$@ > $proc_ctrl\""
@@ -105,6 +108,8 @@
fi
}
+[[ $EUID -eq 0 ]] && trap 'pg_ctrl "reset"' EXIT
+
## -- General shell tricks --
function root_check_run_with_sudo() {
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index b2a95af..708c8f6 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -14,16 +14,12 @@
endif
ifdef CONFIG_GCC_PLUGIN_SANCOV
- ifeq ($(CFLAGS_KCOV),)
# It is needed because of the gcc-plugin.sh and gcc version checks.
gcc-plugin-$(CONFIG_GCC_PLUGIN_SANCOV) += sancov_plugin.so
- ifneq ($(PLUGINCC),)
- CFLAGS_KCOV := $(SANCOV_PLUGIN)
- else
+ ifeq ($(PLUGINCC),)
$(warning warning: cannot use CONFIG_KCOV: -fsanitize-coverage=trace-pc is not supported by compiler)
endif
- endif
endif
gcc-plugin-$(CONFIG_GCC_PLUGIN_STRUCTLEAK) += structleak_plugin.so
@@ -38,7 +34,7 @@
GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR
- export SANCOV_PLUGIN DISABLE_LATENT_ENTROPY_PLUGIN
+ export DISABLE_LATENT_ENTROPY_PLUGIN
ifneq ($(PLUGINCC),)
# SANCOV_PLUGIN can be only in CFLAGS_KCOV because avoid duplication.
diff --git a/scripts/Makefile.kcov b/scripts/Makefile.kcov
new file mode 100644
index 0000000..945724d2
--- /dev/null
+++ b/scripts/Makefile.kcov
@@ -0,0 +1,25 @@
+ifdef CONFIG_KCOV
+
+ifeq ($(call cc-option, -fsanitize-coverage=trace-pc -Werror),)
+ ifneq ($(CONFIG_COMPILE_TEST),y)
+ $(error Cannot use CONFIG_KCOV: \
+ -fsanitize-coverage=trace-pc is not supported by compiler)
+ endif
+endif
+
+ifdef CONFIG_KCOV_ENABLE_COMPARISONS
+ ifeq ($(call cc-option, -fsanitize-coverage=trace-cmp -Werror),)
+ ifneq ($(CONFIG_COMPILE_TEST),y)
+ $(error Cannot use CONFIG_KCOV_ENABLE_COMPARISONS: \
+ -fsanitize-coverage=trace-cmp is not supported by compiler)
+ endif
+ endif
+endif
+
+kcov-flags-$(CONFIG_CC_HAS_SANCOV_TRACE_PC) += -fsanitize-coverage=trace-pc
+kcov-flags-$(CONFIG_KCOV_ENABLE_COMPARISONS) += -fsanitize-coverage=trace-cmp
+kcov-flags-$(CONFIG_GCC_PLUGIN_SANCOV) += -fplugin=$(objtree)/scripts/gcc-plugins/sancov_plugin.so
+
+export CFLAGS_KCOV := $(kcov-flags-y)
+
+endif
diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
index e2ff425..ea46579 100644
--- a/scripts/gcc-plugins/Makefile
+++ b/scripts/gcc-plugins/Makefile
@@ -13,10 +13,6 @@
export HOST_EXTRACXXFLAGS
endif
-ifneq ($(CFLAGS_KCOV), $(SANCOV_PLUGIN))
- GCC_PLUGIN := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGIN))
-endif
-
export HOSTLIBS
$(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index b471022..b435318 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -510,6 +510,8 @@ static void build_initial_tok_table(void)
table[pos] = table[i];
learn_symbol(table[pos].sym, table[pos].len);
pos++;
+ } else {
+ free(table[i].sym);
}
}
table_cnt = pos;
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index ed29bad..96420b6 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -201,6 +201,13 @@ static int expr_eq(struct expr *e1, struct expr *e2)
{
int res, old_count;
+ /*
+ * A NULL expr is taken to be yes, but there's also a different way to
+ * represent yes. expr_is_yes() checks for either representation.
+ */
+ if (!e1 || !e2)
+ return expr_is_yes(e1) && expr_is_yes(e2);
+
if (e1->type != e2->type)
return 0;
switch (e1->type) {
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index c5b99b9..ea63710 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -1463,11 +1463,13 @@ static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
/* helper macro for snprint routines */
#define update_for_len(total, len, size, str) \
do { \
+ size_t ulen = len; \
+ \
AA_BUG(len < 0); \
- total += len; \
- len = min(len, size); \
- size -= len; \
- str += len; \
+ total += ulen; \
+ ulen = min(ulen, size); \
+ size -= ulen; \
+ str += ulen; \
} while (0)
/**
@@ -1602,7 +1604,7 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
struct aa_ns *prev_ns = NULL;
struct label_it i;
int count = 0, total = 0;
- size_t len;
+ ssize_t len;
AA_BUG(!str && size != 0);
AA_BUG(!label);
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index 8af7a69..8297e48 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -99,3 +99,15 @@
via /selinux/checkreqprot if authorized by policy.
If you are unsure how to answer this question, answer 0.
+
+config SECURITY_SELINUX_SIDTAB_HASH_BITS
+ int "NSA SELinux sidtab hashtable size"
+ depends on SECURITY_SELINUX
+ range 8 13
+ default 9
+ help
+ This option sets the number of buckets used in the sidtab hashtable
+ to 2^SECURITY_SELINUX_SIDTAB_HASH_BITS buckets. The number of hash
+ collisions may be viewed at /sys/fs/selinux/ss/sidtab_hash_stats. If
+ chain lengths are high (e.g. > 20) then selecting a higher value here
+ will ensure that lookups times are short and stable.
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 491ede7..245e707 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -82,14 +82,42 @@ struct avc_callback_node {
struct avc_callback_node *next;
};
-/* Exported via selinuxfs */
-unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;
-
#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
#endif
-static struct avc_cache avc_cache;
+struct selinux_avc {
+ unsigned int avc_cache_threshold;
+ struct avc_cache avc_cache;
+};
+
+static struct selinux_avc selinux_avc;
+
+void selinux_avc_init(struct selinux_avc **avc)
+{
+ int i;
+
+ selinux_avc.avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;
+ for (i = 0; i < AVC_CACHE_SLOTS; i++) {
+ INIT_HLIST_HEAD(&selinux_avc.avc_cache.slots[i]);
+ spin_lock_init(&selinux_avc.avc_cache.slots_lock[i]);
+ }
+ atomic_set(&selinux_avc.avc_cache.active_nodes, 0);
+ atomic_set(&selinux_avc.avc_cache.lru_hint, 0);
+ *avc = &selinux_avc;
+}
+
+unsigned int avc_get_cache_threshold(struct selinux_avc *avc)
+{
+ return avc->avc_cache_threshold;
+}
+
+void avc_set_cache_threshold(struct selinux_avc *avc,
+ unsigned int cache_threshold)
+{
+ avc->avc_cache_threshold = cache_threshold;
+}
+
static struct avc_callback_node *avc_callbacks;
static struct kmem_cache *avc_node_cachep;
static struct kmem_cache *avc_xperms_data_cachep;
@@ -143,13 +171,14 @@ static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
* @tsid: target security identifier
* @tclass: target security class
*/
-static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass)
+static void avc_dump_query(struct audit_buffer *ab, struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass)
{
int rc;
char *scontext;
u32 scontext_len;
- rc = security_sid_to_context(ssid, &scontext, &scontext_len);
+ rc = security_sid_to_context(state, ssid, &scontext, &scontext_len);
if (rc)
audit_log_format(ab, "ssid=%d", ssid);
else {
@@ -157,7 +186,7 @@ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tcla
kfree(scontext);
}
- rc = security_sid_to_context(tsid, &scontext, &scontext_len);
+ rc = security_sid_to_context(state, tsid, &scontext, &scontext_len);
if (rc)
audit_log_format(ab, " tsid=%d", tsid);
else {
@@ -176,15 +205,6 @@ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tcla
*/
void __init avc_init(void)
{
- int i;
-
- for (i = 0; i < AVC_CACHE_SLOTS; i++) {
- INIT_HLIST_HEAD(&avc_cache.slots[i]);
- spin_lock_init(&avc_cache.slots_lock[i]);
- }
- atomic_set(&avc_cache.active_nodes, 0);
- atomic_set(&avc_cache.lru_hint, 0);
-
avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
0, SLAB_PANIC, NULL);
avc_xperms_cachep = kmem_cache_create("avc_xperms_node",
@@ -199,7 +219,7 @@ void __init avc_init(void)
0, SLAB_PANIC, NULL);
}
-int avc_get_hash_stats(char *page)
+int avc_get_hash_stats(struct selinux_avc *avc, char *page)
{
int i, chain_len, max_chain_len, slots_used;
struct avc_node *node;
@@ -210,7 +230,7 @@ int avc_get_hash_stats(char *page)
slots_used = 0;
max_chain_len = 0;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
- head = &avc_cache.slots[i];
+ head = &avc->avc_cache.slots[i];
if (!hlist_empty(head)) {
slots_used++;
chain_len = 0;
@@ -225,7 +245,7 @@ int avc_get_hash_stats(char *page)
return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
"longest chain: %d\n",
- atomic_read(&avc_cache.active_nodes),
+ atomic_read(&avc->avc_cache.active_nodes),
slots_used, AVC_CACHE_SLOTS, max_chain_len);
}
@@ -464,11 +484,12 @@ static inline u32 avc_xperms_audit_required(u32 requested,
return audited;
}
-static inline int avc_xperms_audit(u32 ssid, u32 tsid, u16 tclass,
- u32 requested, struct av_decision *avd,
- struct extended_perms_decision *xpd,
- u8 perm, int result,
- struct common_audit_data *ad)
+static inline int avc_xperms_audit(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, struct av_decision *avd,
+ struct extended_perms_decision *xpd,
+ u8 perm, int result,
+ struct common_audit_data *ad)
{
u32 audited, denied;
@@ -476,7 +497,7 @@ static inline int avc_xperms_audit(u32 ssid, u32 tsid, u16 tclass,
requested, avd, xpd, perm, result, &denied);
if (likely(!audited))
return 0;
- return slow_avc_audit(ssid, tsid, tclass, requested,
+ return slow_avc_audit(state, ssid, tsid, tclass, requested,
audited, denied, result, ad, 0);
}
@@ -488,29 +509,30 @@ static void avc_node_free(struct rcu_head *rhead)
avc_cache_stats_incr(frees);
}
-static void avc_node_delete(struct avc_node *node)
+static void avc_node_delete(struct selinux_avc *avc, struct avc_node *node)
{
hlist_del_rcu(&node->list);
call_rcu(&node->rhead, avc_node_free);
- atomic_dec(&avc_cache.active_nodes);
+ atomic_dec(&avc->avc_cache.active_nodes);
}
-static void avc_node_kill(struct avc_node *node)
+static void avc_node_kill(struct selinux_avc *avc, struct avc_node *node)
{
avc_xperms_free(node->ae.xp_node);
kmem_cache_free(avc_node_cachep, node);
avc_cache_stats_incr(frees);
- atomic_dec(&avc_cache.active_nodes);
+ atomic_dec(&avc->avc_cache.active_nodes);
}
-static void avc_node_replace(struct avc_node *new, struct avc_node *old)
+static void avc_node_replace(struct selinux_avc *avc,
+ struct avc_node *new, struct avc_node *old)
{
hlist_replace_rcu(&old->list, &new->list);
call_rcu(&old->rhead, avc_node_free);
- atomic_dec(&avc_cache.active_nodes);
+ atomic_dec(&avc->avc_cache.active_nodes);
}
-static inline int avc_reclaim_node(void)
+static inline int avc_reclaim_node(struct selinux_avc *avc)
{
struct avc_node *node;
int hvalue, try, ecx;
@@ -519,16 +541,17 @@ static inline int avc_reclaim_node(void)
spinlock_t *lock;
for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
- hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
- head = &avc_cache.slots[hvalue];
- lock = &avc_cache.slots_lock[hvalue];
+ hvalue = atomic_inc_return(&avc->avc_cache.lru_hint) &
+ (AVC_CACHE_SLOTS - 1);
+ head = &avc->avc_cache.slots[hvalue];
+ lock = &avc->avc_cache.slots_lock[hvalue];
if (!spin_trylock_irqsave(lock, flags))
continue;
rcu_read_lock();
hlist_for_each_entry(node, head, list) {
- avc_node_delete(node);
+ avc_node_delete(avc, node);
avc_cache_stats_incr(reclaims);
ecx++;
if (ecx >= AVC_CACHE_RECLAIM) {
@@ -544,7 +567,7 @@ static inline int avc_reclaim_node(void)
return ecx;
}
-static struct avc_node *avc_alloc_node(void)
+static struct avc_node *avc_alloc_node(struct selinux_avc *avc)
{
struct avc_node *node;
@@ -555,8 +578,9 @@ static struct avc_node *avc_alloc_node(void)
INIT_HLIST_NODE(&node->list);
avc_cache_stats_incr(allocations);
- if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
- avc_reclaim_node();
+ if (atomic_inc_return(&avc->avc_cache.active_nodes) >
+ avc->avc_cache_threshold)
+ avc_reclaim_node(avc);
out:
return node;
@@ -570,14 +594,15 @@ static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tcl
memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
}
-static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
+static inline struct avc_node *avc_search_node(struct selinux_avc *avc,
+ u32 ssid, u32 tsid, u16 tclass)
{
struct avc_node *node, *ret = NULL;
int hvalue;
struct hlist_head *head;
hvalue = avc_hash(ssid, tsid, tclass);
- head = &avc_cache.slots[hvalue];
+ head = &avc->avc_cache.slots[hvalue];
hlist_for_each_entry_rcu(node, head, list) {
if (ssid == node->ae.ssid &&
tclass == node->ae.tclass &&
@@ -602,12 +627,13 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
* then this function returns the avc_node.
* Otherwise, this function returns NULL.
*/
-static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
+static struct avc_node *avc_lookup(struct selinux_avc *avc,
+ u32 ssid, u32 tsid, u16 tclass)
{
struct avc_node *node;
avc_cache_stats_incr(lookups);
- node = avc_search_node(ssid, tsid, tclass);
+ node = avc_search_node(avc, ssid, tsid, tclass);
if (node)
return node;
@@ -616,7 +642,8 @@ static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
return NULL;
}
-static int avc_latest_notif_update(int seqno, int is_insert)
+static int avc_latest_notif_update(struct selinux_avc *avc,
+ int seqno, int is_insert)
{
int ret = 0;
static DEFINE_SPINLOCK(notif_lock);
@@ -624,14 +651,14 @@ static int avc_latest_notif_update(int seqno, int is_insert)
spin_lock_irqsave(&notif_lock, flag);
if (is_insert) {
- if (seqno < avc_cache.latest_notif) {
+ if (seqno < avc->avc_cache.latest_notif) {
printk(KERN_WARNING "SELinux: avc: seqno %d < latest_notif %d\n",
- seqno, avc_cache.latest_notif);
+ seqno, avc->avc_cache.latest_notif);
ret = -EAGAIN;
}
} else {
- if (seqno > avc_cache.latest_notif)
- avc_cache.latest_notif = seqno;
+ if (seqno > avc->avc_cache.latest_notif)
+ avc->avc_cache.latest_notif = seqno;
}
spin_unlock_irqrestore(&notif_lock, flag);
@@ -656,18 +683,19 @@ static int avc_latest_notif_update(int seqno, int is_insert)
* the access vectors into a cache entry, returns
* avc_node inserted. Otherwise, this function returns NULL.
*/
-static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass,
- struct av_decision *avd,
- struct avc_xperms_node *xp_node)
+static struct avc_node *avc_insert(struct selinux_avc *avc,
+ u32 ssid, u32 tsid, u16 tclass,
+ struct av_decision *avd,
+ struct avc_xperms_node *xp_node)
{
struct avc_node *pos, *node = NULL;
int hvalue;
unsigned long flag;
- if (avc_latest_notif_update(avd->seqno, 1))
+ if (avc_latest_notif_update(avc, avd->seqno, 1))
goto out;
- node = avc_alloc_node();
+ node = avc_alloc_node(avc);
if (node) {
struct hlist_head *head;
spinlock_t *lock;
@@ -680,15 +708,15 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass,
kmem_cache_free(avc_node_cachep, node);
return NULL;
}
- head = &avc_cache.slots[hvalue];
- lock = &avc_cache.slots_lock[hvalue];
+ head = &avc->avc_cache.slots[hvalue];
+ lock = &avc->avc_cache.slots_lock[hvalue];
spin_lock_irqsave(lock, flag);
hlist_for_each_entry(pos, head, list) {
if (pos->ae.ssid == ssid &&
pos->ae.tsid == tsid &&
pos->ae.tclass == tclass) {
- avc_node_replace(node, pos);
+ avc_node_replace(avc, node, pos);
goto found;
}
}
@@ -726,9 +754,10 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
audit_log_format(ab, " ");
- avc_dump_query(ab, ad->selinux_audit_data->ssid,
- ad->selinux_audit_data->tsid,
- ad->selinux_audit_data->tclass);
+ avc_dump_query(ab, ad->selinux_audit_data->state,
+ ad->selinux_audit_data->ssid,
+ ad->selinux_audit_data->tsid,
+ ad->selinux_audit_data->tclass);
if (ad->selinux_audit_data->denied) {
audit_log_format(ab, " permissive=%u",
ad->selinux_audit_data->result ? 0 : 1);
@@ -736,10 +765,11 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
}
/* This is the slow part of avc audit with big stack footprint */
-noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
- u32 requested, u32 audited, u32 denied, int result,
- struct common_audit_data *a,
- unsigned flags)
+noinline int slow_avc_audit(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, u32 audited, u32 denied, int result,
+ struct common_audit_data *a,
+ unsigned int flags)
{
struct common_audit_data stack_data;
struct selinux_audit_data sad;
@@ -767,6 +797,7 @@ noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
sad.audited = audited;
sad.denied = denied;
sad.result = result;
+ sad.state = state;
a->selinux_audit_data = &sad;
@@ -815,10 +846,11 @@ int __init avc_add_callback(int (*callback)(u32 event), u32 events)
* otherwise, this function updates the AVC entry. The original AVC-entry object
* will release later by RCU.
*/
-static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
- u32 tsid, u16 tclass, u32 seqno,
- struct extended_perms_decision *xpd,
- u32 flags)
+static int avc_update_node(struct selinux_avc *avc,
+ u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
+ u32 tsid, u16 tclass, u32 seqno,
+ struct extended_perms_decision *xpd,
+ u32 flags)
{
int hvalue, rc = 0;
unsigned long flag;
@@ -826,7 +858,7 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
struct hlist_head *head;
spinlock_t *lock;
- node = avc_alloc_node();
+ node = avc_alloc_node(avc);
if (!node) {
rc = -ENOMEM;
goto out;
@@ -835,8 +867,8 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
/* Lock the target slot */
hvalue = avc_hash(ssid, tsid, tclass);
- head = &avc_cache.slots[hvalue];
- lock = &avc_cache.slots_lock[hvalue];
+ head = &avc->avc_cache.slots[hvalue];
+ lock = &avc->avc_cache.slots_lock[hvalue];
spin_lock_irqsave(lock, flag);
@@ -852,7 +884,7 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
if (!orig) {
rc = -ENOENT;
- avc_node_kill(node);
+ avc_node_kill(avc, node);
goto out_unlock;
}
@@ -896,7 +928,7 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
avc_add_xperms_decision(node, xpd);
break;
}
- avc_node_replace(node, orig);
+ avc_node_replace(avc, node, orig);
out_unlock:
spin_unlock_irqrestore(lock, flag);
out:
@@ -906,7 +938,7 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
/**
* avc_flush - Flush the cache
*/
-static void avc_flush(void)
+static void avc_flush(struct selinux_avc *avc)
{
struct hlist_head *head;
struct avc_node *node;
@@ -915,8 +947,8 @@ static void avc_flush(void)
int i;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
- head = &avc_cache.slots[i];
- lock = &avc_cache.slots_lock[i];
+ head = &avc->avc_cache.slots[i];
+ lock = &avc->avc_cache.slots_lock[i];
spin_lock_irqsave(lock, flag);
/*
@@ -925,7 +957,7 @@ static void avc_flush(void)
*/
rcu_read_lock();
hlist_for_each_entry(node, head, list)
- avc_node_delete(node);
+ avc_node_delete(avc, node);
rcu_read_unlock();
spin_unlock_irqrestore(lock, flag);
}
@@ -935,12 +967,12 @@ static void avc_flush(void)
* avc_ss_reset - Flush the cache and revalidate migrated permissions.
* @seqno: policy sequence number
*/
-int avc_ss_reset(u32 seqno)
+int avc_ss_reset(struct selinux_avc *avc, u32 seqno)
{
struct avc_callback_node *c;
int rc = 0, tmprc;
- avc_flush();
+ avc_flush(avc);
for (c = avc_callbacks; c; c = c->next) {
if (c->events & AVC_CALLBACK_RESET) {
@@ -952,7 +984,7 @@ int avc_ss_reset(u32 seqno)
}
}
- avc_latest_notif_update(seqno, 0);
+ avc_latest_notif_update(avc, seqno, 0);
return rc;
}
@@ -965,30 +997,34 @@ int avc_ss_reset(u32 seqno)
* Don't inline this, since it's the slow-path and just
* results in a bigger stack frame.
*/
-static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
- u16 tclass, struct av_decision *avd,
- struct avc_xperms_node *xp_node)
+static noinline
+struct avc_node *avc_compute_av(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd,
+ struct avc_xperms_node *xp_node)
{
rcu_read_unlock();
INIT_LIST_HEAD(&xp_node->xpd_head);
- security_compute_av(ssid, tsid, tclass, avd, &xp_node->xp);
+ security_compute_av(state, ssid, tsid, tclass, avd, &xp_node->xp);
rcu_read_lock();
- return avc_insert(ssid, tsid, tclass, avd, xp_node);
+ return avc_insert(state->avc, ssid, tsid, tclass, avd, xp_node);
}
-static noinline int avc_denied(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- u8 driver, u8 xperm, unsigned flags,
- struct av_decision *avd)
+static noinline int avc_denied(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ u8 driver, u8 xperm, unsigned int flags,
+ struct av_decision *avd)
{
if (flags & AVC_STRICT)
return -EACCES;
- if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
+ if (enforcing_enabled(state) &&
+ !(avd->flags & AVD_FLAGS_PERMISSIVE))
return -EACCES;
- avc_update_node(AVC_CALLBACK_GRANT, requested, driver, xperm, ssid,
- tsid, tclass, avd->seqno, NULL, flags);
+ avc_update_node(state->avc, AVC_CALLBACK_GRANT, requested, driver,
+ xperm, ssid, tsid, tclass, avd->seqno, NULL, flags);
return 0;
}
@@ -999,8 +1035,9 @@ static noinline int avc_denied(u32 ssid, u32 tsid,
* as-is the case with ioctls, then multiple may be chained together and the
* driver field is used to specify which set contains the permission.
*/
-int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
- u8 driver, u8 xperm, struct common_audit_data *ad)
+int avc_has_extended_perms(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ u8 driver, u8 xperm, struct common_audit_data *ad)
{
struct avc_node *node;
struct av_decision avd;
@@ -1019,9 +1056,9 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
rcu_read_lock();
- node = avc_lookup(ssid, tsid, tclass);
+ node = avc_lookup(state->avc, ssid, tsid, tclass);
if (unlikely(!node)) {
- node = avc_compute_av(ssid, tsid, tclass, &avd, xp_node);
+ node = avc_compute_av(state, ssid, tsid, tclass, &avd, xp_node);
} else {
memcpy(&avd, &node->ae.avd, sizeof(avd));
xp_node = node->ae.xp_node;
@@ -1045,11 +1082,12 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
goto decision;
}
rcu_read_unlock();
- security_compute_xperms_decision(ssid, tsid, tclass, driver,
- &local_xpd);
+ security_compute_xperms_decision(state, ssid, tsid, tclass,
+ driver, &local_xpd);
rcu_read_lock();
- avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested, driver, xperm,
- ssid, tsid, tclass, avd.seqno, &local_xpd, 0);
+ avc_update_node(state->avc, AVC_CALLBACK_ADD_XPERMS, requested,
+ driver, xperm, ssid, tsid, tclass, avd.seqno,
+ &local_xpd, 0);
} else {
avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd);
}
@@ -1061,12 +1099,12 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
decision:
denied = requested & ~(avd.allowed);
if (unlikely(denied))
- rc = avc_denied(ssid, tsid, tclass, requested, driver, xperm,
- AVC_EXTENDED_PERMS, &avd);
+ rc = avc_denied(state, ssid, tsid, tclass, requested,
+ driver, xperm, AVC_EXTENDED_PERMS, &avd);
rcu_read_unlock();
- rc2 = avc_xperms_audit(ssid, tsid, tclass, requested,
+ rc2 = avc_xperms_audit(state, ssid, tsid, tclass, requested,
&avd, xpd, xperm, rc, ad);
if (rc2)
return rc2;
@@ -1093,10 +1131,11 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
* auditing, e.g. in cases where a lock must be held for the check but
* should be released for the auditing.
*/
-inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- unsigned flags,
- struct av_decision *avd)
+inline int avc_has_perm_noaudit(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ unsigned int flags,
+ struct av_decision *avd)
{
struct avc_node *node;
struct avc_xperms_node xp_node;
@@ -1107,15 +1146,16 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
rcu_read_lock();
- node = avc_lookup(ssid, tsid, tclass);
+ node = avc_lookup(state->avc, ssid, tsid, tclass);
if (unlikely(!node))
- node = avc_compute_av(ssid, tsid, tclass, avd, &xp_node);
+ node = avc_compute_av(state, ssid, tsid, tclass, avd, &xp_node);
else
memcpy(avd, &node->ae.avd, sizeof(*avd));
denied = requested & ~(avd->allowed);
if (unlikely(denied))
- rc = avc_denied(ssid, tsid, tclass, requested, 0, 0, flags, avd);
+ rc = avc_denied(state, ssid, tsid, tclass, requested, 0, 0,
+ flags, avd);
rcu_read_unlock();
return rc;
@@ -1137,39 +1177,43 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
* permissions are granted, -%EACCES if any permissions are denied, or
* another -errno upon other errors.
*/
-int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
+int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
u32 requested, struct common_audit_data *auditdata)
{
struct av_decision avd;
int rc, rc2;
- rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
+ rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested, 0,
+ &avd);
- rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, 0);
+ rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
+ auditdata, 0);
if (rc2)
return rc2;
return rc;
}
-int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass,
- u32 requested, struct common_audit_data *auditdata,
+int avc_has_perm_flags(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ struct common_audit_data *auditdata,
int flags)
{
struct av_decision avd;
int rc, rc2;
- rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
+ rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested, 0,
+ &avd);
- rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc,
+ rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
auditdata, flags);
if (rc2)
return rc2;
return rc;
}
-u32 avc_policy_seqno(void)
+u32 avc_policy_seqno(struct selinux_state *state)
{
- return avc_cache.latest_notif;
+ return state->avc->avc_cache.latest_notif;
}
void avc_disable(void)
@@ -1186,7 +1230,7 @@ void avc_disable(void)
* the cache and get that memory back.
*/
if (avc_node_cachep) {
- avc_flush();
+ avc_flush(selinux_state.avc);
/* kmem_cache_destroy(avc_node_cachep); */
}
}
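The AVC changes above are part of a larger backport that moves formerly-global SELinux state behind an explicit selinux_state/selinux_avc object passed to every helper. A toy sketch of that refactoring pattern (all names below are illustrative, not the kernel's):

#include <stdio.h>

struct toy_avc {
	unsigned int cache_threshold;
	unsigned int active_nodes;
};

struct toy_state {
	struct toy_avc *avc;	/* callers reach the cache via the state */
};

/* the storage can remain a single static instance for now; what changes
 * is that every API takes the state explicitly instead of a global */
static struct toy_avc the_avc;

static void toy_avc_init(struct toy_state *state)
{
	the_avc.cache_threshold = 512;
	the_avc.active_nodes = 0;
	state->avc = &the_avc;
}

static int toy_avc_needs_reclaim(const struct toy_state *state)
{
	return state->avc->active_nodes > state->avc->cache_threshold;
}

int main(void)
{
	struct toy_state state;

	toy_avc_init(&state);
	state.avc->active_nodes = 600;
	printf("reclaim: %d\n", toy_avc_needs_reclaim(&state));
	return 0;
}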
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2ad151e..2595465 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -98,20 +98,24 @@
#include "audit.h"
#include "avc_ss.h"
+struct selinux_state selinux_state;
+
/* SECMARK reference count */
static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
-int selinux_enforcing;
+static int selinux_enforcing_boot;
static int __init enforcing_setup(char *str)
{
unsigned long enforcing;
if (!kstrtoul(str, 0, &enforcing))
- selinux_enforcing = enforcing ? 1 : 0;
+ selinux_enforcing_boot = enforcing ? 1 : 0;
return 1;
}
__setup("enforcing=", enforcing_setup);
+#else
+#define selinux_enforcing_boot 1
#endif
#ifdef CONFIG_SECURITY_SELINUX_BOOTPARAM
@@ -129,6 +133,19 @@ __setup("selinux=", selinux_enabled_setup);
int selinux_enabled = 1;
#endif
+static unsigned int selinux_checkreqprot_boot =
+ CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
+
+static int __init checkreqprot_setup(char *str)
+{
+ unsigned long checkreqprot;
+
+ if (!kstrtoul(str, 0, &checkreqprot))
+ selinux_checkreqprot_boot = checkreqprot ? 1 : 0;
+ return 1;
+}
+__setup("checkreqprot=", checkreqprot_setup);
+
static struct kmem_cache *sel_inode_cache;
static struct kmem_cache *file_security_cache;
@@ -145,7 +162,8 @@ static struct kmem_cache *file_security_cache;
*/
static int selinux_secmark_enabled(void)
{
- return (selinux_policycap_alwaysnetwork || atomic_read(&selinux_secmark_refcount));
+ return (selinux_policycap_alwaysnetwork() ||
+ atomic_read(&selinux_secmark_refcount));
}
/**
@@ -160,7 +178,8 @@ static int selinux_secmark_enabled(void)
*/
static int selinux_peerlbl_enabled(void)
{
- return (selinux_policycap_alwaysnetwork || netlbl_enabled() || selinux_xfrm_enabled());
+ return (selinux_policycap_alwaysnetwork() ||
+ netlbl_enabled() || selinux_xfrm_enabled());
}
static int selinux_netcache_avc_callback(u32 event)
@@ -264,7 +283,8 @@ static int __inode_security_revalidate(struct inode *inode,
might_sleep_if(may_sleep);
- if (ss_initialized && isec->initialized != LABEL_INITIALIZED) {
+ if (selinux_state.initialized &&
+ isec->initialized != LABEL_INITIALIZED) {
if (!may_sleep)
return -ECHILD;
@@ -446,12 +466,14 @@ static int may_context_mount_sb_relabel(u32 sid,
const struct task_security_struct *tsec = cred->security;
int rc;
- rc = avc_has_perm(tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
+ rc = avc_has_perm(&selinux_state,
+ tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
FILESYSTEM__RELABELFROM, NULL);
if (rc)
return rc;
- rc = avc_has_perm(tsec->sid, sid, SECCLASS_FILESYSTEM,
+ rc = avc_has_perm(&selinux_state,
+ tsec->sid, sid, SECCLASS_FILESYSTEM,
FILESYSTEM__RELABELTO, NULL);
return rc;
}
@@ -462,12 +484,14 @@ static int may_context_mount_inode_relabel(u32 sid,
{
const struct task_security_struct *tsec = cred->security;
int rc;
- rc = avc_has_perm(tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
+ rc = avc_has_perm(&selinux_state,
+ tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM,
FILESYSTEM__RELABELFROM, NULL);
if (rc)
return rc;
- rc = avc_has_perm(sid, sbsec->sid, SECCLASS_FILESYSTEM,
+ rc = avc_has_perm(&selinux_state,
+ sid, sbsec->sid, SECCLASS_FILESYSTEM,
FILESYSTEM__ASSOCIATE, NULL);
return rc;
}
@@ -480,7 +504,7 @@ static int selinux_is_genfs_special_handling(struct super_block *sb)
!strcmp(sb->s_type->name, "debugfs") ||
!strcmp(sb->s_type->name, "tracefs") ||
!strcmp(sb->s_type->name, "rootfs") ||
- (selinux_policycap_cgroupseclabel &&
+ (selinux_policycap_cgroupseclabel() &&
(!strcmp(sb->s_type->name, "cgroup") ||
!strcmp(sb->s_type->name, "cgroup2")));
}
@@ -608,7 +632,7 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
if (!(sbsec->flags & SE_SBINITIALIZED))
return -EINVAL;
- if (!ss_initialized)
+ if (!selinux_state.initialized)
return -EINVAL;
/* make sure we always check enough bits to cover the mask */
@@ -639,21 +663,25 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
i = 0;
if (sbsec->flags & FSCONTEXT_MNT) {
- rc = security_sid_to_context(sbsec->sid, &context, &len);
+ rc = security_sid_to_context(&selinux_state, sbsec->sid,
+ &context, &len);
if (rc)
goto out_free;
opts->mnt_opts[i] = context;
opts->mnt_opts_flags[i++] = FSCONTEXT_MNT;
}
if (sbsec->flags & CONTEXT_MNT) {
- rc = security_sid_to_context(sbsec->mntpoint_sid, &context, &len);
+ rc = security_sid_to_context(&selinux_state,
+ sbsec->mntpoint_sid,
+ &context, &len);
if (rc)
goto out_free;
opts->mnt_opts[i] = context;
opts->mnt_opts_flags[i++] = CONTEXT_MNT;
}
if (sbsec->flags & DEFCONTEXT_MNT) {
- rc = security_sid_to_context(sbsec->def_sid, &context, &len);
+ rc = security_sid_to_context(&selinux_state, sbsec->def_sid,
+ &context, &len);
if (rc)
goto out_free;
opts->mnt_opts[i] = context;
@@ -663,7 +691,8 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
struct dentry *root = sbsec->sb->s_root;
struct inode_security_struct *isec = backing_inode_security(root);
- rc = security_sid_to_context(isec->sid, &context, &len);
+ rc = security_sid_to_context(&selinux_state, isec->sid,
+ &context, &len);
if (rc)
goto out_free;
opts->mnt_opts[i] = context;
@@ -726,7 +755,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
mutex_lock(&sbsec->lock);
- if (!ss_initialized) {
+ if (!selinux_state.initialized) {
if (!num_opts) {
/* Defer initialization until selinux_complete_init,
after the initial policy is loaded and the security
@@ -772,7 +801,9 @@ static int selinux_set_mnt_opts(struct super_block *sb,
if (flags[i] == SBLABEL_MNT)
continue;
- rc = security_context_str_to_sid(mount_options[i], &sid, GFP_KERNEL);
+ rc = security_context_str_to_sid(&selinux_state,
+ mount_options[i], &sid,
+ GFP_KERNEL);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_str_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -848,7 +879,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
* Determine the labeling behavior to use for this
* filesystem type.
*/
- rc = security_fs_use(sb);
+ rc = security_fs_use(&selinux_state, sb);
if (rc) {
printk(KERN_WARNING
"%s: security_fs_use(%s) returned %d\n",
@@ -873,7 +904,9 @@ static int selinux_set_mnt_opts(struct super_block *sb,
}
if (sbsec->behavior == SECURITY_FS_USE_XATTR) {
sbsec->behavior = SECURITY_FS_USE_MNTPOINT;
- rc = security_transition_sid(current_sid(), current_sid(),
+ rc = security_transition_sid(&selinux_state,
+ current_sid(),
+ current_sid(),
SECCLASS_FILE, NULL,
&sbsec->mntpoint_sid);
if (rc)
@@ -1009,7 +1042,7 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
* if the parent was able to be mounted it clearly had no special lsm
* mount options. thus we can safely deal with this superblock later
*/
- if (!ss_initialized)
+ if (!selinux_state.initialized)
return 0;
/*
@@ -1039,7 +1072,7 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
if (newsbsec->behavior == SECURITY_FS_USE_NATIVE &&
!(kern_flags & SECURITY_LSM_NATIVE_LABELS) && !set_context) {
- rc = security_fs_use(newsb);
+ rc = security_fs_use(&selinux_state, newsb);
if (rc)
goto out;
}
@@ -1322,7 +1355,7 @@ static inline int default_protocol_dgram(int protocol)
static inline u16 socket_type_to_security_class(int family, int type, int protocol)
{
- int extsockclass = selinux_policycap_extsockclass;
+ int extsockclass = selinux_policycap_extsockclass();
switch (family) {
case PF_UNIX:
@@ -1496,7 +1529,8 @@ static int selinux_genfs_get_sid(struct dentry *dentry,
path++;
}
}
- rc = security_genfs_sid(sb->s_type->name, path, tclass, sid);
+ rc = security_genfs_sid(&selinux_state, sb->s_type->name,
+ path, tclass, sid);
}
free_page((unsigned long)buffer);
return rc;
@@ -1614,7 +1648,8 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
sid = sbsec->def_sid;
rc = 0;
} else {
- rc = security_context_to_sid_default(context, rc, &sid,
+ rc = security_context_to_sid_default(&selinux_state,
+ context, rc, &sid,
sbsec->def_sid,
GFP_NOFS);
if (rc) {
@@ -1647,7 +1682,8 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
sid = sbsec->sid;
/* Try to obtain a transition SID. */
- rc = security_transition_sid(task_sid, sid, sclass, NULL, &sid);
+ rc = security_transition_sid(&selinux_state, task_sid, sid,
+ sclass, NULL, &sid);
if (rc)
goto out;
break;
@@ -1765,9 +1801,11 @@ static int cred_has_capability(const struct cred *cred,
return -EINVAL;
}
- rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
+ rc = avc_has_perm_noaudit(&selinux_state,
+ sid, sid, sclass, av, 0, &avd);
if (audit == SECURITY_CAP_AUDIT) {
- int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0);
+ int rc2 = avc_audit(&selinux_state,
+ sid, sid, sclass, av, &avd, rc, &ad, 0);
if (rc2)
return rc2;
}
@@ -1793,7 +1831,8 @@ static int inode_has_perm(const struct cred *cred,
sid = cred_sid(cred);
isec = inode->i_security;
- return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp);
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass, perms, adp);
}
/* Same as inode_has_perm, but pass explicit audit data containing
@@ -1866,7 +1905,8 @@ static int file_has_perm(const struct cred *cred,
ad.u.file = file;
if (sid != fsec->sid) {
- rc = avc_has_perm(sid, fsec->sid,
+ rc = avc_has_perm(&selinux_state,
+ sid, fsec->sid,
SECCLASS_FD,
FD__USE,
&ad);
@@ -1908,7 +1948,8 @@ selinux_determine_inode_label(const struct task_security_struct *tsec,
*_new_isid = tsec->create_sid;
} else {
const struct inode_security_struct *dsec = inode_security(dir);
- return security_transition_sid(tsec->sid, dsec->sid, tclass,
+ return security_transition_sid(&selinux_state, tsec->sid,
+ dsec->sid, tclass,
name, _new_isid);
}
@@ -1935,7 +1976,8 @@ static int may_create(struct inode *dir,
ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
- rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR,
+ rc = avc_has_perm(&selinux_state,
+ sid, dsec->sid, SECCLASS_DIR,
DIR__ADD_NAME | DIR__SEARCH,
&ad);
if (rc)
@@ -1946,11 +1988,13 @@ static int may_create(struct inode *dir,
if (rc)
return rc;
- rc = avc_has_perm(sid, newsid, tclass, FILE__CREATE, &ad);
+ rc = avc_has_perm(&selinux_state,
+ sid, newsid, tclass, FILE__CREATE, &ad);
if (rc)
return rc;
- return avc_has_perm(newsid, sbsec->sid,
+ return avc_has_perm(&selinux_state,
+ newsid, sbsec->sid,
SECCLASS_FILESYSTEM,
FILESYSTEM__ASSOCIATE, &ad);
}
@@ -1979,7 +2023,8 @@ static int may_link(struct inode *dir,
av = DIR__SEARCH;
av |= (kind ? DIR__REMOVE_NAME : DIR__ADD_NAME);
- rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR, av, &ad);
+ rc = avc_has_perm(&selinux_state,
+ sid, dsec->sid, SECCLASS_DIR, av, &ad);
if (rc)
return rc;
@@ -1999,7 +2044,8 @@ static int may_link(struct inode *dir,
return 0;
}
- rc = avc_has_perm(sid, isec->sid, isec->sclass, av, &ad);
+ rc = avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass, av, &ad);
return rc;
}
@@ -2023,16 +2069,19 @@ static inline int may_rename(struct inode *old_dir,
ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = old_dentry;
- rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR,
+ rc = avc_has_perm(&selinux_state,
+ sid, old_dsec->sid, SECCLASS_DIR,
DIR__REMOVE_NAME | DIR__SEARCH, &ad);
if (rc)
return rc;
- rc = avc_has_perm(sid, old_isec->sid,
+ rc = avc_has_perm(&selinux_state,
+ sid, old_isec->sid,
old_isec->sclass, FILE__RENAME, &ad);
if (rc)
return rc;
if (old_is_dir && new_dir != old_dir) {
- rc = avc_has_perm(sid, old_isec->sid,
+ rc = avc_has_perm(&selinux_state,
+ sid, old_isec->sid,
old_isec->sclass, DIR__REPARENT, &ad);
if (rc)
return rc;
@@ -2042,13 +2091,15 @@ static inline int may_rename(struct inode *old_dir,
av = DIR__ADD_NAME | DIR__SEARCH;
if (d_is_positive(new_dentry))
av |= DIR__REMOVE_NAME;
- rc = avc_has_perm(sid, new_dsec->sid, SECCLASS_DIR, av, &ad);
+ rc = avc_has_perm(&selinux_state,
+ sid, new_dsec->sid, SECCLASS_DIR, av, &ad);
if (rc)
return rc;
if (d_is_positive(new_dentry)) {
new_isec = backing_inode_security(new_dentry);
new_is_dir = d_is_dir(new_dentry);
- rc = avc_has_perm(sid, new_isec->sid,
+ rc = avc_has_perm(&selinux_state,
+ sid, new_isec->sid,
new_isec->sclass,
(new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad);
if (rc)
@@ -2068,7 +2119,8 @@ static int superblock_has_perm(const struct cred *cred,
u32 sid = cred_sid(cred);
sbsec = sb->s_security;
- return avc_has_perm(sid, sbsec->sid, SECCLASS_FILESYSTEM, perms, ad);
+ return avc_has_perm(&selinux_state,
+ sid, sbsec->sid, SECCLASS_FILESYSTEM, perms, ad);
}
/* Convert a Linux mode and permission mask to an access vector. */
@@ -2131,7 +2183,8 @@ static inline u32 open_file_to_av(struct file *file)
u32 av = file_to_av(file);
struct inode *inode = file_inode(file);
- if (selinux_policycap_openperm && inode->i_sb->s_magic != SOCKFS_MAGIC)
+ if (selinux_policycap_openperm() &&
+ inode->i_sb->s_magic != SOCKFS_MAGIC)
av |= FILE__OPEN;
return av;
@@ -2144,7 +2197,8 @@ static int selinux_binder_set_context_mgr(struct task_struct *mgr)
u32 mysid = current_sid();
u32 mgrsid = task_sid(mgr);
- return avc_has_perm(mysid, mgrsid, SECCLASS_BINDER,
+ return avc_has_perm(&selinux_state,
+ mysid, mgrsid, SECCLASS_BINDER,
BINDER__SET_CONTEXT_MGR, NULL);
}
@@ -2157,13 +2211,15 @@ static int selinux_binder_transaction(struct task_struct *from,
int rc;
if (mysid != fromsid) {
- rc = avc_has_perm(mysid, fromsid, SECCLASS_BINDER,
+ rc = avc_has_perm(&selinux_state,
+ mysid, fromsid, SECCLASS_BINDER,
BINDER__IMPERSONATE, NULL);
if (rc)
return rc;
}
- return avc_has_perm(fromsid, tosid, SECCLASS_BINDER, BINDER__CALL,
+ return avc_has_perm(&selinux_state,
+ fromsid, tosid, SECCLASS_BINDER, BINDER__CALL,
NULL);
}
@@ -2173,7 +2229,8 @@ static int selinux_binder_transfer_binder(struct task_struct *from,
u32 fromsid = task_sid(from);
u32 tosid = task_sid(to);
- return avc_has_perm(fromsid, tosid, SECCLASS_BINDER, BINDER__TRANSFER,
+ return avc_has_perm(&selinux_state,
+ fromsid, tosid, SECCLASS_BINDER, BINDER__TRANSFER,
NULL);
}
@@ -2192,7 +2249,8 @@ static int selinux_binder_transfer_file(struct task_struct *from,
ad.u.path = file->f_path;
if (sid != fsec->sid) {
- rc = avc_has_perm(sid, fsec->sid,
+ rc = avc_has_perm(&selinux_state,
+ sid, fsec->sid,
SECCLASS_FD,
FD__USE,
&ad);
@@ -2210,7 +2268,8 @@ static int selinux_binder_transfer_file(struct task_struct *from,
return 0;
isec = backing_inode_security(dentry);
- return avc_has_perm(sid, isec->sid, isec->sclass, file_to_av(file),
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass, file_to_av(file),
&ad);
}
@@ -2221,21 +2280,25 @@ static int selinux_ptrace_access_check(struct task_struct *child,
u32 csid = task_sid(child);
if (mode & PTRACE_MODE_READ)
- return avc_has_perm(sid, csid, SECCLASS_FILE, FILE__READ, NULL);
+ return avc_has_perm(&selinux_state,
+ sid, csid, SECCLASS_FILE, FILE__READ, NULL);
- return avc_has_perm(sid, csid, SECCLASS_PROCESS, PROCESS__PTRACE, NULL);
+ return avc_has_perm(&selinux_state,
+ sid, csid, SECCLASS_PROCESS, PROCESS__PTRACE, NULL);
}
static int selinux_ptrace_traceme(struct task_struct *parent)
{
- return avc_has_perm(task_sid(parent), current_sid(), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ task_sid(parent), current_sid(), SECCLASS_PROCESS,
PROCESS__PTRACE, NULL);
}
static int selinux_capget(struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
- return avc_has_perm(current_sid(), task_sid(target), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid(target), SECCLASS_PROCESS,
PROCESS__GETCAP, NULL);
}
@@ -2244,7 +2307,8 @@ static int selinux_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
{
- return avc_has_perm(cred_sid(old), cred_sid(new), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ cred_sid(old), cred_sid(new), SECCLASS_PROCESS,
PROCESS__SETCAP, NULL);
}
@@ -2304,18 +2368,21 @@ static int selinux_syslog(int type)
switch (type) {
case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
- return avc_has_perm(current_sid(), SECINITSID_KERNEL,
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
SECCLASS_SYSTEM, SYSTEM__SYSLOG_READ, NULL);
case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
/* Set level of messages printed to console */
case SYSLOG_ACTION_CONSOLE_LEVEL:
- return avc_has_perm(current_sid(), SECINITSID_KERNEL,
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
SECCLASS_SYSTEM, SYSTEM__SYSLOG_CONSOLE,
NULL);
}
/* All other syslog types */
- return avc_has_perm(current_sid(), SECINITSID_KERNEL,
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
SECCLASS_SYSTEM, SYSTEM__SYSLOG_MOD, NULL);
}
@@ -2376,13 +2443,14 @@ static int check_nnp_nosuid(const struct linux_binprm *bprm,
* policy allows the corresponding permission between
* the old and new contexts.
*/
- if (selinux_policycap_nnp_nosuid_transition) {
+ if (selinux_policycap_nnp_nosuid_transition()) {
av = 0;
if (nnp)
av |= PROCESS2__NNP_TRANSITION;
if (nosuid)
av |= PROCESS2__NOSUID_TRANSITION;
- rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
+ rc = avc_has_perm(&selinux_state,
+ old_tsec->sid, new_tsec->sid,
SECCLASS_PROCESS2, av, NULL);
if (!rc)
return 0;
@@ -2393,7 +2461,8 @@ static int check_nnp_nosuid(const struct linux_binprm *bprm,
* i.e. SIDs that are guaranteed to only be allowed a subset
* of the permissions of the current SID.
*/
- rc = security_bounded_transition(old_tsec->sid, new_tsec->sid);
+ rc = security_bounded_transition(&selinux_state, old_tsec->sid,
+ new_tsec->sid);
if (!rc)
return 0;
@@ -2445,8 +2514,8 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
return rc;
} else {
/* Check for a default transition on this program. */
- rc = security_transition_sid(old_tsec->sid, isec->sid,
- SECCLASS_PROCESS, NULL,
+ rc = security_transition_sid(&selinux_state, old_tsec->sid,
+ isec->sid, SECCLASS_PROCESS, NULL,
&new_tsec->sid);
if (rc)
return rc;
@@ -2464,25 +2533,29 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
ad.u.file = bprm->file;
if (new_tsec->sid == old_tsec->sid) {
- rc = avc_has_perm(old_tsec->sid, isec->sid,
+ rc = avc_has_perm(&selinux_state,
+ old_tsec->sid, isec->sid,
SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, &ad);
if (rc)
return rc;
} else {
/* Check permissions for the transition. */
- rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
+ rc = avc_has_perm(&selinux_state,
+ old_tsec->sid, new_tsec->sid,
SECCLASS_PROCESS, PROCESS__TRANSITION, &ad);
if (rc)
return rc;
- rc = avc_has_perm(new_tsec->sid, isec->sid,
+ rc = avc_has_perm(&selinux_state,
+ new_tsec->sid, isec->sid,
SECCLASS_FILE, FILE__ENTRYPOINT, &ad);
if (rc)
return rc;
/* Check for shared state */
if (bprm->unsafe & LSM_UNSAFE_SHARE) {
- rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
+ rc = avc_has_perm(&selinux_state,
+ old_tsec->sid, new_tsec->sid,
SECCLASS_PROCESS, PROCESS__SHARE,
NULL);
if (rc)
@@ -2494,7 +2567,8 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
if (bprm->unsafe & LSM_UNSAFE_PTRACE) {
u32 ptsid = ptrace_parent_sid();
if (ptsid != 0) {
- rc = avc_has_perm(ptsid, new_tsec->sid,
+ rc = avc_has_perm(&selinux_state,
+ ptsid, new_tsec->sid,
SECCLASS_PROCESS,
PROCESS__PTRACE, NULL);
if (rc)
@@ -2508,7 +2582,8 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
/* Enable secure mode for SIDs transitions unless
the noatsecure permission is granted between
the two SIDs, i.e. ahp returns 0. */
- rc = avc_has_perm(old_tsec->sid, new_tsec->sid,
+ rc = avc_has_perm(&selinux_state,
+ old_tsec->sid, new_tsec->sid,
SECCLASS_PROCESS, PROCESS__NOATSECURE,
NULL);
bprm->secureexec |= !!rc;
@@ -2600,7 +2675,8 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
* higher than the default soft limit for cases where the default is
* lower than the hard limit, e.g. RLIMIT_CORE or RLIMIT_STACK.
*/
- rc = avc_has_perm(new_tsec->osid, new_tsec->sid, SECCLASS_PROCESS,
+ rc = avc_has_perm(&selinux_state,
+ new_tsec->osid, new_tsec->sid, SECCLASS_PROCESS,
PROCESS__RLIMITINH, NULL);
if (rc) {
/* protect against do_prlimit() */
@@ -2640,7 +2716,8 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
* This must occur _after_ the task SID has been updated so that any
* kill done after the flush will be checked against the new SID.
*/
- rc = avc_has_perm(osid, sid, SECCLASS_PROCESS, PROCESS__SIGINH, NULL);
+ rc = avc_has_perm(&selinux_state,
+ osid, sid, SECCLASS_PROCESS, PROCESS__SIGINH, NULL);
if (rc) {
if (IS_ENABLED(CONFIG_POSIX_TIMERS)) {
memset(&itimer, 0, sizeof itimer);
@@ -2804,7 +2881,9 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
if (flags[i] == SBLABEL_MNT)
continue;
- rc = security_context_str_to_sid(mount_options[i], &sid, GFP_KERNEL);
+ rc = security_context_str_to_sid(&selinux_state,
+ mount_options[i], &sid,
+ GFP_KERNEL);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_str_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -2929,7 +3008,8 @@ static int selinux_dentry_init_security(struct dentry *dentry, int mode,
if (rc)
return rc;
- return security_sid_to_context(newsid, (char **)ctx, ctxlen);
+ return security_sid_to_context(&selinux_state, newsid, (char **)ctx,
+ ctxlen);
}
static int selinux_dentry_create_files_as(struct dentry *dentry, int mode,
@@ -2984,14 +3064,15 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
isec->initialized = LABEL_INITIALIZED;
}
- if (!ss_initialized || !(sbsec->flags & SBLABEL_MNT))
+ if (!selinux_state.initialized || !(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
if (name)
*name = XATTR_SELINUX_SUFFIX;
if (value && len) {
- rc = security_sid_to_context_force(newsid, &context, &clen);
+ rc = security_sid_to_context_force(&selinux_state, newsid,
+ &context, &clen);
if (rc)
return rc;
*value = context;
@@ -3066,7 +3147,8 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
if (IS_ERR(isec))
return PTR_ERR(isec);
- return avc_has_perm_flags(sid, isec->sid, isec->sclass, FILE__READ, &ad,
+ return avc_has_perm_flags(&selinux_state,
+ sid, isec->sid, isec->sclass, FILE__READ, &ad,
rcu ? MAY_NOT_BLOCK : 0);
}
@@ -3082,7 +3164,8 @@ static noinline int audit_inode_permission(struct inode *inode,
ad.type = LSM_AUDIT_DATA_INODE;
ad.u.inode = inode;
- rc = slow_avc_audit(current_sid(), isec->sid, isec->sclass, perms,
+ rc = slow_avc_audit(&selinux_state,
+ current_sid(), isec->sid, isec->sclass, perms,
audited, denied, result, &ad, flags);
if (rc)
return rc;
@@ -3120,7 +3203,8 @@ static int selinux_inode_permission(struct inode *inode, int mask)
if (IS_ERR(isec))
return PTR_ERR(isec);
- rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, perms, 0, &avd);
+ rc = avc_has_perm_noaudit(&selinux_state,
+ sid, isec->sid, isec->sclass, perms, 0, &avd);
audited = avc_audit_required(perms, &avd, rc,
from_access ? FILE__AUDIT_ACCESS : 0,
&denied);
@@ -3152,7 +3236,7 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
return dentry_has_perm(cred, dentry, FILE__SETATTR);
- if (selinux_policycap_openperm &&
+ if (selinux_policycap_openperm() &&
inode->i_sb->s_magic != SOCKFS_MAGIC &&
(ia_valid & ATTR_SIZE) &&
!(ia_valid & ATTR_FILE))
@@ -3223,12 +3307,14 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
ad.u.dentry = dentry;
isec = backing_inode_security(dentry);
- rc = avc_has_perm(sid, isec->sid, isec->sclass,
+ rc = avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass,
FILE__RELABELFROM, &ad);
if (rc)
return rc;
- rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
+ rc = security_context_to_sid(&selinux_state, value, size, &newsid,
+ GFP_KERNEL);
if (rc == -EINVAL) {
if (!has_cap_mac_admin(true)) {
struct audit_buffer *ab;
@@ -3254,22 +3340,25 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
return rc;
}
- rc = security_context_to_sid_force(value, size, &newsid);
+ rc = security_context_to_sid_force(&selinux_state, value,
+ size, &newsid);
}
if (rc)
return rc;
- rc = avc_has_perm(sid, newsid, isec->sclass,
+ rc = avc_has_perm(&selinux_state,
+ sid, newsid, isec->sclass,
FILE__RELABELTO, &ad);
if (rc)
return rc;
- rc = security_validate_transition(isec->sid, newsid, sid,
- isec->sclass);
+ rc = security_validate_transition(&selinux_state, isec->sid, newsid,
+ sid, isec->sclass);
if (rc)
return rc;
- return avc_has_perm(newsid,
+ return avc_has_perm(&selinux_state,
+ newsid,
sbsec->sid,
SECCLASS_FILESYSTEM,
FILESYSTEM__ASSOCIATE,
@@ -3290,7 +3379,8 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
return;
}
- rc = security_context_to_sid_force(value, size, &newsid);
+ rc = security_context_to_sid_force(&selinux_state, value, size,
+ &newsid);
if (rc) {
printk(KERN_ERR "SELinux: unable to map context to SID"
"for (%s, %lu), rc=%d\n",
@@ -3358,10 +3448,12 @@ static int selinux_inode_getsecurity(struct inode *inode, const char *name, void
*/
isec = inode_security(inode);
if (has_cap_mac_admin(false))
- error = security_sid_to_context_force(isec->sid, &context,
+ error = security_sid_to_context_force(&selinux_state,
+ isec->sid, &context,
&size);
else
- error = security_sid_to_context(isec->sid, &context, &size);
+ error = security_sid_to_context(&selinux_state, isec->sid,
+ &context, &size);
if (error)
return error;
error = size;
@@ -3391,7 +3483,8 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
if (!value || !size)
return -EACCES;
- rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
+ rc = security_context_to_sid(&selinux_state, value, size, &newsid,
+ GFP_KERNEL);
if (rc)
return rc;
@@ -3480,7 +3573,7 @@ static int selinux_file_permission(struct file *file, int mask)
isec = inode_security(inode);
if (sid == fsec->sid && fsec->isid == isec->sid &&
- fsec->pseqno == avc_policy_seqno())
+ fsec->pseqno == avc_policy_seqno(&selinux_state))
/* No change since file_open check. */
return 0;
@@ -3520,7 +3613,8 @@ static int ioctl_has_perm(const struct cred *cred, struct file *file,
ad.u.op->path = file->f_path;
if (ssid != fsec->sid) {
- rc = avc_has_perm(ssid, fsec->sid,
+ rc = avc_has_perm(&selinux_state,
+ ssid, fsec->sid,
SECCLASS_FD,
FD__USE,
&ad);
@@ -3532,8 +3626,9 @@ static int ioctl_has_perm(const struct cred *cred, struct file *file,
return 0;
isec = inode_security(inode);
- rc = avc_has_extended_perms(ssid, isec->sid, isec->sclass,
- requested, driver, xperm, &ad);
+ rc = avc_has_extended_perms(&selinux_state,
+ ssid, isec->sid, isec->sclass,
+ requested, driver, xperm, &ad);
out:
return rc;
}
@@ -3601,7 +3696,8 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared
* private file mapping that will also be writable.
* This has an additional check.
*/
- rc = avc_has_perm(sid, sid, SECCLASS_PROCESS,
+ rc = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_PROCESS,
PROCESS__EXECMEM, NULL);
if (rc)
goto error;
@@ -3631,7 +3727,8 @@ static int selinux_mmap_addr(unsigned long addr)
if (addr < CONFIG_LSM_MMAP_MIN_ADDR) {
u32 sid = current_sid();
- rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT,
+ rc = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_MEMPROTECT,
MEMPROTECT__MMAP_ZERO, NULL);
}
@@ -3653,7 +3750,7 @@ static int selinux_mmap_file(struct file *file, unsigned long reqprot,
return rc;
}
- if (selinux_checkreqprot)
+ if (selinux_state.checkreqprot)
prot = reqprot;
return file_map_prot_check(file, prot,
@@ -3667,7 +3764,7 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
const struct cred *cred = current_cred();
u32 sid = cred_sid(cred);
- if (selinux_checkreqprot)
+ if (selinux_state.checkreqprot)
prot = reqprot;
if (default_noexec &&
@@ -3675,13 +3772,15 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
int rc = 0;
if (vma->vm_start >= vma->vm_mm->start_brk &&
vma->vm_end <= vma->vm_mm->brk) {
- rc = avc_has_perm(sid, sid, SECCLASS_PROCESS,
+ rc = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_PROCESS,
PROCESS__EXECHEAP, NULL);
} else if (!vma->vm_file &&
((vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack) ||
vma_is_stack_for_current(vma))) {
- rc = avc_has_perm(sid, sid, SECCLASS_PROCESS,
+ rc = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_PROCESS,
PROCESS__EXECSTACK, NULL);
} else if (vma->vm_file && vma->anon_vma) {
/*
@@ -3773,7 +3872,8 @@ static int selinux_file_send_sigiotask(struct task_struct *tsk,
else
perm = signal_to_av(signum);
- return avc_has_perm(fsec->fown_sid, sid,
+ return avc_has_perm(&selinux_state,
+ fsec->fown_sid, sid,
SECCLASS_PROCESS, perm, NULL);
}
@@ -3799,7 +3899,7 @@ static int selinux_file_open(struct file *file, const struct cred *cred)
* struct as its SID.
*/
fsec->isid = isec->sid;
- fsec->pseqno = avc_policy_seqno();
+ fsec->pseqno = avc_policy_seqno(&selinux_state);
/*
* Since the inode label or policy seqno may have changed
* between the selinux_inode_permission check and the saving
@@ -3818,7 +3918,8 @@ static int selinux_task_alloc(struct task_struct *task,
{
u32 sid = current_sid();
- return avc_has_perm(sid, sid, SECCLASS_PROCESS, PROCESS__FORK, NULL);
+ return avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_PROCESS, PROCESS__FORK, NULL);
}
/*
@@ -3892,7 +3993,8 @@ static int selinux_kernel_act_as(struct cred *new, u32 secid)
u32 sid = current_sid();
int ret;
- ret = avc_has_perm(sid, secid,
+ ret = avc_has_perm(&selinux_state,
+ sid, secid,
SECCLASS_KERNEL_SERVICE,
KERNEL_SERVICE__USE_AS_OVERRIDE,
NULL);
@@ -3916,7 +4018,8 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
u32 sid = current_sid();
int ret;
- ret = avc_has_perm(sid, isec->sid,
+ ret = avc_has_perm(&selinux_state,
+ sid, isec->sid,
SECCLASS_KERNEL_SERVICE,
KERNEL_SERVICE__CREATE_FILES_AS,
NULL);
@@ -3933,7 +4036,8 @@ static int selinux_kernel_module_request(char *kmod_name)
ad.type = LSM_AUDIT_DATA_KMOD;
ad.u.kmod_name = kmod_name;
- return avc_has_perm(current_sid(), SECINITSID_KERNEL, SECCLASS_SYSTEM,
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL, SECCLASS_SYSTEM,
SYSTEM__MODULE_REQUEST, &ad);
}
@@ -3947,7 +4051,8 @@ static int selinux_kernel_module_from_file(struct file *file)
/* init_module */
if (file == NULL)
- return avc_has_perm(sid, sid, SECCLASS_SYSTEM,
+ return avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_SYSTEM,
SYSTEM__MODULE_LOAD, NULL);
/* finit_module */
@@ -3957,13 +4062,15 @@ static int selinux_kernel_module_from_file(struct file *file)
fsec = file->f_security;
if (sid != fsec->sid) {
- rc = avc_has_perm(sid, fsec->sid, SECCLASS_FD, FD__USE, &ad);
+ rc = avc_has_perm(&selinux_state,
+ sid, fsec->sid, SECCLASS_FD, FD__USE, &ad);
if (rc)
return rc;
}
isec = inode_security(file_inode(file));
- return avc_has_perm(sid, isec->sid, SECCLASS_SYSTEM,
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_SYSTEM,
SYSTEM__MODULE_LOAD, &ad);
}
@@ -3985,19 +4092,22 @@ static int selinux_kernel_read_file(struct file *file,
static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
{
- return avc_has_perm(current_sid(), task_sid(p), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid(p), SECCLASS_PROCESS,
PROCESS__SETPGID, NULL);
}
static int selinux_task_getpgid(struct task_struct *p)
{
- return avc_has_perm(current_sid(), task_sid(p), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid(p), SECCLASS_PROCESS,
PROCESS__GETPGID, NULL);
}
static int selinux_task_getsid(struct task_struct *p)
{
- return avc_has_perm(current_sid(), task_sid(p), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid(p), SECCLASS_PROCESS,
PROCESS__GETSESSION, NULL);
}
@@ -4008,19 +4118,22 @@ static void selinux_task_getsecid(struct task_struct *p, u32 *secid)
static int selinux_task_setnice(struct task_struct *p, int nice)
{
- return avc_has_perm(current_sid(), task_sid(p), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid(p), SECCLASS_PROCESS,
PROCESS__SETSCHED, NULL);
}
static int selinux_task_setioprio(struct task_struct *p, int ioprio)
{
- return avc_has_perm(current_sid(), task_sid(p), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid(p), SECCLASS_PROCESS,
PROCESS__SETSCHED, NULL);
}
static int selinux_task_getioprio(struct task_struct *p)
{
- return avc_has_perm(current_sid(), task_sid(p), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid(p), SECCLASS_PROCESS,
PROCESS__GETSCHED, NULL);
}
@@ -4035,7 +4148,8 @@ int selinux_task_prlimit(const struct cred *cred, const struct cred *tcred,
av |= PROCESS__SETRLIMIT;
if (flags & LSM_PRLIMIT_READ)
av |= PROCESS__GETRLIMIT;
- return avc_has_perm(cred_sid(cred), cred_sid(tcred),
+ return avc_has_perm(&selinux_state,
+ cred_sid(cred), cred_sid(tcred),
SECCLASS_PROCESS, av, NULL);
}
@@ -4049,7 +4163,8 @@ static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
later be used as a safe reset point for the soft limit
upon context transitions. See selinux_bprm_committing_creds. */
if (old_rlim->rlim_max != new_rlim->rlim_max)
- return avc_has_perm(current_sid(), task_sid(p),
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid(p),
SECCLASS_PROCESS, PROCESS__SETRLIMIT, NULL);
return 0;
@@ -4057,19 +4172,22 @@ static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
static int selinux_task_setscheduler(struct task_struct *p)
{
- return avc_has_perm(current_sid(), task_sid(p), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid(p), SECCLASS_PROCESS,
PROCESS__SETSCHED, NULL);
}
static int selinux_task_getscheduler(struct task_struct *p)
{
- return avc_has_perm(current_sid(), task_sid(p), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid(p), SECCLASS_PROCESS,
PROCESS__GETSCHED, NULL);
}
static int selinux_task_movememory(struct task_struct *p)
{
- return avc_has_perm(current_sid(), task_sid(p), SECCLASS_PROCESS,
+ return avc_has_perm(&selinux_state,
+ current_sid(), task_sid(p), SECCLASS_PROCESS,
PROCESS__SETSCHED, NULL);
}
@@ -4084,7 +4202,8 @@ static int selinux_task_kill(struct task_struct *p, struct siginfo *info,
perm = signal_to_av(sig);
if (!secid)
secid = current_sid();
- return avc_has_perm(secid, task_sid(p), SECCLASS_PROCESS, perm, NULL);
+ return avc_has_perm(&selinux_state,
+ secid, task_sid(p), SECCLASS_PROCESS, perm, NULL);
}
static void selinux_task_to_inode(struct task_struct *p,
@@ -4325,7 +4444,8 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
if (unlikely(err))
return -EACCES;
- err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
+ err = security_net_peersid_resolve(&selinux_state, nlbl_sid,
+ nlbl_type, xfrm_sid, sid);
if (unlikely(err)) {
printk(KERN_WARNING
"SELinux: failure in selinux_skb_peerlbl_sid(),"
@@ -4353,7 +4473,8 @@ static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
int err = 0;
if (skb_sid != SECSID_NULL)
- err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
+ err = security_sid_mls_copy(&selinux_state, sk_sid, skb_sid,
+ conn_sid);
else
*conn_sid = sk_sid;
@@ -4370,8 +4491,8 @@ static int socket_sockcreate_sid(const struct task_security_struct *tsec,
return 0;
}
- return security_transition_sid(tsec->sid, tsec->sid, secclass, NULL,
- socksid);
+ return security_transition_sid(&selinux_state, tsec->sid, tsec->sid,
+ secclass, NULL, socksid);
}
static int sock_has_perm(struct sock *sk, u32 perms)
@@ -4387,7 +4508,8 @@ static int sock_has_perm(struct sock *sk, u32 perms)
ad.u.net = &net;
ad.u.net->sk = sk;
- return avc_has_perm(current_sid(), sksec->sid, sksec->sclass, perms,
+ return avc_has_perm(&selinux_state,
+ current_sid(), sksec->sid, sksec->sclass, perms,
&ad);
}
@@ -4407,7 +4529,8 @@ static int selinux_socket_create(int family, int type,
if (rc)
return rc;
- return avc_has_perm(tsec->sid, newsid, secclass, SOCKET__CREATE, NULL);
+ return avc_has_perm(&selinux_state,
+ tsec->sid, newsid, secclass, SOCKET__CREATE, NULL);
}
static int selinux_socket_post_create(struct socket *sock, int family,
@@ -4503,7 +4626,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
ad.u.net = &net;
ad.u.net->sport = htons(snum);
ad.u.net->family = family;
- err = avc_has_perm(sksec->sid, sid,
+ err = avc_has_perm(&selinux_state,
+ sksec->sid, sid,
sksec->sclass,
SOCKET__NAME_BIND, &ad);
if (err)
@@ -4543,7 +4667,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
else
ad.u.net->v6info.saddr = addr6->sin6_addr;
- err = avc_has_perm(sksec->sid, sid,
+ err = avc_has_perm(&selinux_state,
+ sksec->sid, sid,
sksec->sclass, node_perm, &ad);
if (err)
goto out;
@@ -4597,7 +4722,8 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address,
ad.u.net = &net;
ad.u.net->dport = htons(snum);
ad.u.net->family = sk->sk_family;
- err = avc_has_perm(sksec->sid, sid, sksec->sclass, perm, &ad);
+ err = avc_has_perm(&selinux_state,
+ sksec->sid, sid, sksec->sclass, perm, &ad);
if (err)
goto out;
}
@@ -4698,7 +4824,8 @@ static int selinux_socket_unix_stream_connect(struct sock *sock,
ad.u.net = &net;
ad.u.net->sk = other;
- err = avc_has_perm(sksec_sock->sid, sksec_other->sid,
+ err = avc_has_perm(&selinux_state,
+ sksec_sock->sid, sksec_other->sid,
sksec_other->sclass,
UNIX_STREAM_SOCKET__CONNECTTO, &ad);
if (err)
@@ -4706,8 +4833,8 @@ static int selinux_socket_unix_stream_connect(struct sock *sock,
/* server child socket */
sksec_new->peer_sid = sksec_sock->sid;
- err = security_sid_mls_copy(sksec_other->sid, sksec_sock->sid,
- &sksec_new->sid);
+ err = security_sid_mls_copy(&selinux_state, sksec_other->sid,
+ sksec_sock->sid, &sksec_new->sid);
if (err)
return err;
@@ -4729,7 +4856,8 @@ static int selinux_socket_unix_may_send(struct socket *sock,
ad.u.net = &net;
ad.u.net->sk = other->sk;
- return avc_has_perm(ssec->sid, osec->sid, osec->sclass, SOCKET__SENDTO,
+ return avc_has_perm(&selinux_state,
+ ssec->sid, osec->sid, osec->sclass, SOCKET__SENDTO,
&ad);
}
@@ -4744,7 +4872,8 @@ static int selinux_inet_sys_rcv_skb(struct net *ns, int ifindex,
err = sel_netif_sid(ns, ifindex, &if_sid);
if (err)
return err;
- err = avc_has_perm(peer_sid, if_sid,
+ err = avc_has_perm(&selinux_state,
+ peer_sid, if_sid,
SECCLASS_NETIF, NETIF__INGRESS, ad);
if (err)
return err;
@@ -4752,7 +4881,8 @@ static int selinux_inet_sys_rcv_skb(struct net *ns, int ifindex,
err = sel_netnode_sid(addrp, family, &node_sid);
if (err)
return err;
- return avc_has_perm(peer_sid, node_sid,
+ return avc_has_perm(&selinux_state,
+ peer_sid, node_sid,
SECCLASS_NODE, NODE__RECVFROM, ad);
}
@@ -4775,7 +4905,8 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
return err;
if (selinux_secmark_enabled()) {
- err = avc_has_perm(sk_sid, skb->secmark, SECCLASS_PACKET,
+ err = avc_has_perm(&selinux_state,
+ sk_sid, skb->secmark, SECCLASS_PACKET,
PACKET__RECV, &ad);
if (err)
return err;
@@ -4812,7 +4943,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
* to the selinux_sock_rcv_skb_compat() function to deal with the
* special handling. We do this in an attempt to keep this function
* as fast and as clean as possible. */
- if (!selinux_policycap_netpeer)
+ if (!selinux_policycap_netpeer())
return selinux_sock_rcv_skb_compat(sk, skb, family);
secmark_active = selinux_secmark_enabled();
@@ -4840,7 +4971,8 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
selinux_netlbl_err(skb, family, err, 0);
return err;
}
- err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
+ err = avc_has_perm(&selinux_state,
+ sk_sid, peer_sid, SECCLASS_PEER,
PEER__RECV, &ad);
if (err) {
selinux_netlbl_err(skb, family, err, 0);
@@ -4849,7 +4981,8 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
if (secmark_active) {
- err = avc_has_perm(sk_sid, skb->secmark, SECCLASS_PACKET,
+ err = avc_has_perm(&selinux_state,
+ sk_sid, skb->secmark, SECCLASS_PACKET,
PACKET__RECV, &ad);
if (err)
return err;
@@ -4873,7 +5006,8 @@ static int selinux_socket_getpeersec_stream(struct socket *sock, char __user *op
if (peer_sid == SECSID_NULL)
return -ENOPROTOOPT;
- err = security_sid_to_context(peer_sid, &scontext, &scontext_len);
+ err = security_sid_to_context(&selinux_state, peer_sid, &scontext,
+ &scontext_len);
if (err)
return err;
@@ -5039,7 +5173,9 @@ static int selinux_secmark_relabel_packet(u32 sid)
__tsec = current_security();
tsid = __tsec->sid;
- return avc_has_perm(tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL);
+ return avc_has_perm(&selinux_state,
+ tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO,
+ NULL);
}
static void selinux_secmark_refcount_inc(void)
@@ -5087,7 +5223,8 @@ static int selinux_tun_dev_create(void)
* connections unlike traditional sockets - check the TUN driver to
* get a better understanding of why this socket is special */
- return avc_has_perm(sid, sid, SECCLASS_TUN_SOCKET, TUN_SOCKET__CREATE,
+ return avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_TUN_SOCKET, TUN_SOCKET__CREATE,
NULL);
}
@@ -5095,7 +5232,8 @@ static int selinux_tun_dev_attach_queue(void *security)
{
struct tun_security_struct *tunsec = security;
- return avc_has_perm(current_sid(), tunsec->sid, SECCLASS_TUN_SOCKET,
+ return avc_has_perm(&selinux_state,
+ current_sid(), tunsec->sid, SECCLASS_TUN_SOCKET,
TUN_SOCKET__ATTACH_QUEUE, NULL);
}
@@ -5123,11 +5261,13 @@ static int selinux_tun_dev_open(void *security)
u32 sid = current_sid();
int err;
- err = avc_has_perm(sid, tunsec->sid, SECCLASS_TUN_SOCKET,
+ err = avc_has_perm(&selinux_state,
+ sid, tunsec->sid, SECCLASS_TUN_SOCKET,
TUN_SOCKET__RELABELFROM, NULL);
if (err)
return err;
- err = avc_has_perm(sid, sid, SECCLASS_TUN_SOCKET,
+ err = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_TUN_SOCKET,
TUN_SOCKET__RELABELTO, NULL);
if (err)
return err;
@@ -5158,7 +5298,8 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
sk->sk_protocol, nlh->nlmsg_type,
secclass_map[sksec->sclass - 1].name,
task_pid_nr(current), current->comm);
- if (!selinux_enforcing || security_get_allow_unknown())
+ if (!enforcing_enabled(&selinux_state) ||
+ security_get_allow_unknown(&selinux_state))
err = 0;
}
@@ -5188,7 +5329,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb,
u8 netlbl_active;
u8 peerlbl_active;
- if (!selinux_policycap_netpeer)
+ if (!selinux_policycap_netpeer())
return NF_ACCEPT;
secmark_active = selinux_secmark_enabled();
@@ -5217,7 +5358,8 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb,
}
if (secmark_active)
- if (avc_has_perm(peer_sid, skb->secmark,
+ if (avc_has_perm(&selinux_state,
+ peer_sid, skb->secmark,
SECCLASS_PACKET, PACKET__FORWARD_IN, &ad))
return NF_DROP;
@@ -5329,7 +5471,8 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
return NF_DROP;
if (selinux_secmark_enabled())
- if (avc_has_perm(sksec->sid, skb->secmark,
+ if (avc_has_perm(&selinux_state,
+ sksec->sid, skb->secmark,
SECCLASS_PACKET, PACKET__SEND, &ad))
return NF_DROP_ERR(-ECONNREFUSED);
@@ -5357,7 +5500,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb,
* to the selinux_ip_postroute_compat() function to deal with the
* special handling. We do this in an attempt to keep this function
* as fast and as clean as possible. */
- if (!selinux_policycap_netpeer)
+ if (!selinux_policycap_netpeer())
return selinux_ip_postroute_compat(skb, ifindex, family);
secmark_active = selinux_secmark_enabled();
@@ -5452,7 +5595,8 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb,
return NF_DROP;
if (secmark_active)
- if (avc_has_perm(peer_sid, skb->secmark,
+ if (avc_has_perm(&selinux_state,
+ peer_sid, skb->secmark,
SECCLASS_PACKET, secmark_perm, &ad))
return NF_DROP_ERR(-ECONNREFUSED);
@@ -5462,13 +5606,15 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb,
if (sel_netif_sid(dev_net(outdev), ifindex, &if_sid))
return NF_DROP;
- if (avc_has_perm(peer_sid, if_sid,
+ if (avc_has_perm(&selinux_state,
+ peer_sid, if_sid,
SECCLASS_NETIF, NETIF__EGRESS, &ad))
return NF_DROP_ERR(-ECONNREFUSED);
if (sel_netnode_sid(addrp, family, &node_sid))
return NF_DROP;
- if (avc_has_perm(peer_sid, node_sid,
+ if (avc_has_perm(&selinux_state,
+ peer_sid, node_sid,
SECCLASS_NODE, NODE__SENDTO, &ad))
return NF_DROP_ERR(-ECONNREFUSED);
}
@@ -5556,7 +5702,8 @@ static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = ipc_perms->key;
- return avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, isec->sclass, perms, &ad);
}
static int selinux_msg_msg_alloc_security(struct msg_msg *msg)
@@ -5586,7 +5733,8 @@ static int selinux_msg_queue_alloc_security(struct msg_queue *msq)
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->q_perm.key;
- rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
+ rc = avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_MSGQ,
MSGQ__CREATE, &ad);
if (rc) {
ipc_free_security(&msq->q_perm);
@@ -5611,7 +5759,8 @@ static int selinux_msg_queue_associate(struct msg_queue *msq, int msqflg)
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->q_perm.key;
- return avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_MSGQ,
MSGQ__ASSOCIATE, &ad);
}
@@ -5624,7 +5773,8 @@ static int selinux_msg_queue_msgctl(struct msg_queue *msq, int cmd)
case IPC_INFO:
case MSG_INFO:
/* No specific object, just general system-wide information. */
- return avc_has_perm(current_sid(), SECINITSID_KERNEL,
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
SECCLASS_SYSTEM, SYSTEM__IPC_INFO, NULL);
case IPC_STAT:
case MSG_STAT:
@@ -5663,8 +5813,8 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
* Compute new sid based on current process and
* message queue this message will be stored in
*/
- rc = security_transition_sid(sid, isec->sid, SECCLASS_MSG,
- NULL, &msec->sid);
+ rc = security_transition_sid(&selinux_state, sid, isec->sid,
+ SECCLASS_MSG, NULL, &msec->sid);
if (rc)
return rc;
}
@@ -5673,15 +5823,18 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
ad.u.ipc_id = msq->q_perm.key;
/* Can this process write to the queue? */
- rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
+ rc = avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_MSGQ,
MSGQ__WRITE, &ad);
if (!rc)
/* Can this process send the message */
- rc = avc_has_perm(sid, msec->sid, SECCLASS_MSG,
+ rc = avc_has_perm(&selinux_state,
+ sid, msec->sid, SECCLASS_MSG,
MSG__SEND, &ad);
if (!rc)
/* Can the message be put in the queue? */
- rc = avc_has_perm(msec->sid, isec->sid, SECCLASS_MSGQ,
+ rc = avc_has_perm(&selinux_state,
+ msec->sid, isec->sid, SECCLASS_MSGQ,
MSGQ__ENQUEUE, &ad);
return rc;
@@ -5703,10 +5856,12 @@ static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->q_perm.key;
- rc = avc_has_perm(sid, isec->sid,
+ rc = avc_has_perm(&selinux_state,
+ sid, isec->sid,
SECCLASS_MSGQ, MSGQ__READ, &ad);
if (!rc)
- rc = avc_has_perm(sid, msec->sid,
+ rc = avc_has_perm(&selinux_state,
+ sid, msec->sid,
SECCLASS_MSG, MSG__RECEIVE, &ad);
return rc;
}
@@ -5728,7 +5883,8 @@ static int selinux_shm_alloc_security(struct shmid_kernel *shp)
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = shp->shm_perm.key;
- rc = avc_has_perm(sid, isec->sid, SECCLASS_SHM,
+ rc = avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_SHM,
SHM__CREATE, &ad);
if (rc) {
ipc_free_security(&shp->shm_perm);
@@ -5753,7 +5909,8 @@ static int selinux_shm_associate(struct shmid_kernel *shp, int shmflg)
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = shp->shm_perm.key;
- return avc_has_perm(sid, isec->sid, SECCLASS_SHM,
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_SHM,
SHM__ASSOCIATE, &ad);
}
@@ -5767,7 +5924,8 @@ static int selinux_shm_shmctl(struct shmid_kernel *shp, int cmd)
case IPC_INFO:
case SHM_INFO:
/* No specific object, just general system-wide information. */
- return avc_has_perm(current_sid(), SECINITSID_KERNEL,
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
SECCLASS_SYSTEM, SYSTEM__IPC_INFO, NULL);
case IPC_STAT:
case SHM_STAT:
@@ -5821,7 +5979,8 @@ static int selinux_sem_alloc_security(struct sem_array *sma)
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = sma->sem_perm.key;
- rc = avc_has_perm(sid, isec->sid, SECCLASS_SEM,
+ rc = avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_SEM,
SEM__CREATE, &ad);
if (rc) {
ipc_free_security(&sma->sem_perm);
@@ -5846,7 +6005,8 @@ static int selinux_sem_associate(struct sem_array *sma, int semflg)
ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = sma->sem_perm.key;
- return avc_has_perm(sid, isec->sid, SECCLASS_SEM,
+ return avc_has_perm(&selinux_state,
+ sid, isec->sid, SECCLASS_SEM,
SEM__ASSOCIATE, &ad);
}
@@ -5860,7 +6020,8 @@ static int selinux_sem_semctl(struct sem_array *sma, int cmd)
case IPC_INFO:
case SEM_INFO:
/* No specific object, just general system-wide information. */
- return avc_has_perm(current_sid(), SECINITSID_KERNEL,
+ return avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_KERNEL,
SECCLASS_SYSTEM, SYSTEM__IPC_INFO, NULL);
case GETPID:
case GETNCNT:
@@ -5946,7 +6107,8 @@ static int selinux_getprocattr(struct task_struct *p,
__tsec = __task_cred(p)->security;
if (current != p) {
- error = avc_has_perm(current_sid(), __tsec->sid,
+ error = avc_has_perm(&selinux_state,
+ current_sid(), __tsec->sid,
SECCLASS_PROCESS, PROCESS__GETATTR, NULL);
if (error)
goto bad;
@@ -5973,7 +6135,7 @@ static int selinux_getprocattr(struct task_struct *p,
if (!sid)
return 0;
- error = security_sid_to_context(sid, value, &len);
+ error = security_sid_to_context(&selinux_state, sid, value, &len);
if (error)
return error;
return len;
@@ -5995,19 +6157,24 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
* Basic control over ability to set these attributes at all.
*/
if (!strcmp(name, "exec"))
- error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS,
+ error = avc_has_perm(&selinux_state,
+ mysid, mysid, SECCLASS_PROCESS,
PROCESS__SETEXEC, NULL);
else if (!strcmp(name, "fscreate"))
- error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS,
+ error = avc_has_perm(&selinux_state,
+ mysid, mysid, SECCLASS_PROCESS,
PROCESS__SETFSCREATE, NULL);
else if (!strcmp(name, "keycreate"))
- error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS,
+ error = avc_has_perm(&selinux_state,
+ mysid, mysid, SECCLASS_PROCESS,
PROCESS__SETKEYCREATE, NULL);
else if (!strcmp(name, "sockcreate"))
- error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS,
+ error = avc_has_perm(&selinux_state,
+ mysid, mysid, SECCLASS_PROCESS,
PROCESS__SETSOCKCREATE, NULL);
else if (!strcmp(name, "current"))
- error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS,
+ error = avc_has_perm(&selinux_state,
+ mysid, mysid, SECCLASS_PROCESS,
PROCESS__SETCURRENT, NULL);
else
error = -EINVAL;
@@ -6020,7 +6187,8 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
str[size-1] = 0;
size--;
}
- error = security_context_to_sid(value, size, &sid, GFP_KERNEL);
+ error = security_context_to_sid(&selinux_state, value, size,
+ &sid, GFP_KERNEL);
if (error == -EINVAL && !strcmp(name, "fscreate")) {
if (!has_cap_mac_admin(true)) {
struct audit_buffer *ab;
@@ -6039,8 +6207,9 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
return error;
}
- error = security_context_to_sid_force(value, size,
- &sid);
+ error = security_context_to_sid_force(
+ &selinux_state,
+ value, size, &sid);
}
if (error)
return error;
@@ -6062,7 +6231,8 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
} else if (!strcmp(name, "fscreate")) {
tsec->create_sid = sid;
} else if (!strcmp(name, "keycreate")) {
- error = avc_has_perm(mysid, sid, SECCLASS_KEY, KEY__CREATE,
+ error = avc_has_perm(&selinux_state,
+ mysid, sid, SECCLASS_KEY, KEY__CREATE,
NULL);
if (error)
goto abort_change;
@@ -6077,13 +6247,15 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
/* Only allow single threaded processes to change context */
error = -EPERM;
if (!current_is_single_threaded()) {
- error = security_bounded_transition(tsec->sid, sid);
+ error = security_bounded_transition(&selinux_state,
+ tsec->sid, sid);
if (error)
goto abort_change;
}
/* Check permissions for the transition. */
- error = avc_has_perm(tsec->sid, sid, SECCLASS_PROCESS,
+ error = avc_has_perm(&selinux_state,
+ tsec->sid, sid, SECCLASS_PROCESS,
PROCESS__DYNTRANSITION, NULL);
if (error)
goto abort_change;
@@ -6092,7 +6264,8 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
Otherwise, leave SID unchanged and fail. */
ptsid = ptrace_parent_sid();
if (ptsid != 0) {
- error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
+ error = avc_has_perm(&selinux_state,
+ ptsid, sid, SECCLASS_PROCESS,
PROCESS__PTRACE, NULL);
if (error)
goto abort_change;
@@ -6119,12 +6292,14 @@ static int selinux_ismaclabel(const char *name)
static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
- return security_sid_to_context(secid, secdata, seclen);
+ return security_sid_to_context(&selinux_state, secid,
+ secdata, seclen);
}
static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
- return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL);
+ return security_context_to_sid(&selinux_state, secdata, seclen,
+ secid, GFP_KERNEL);
}
static void selinux_release_secctx(char *secdata, u32 seclen)
@@ -6219,7 +6394,8 @@ static int selinux_key_permission(key_ref_t key_ref,
key = key_ref_to_ptr(key_ref);
ksec = key->security;
- return avc_has_perm(sid, ksec->sid, SECCLASS_KEY, perm, NULL);
+ return avc_has_perm(&selinux_state,
+ sid, ksec->sid, SECCLASS_KEY, perm, NULL);
}
static int selinux_key_getsecurity(struct key *key, char **_buffer)
@@ -6229,7 +6405,8 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
unsigned len;
int rc;
- rc = security_sid_to_context(ksec->sid, &context, &len);
+ rc = security_sid_to_context(&selinux_state, ksec->sid,
+ &context, &len);
if (!rc)
rc = len;
*_buffer = context;
@@ -6254,7 +6431,8 @@ static int selinux_ib_pkey_access(void *ib_sec, u64 subnet_prefix, u16 pkey_val)
ibpkey.subnet_prefix = subnet_prefix;
ibpkey.pkey = pkey_val;
ad.u.ibpkey = &ibpkey;
- return avc_has_perm(sec->sid, sid,
+ return avc_has_perm(&selinux_state,
+ sec->sid, sid,
SECCLASS_INFINIBAND_PKEY,
INFINIBAND_PKEY__ACCESS, &ad);
}
@@ -6268,7 +6446,8 @@ static int selinux_ib_endport_manage_subnet(void *ib_sec, const char *dev_name,
struct ib_security_struct *sec = ib_sec;
struct lsm_ibendport_audit ibendport;
- err = security_ib_endport_sid(dev_name, port_num, &sid);
+ err = security_ib_endport_sid(&selinux_state, dev_name, port_num,
+ &sid);
if (err)
return err;
@@ -6277,7 +6456,8 @@ static int selinux_ib_endport_manage_subnet(void *ib_sec, const char *dev_name,
strncpy(ibendport.dev_name, dev_name, sizeof(ibendport.dev_name));
ibendport.port = port_num;
ad.u.ibendport = &ibendport;
- return avc_has_perm(sec->sid, sid,
+ return avc_has_perm(&selinux_state,
+ sec->sid, sid,
SECCLASS_INFINIBAND_ENDPORT,
INFINIBAND_ENDPORT__MANAGE_SUBNET, &ad);
}
@@ -6310,11 +6490,13 @@ static int selinux_bpf(int cmd, union bpf_attr *attr,
switch (cmd) {
case BPF_MAP_CREATE:
- ret = avc_has_perm(sid, sid, SECCLASS_BPF, BPF__MAP_CREATE,
+ ret = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_BPF, BPF__MAP_CREATE,
NULL);
break;
case BPF_PROG_LOAD:
- ret = avc_has_perm(sid, sid, SECCLASS_BPF, BPF__PROG_LOAD,
+ ret = avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_BPF, BPF__PROG_LOAD,
NULL);
break;
default:
@@ -6354,14 +6536,16 @@ static int bpf_fd_pass(struct file *file, u32 sid)
if (file->f_op == &bpf_map_fops) {
map = file->private_data;
bpfsec = map->security;
- ret = avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
+ ret = avc_has_perm(&selinux_state,
+ sid, bpfsec->sid, SECCLASS_BPF,
bpf_map_fmode_to_av(file->f_mode), NULL);
if (ret)
return ret;
} else if (file->f_op == &bpf_prog_fops) {
prog = file->private_data;
bpfsec = prog->aux->security;
- ret = avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
+ ret = avc_has_perm(&selinux_state,
+ sid, bpfsec->sid, SECCLASS_BPF,
BPF__PROG_RUN, NULL);
if (ret)
return ret;
@@ -6375,7 +6559,8 @@ static int selinux_bpf_map(struct bpf_map *map, fmode_t fmode)
struct bpf_security_struct *bpfsec;
bpfsec = map->security;
- return avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
+ return avc_has_perm(&selinux_state,
+ sid, bpfsec->sid, SECCLASS_BPF,
bpf_map_fmode_to_av(fmode), NULL);
}
@@ -6385,7 +6570,8 @@ static int selinux_bpf_prog(struct bpf_prog *prog)
struct bpf_security_struct *bpfsec;
bpfsec = prog->aux->security;
- return avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF,
+ return avc_has_perm(&selinux_state,
+ sid, bpfsec->sid, SECCLASS_BPF,
BPF__PROG_RUN, NULL);
}
@@ -6451,7 +6637,7 @@ static int selinux_perf_event_open(struct perf_event_attr *attr, int type)
else
return -EINVAL;
- return avc_has_perm(sid, sid, SECCLASS_PERF_EVENT,
+ return avc_has_perm(&selinux_state, sid, sid, SECCLASS_PERF_EVENT,
requested, NULL);
}
@@ -6482,7 +6668,7 @@ static int selinux_perf_event_read(struct perf_event *event)
struct perf_event_security_struct *perfsec = event->security;
u32 sid = current_sid();
- return avc_has_perm(sid, perfsec->sid,
+ return avc_has_perm(&selinux_state, sid, perfsec->sid,
SECCLASS_PERF_EVENT, PERF_EVENT__READ, NULL);
}
@@ -6491,7 +6677,7 @@ static int selinux_perf_event_write(struct perf_event *event)
struct perf_event_security_struct *perfsec = event->security;
u32 sid = current_sid();
- return avc_has_perm(sid, perfsec->sid,
+ return avc_has_perm(&selinux_state, sid, perfsec->sid,
SECCLASS_PERF_EVENT, PERF_EVENT__WRITE, NULL);
}
#endif
@@ -6749,6 +6935,12 @@ static __init int selinux_init(void)
printk(KERN_INFO "SELinux: Initializing.\n");
+ memset(&selinux_state, 0, sizeof(selinux_state));
+ enforcing_set(&selinux_state, selinux_enforcing_boot);
+ selinux_state.checkreqprot = selinux_checkreqprot_boot;
+ selinux_ss_init(&selinux_state.ss);
+ selinux_avc_init(&selinux_state.avc);
+
/* Set the security state for the initial task. */
cred_init_security();
@@ -6762,6 +6954,12 @@ static __init int selinux_init(void)
0, SLAB_PANIC, NULL);
avc_init();
+ avtab_cache_init();
+
+ ebitmap_cache_init();
+
+ hashtab_cache_init();
+
security_add_hooks(selinux_hooks, ARRAY_SIZE(selinux_hooks), "selinux");
if (avc_add_callback(selinux_netcache_avc_callback, AVC_CALLBACK_RESET))
@@ -6770,7 +6968,7 @@ static __init int selinux_init(void)
if (avc_add_callback(selinux_lsm_notifier_avc_callback, AVC_CALLBACK_RESET))
panic("SELinux: Unable to register AVC LSM notifier callback\n");
- if (selinux_enforcing)
+ if (selinux_enforcing_boot)
printk(KERN_DEBUG "SELinux: Starting in enforcing mode\n");
else
printk(KERN_DEBUG "SELinux: Starting in permissive mode\n");
@@ -6891,23 +7089,22 @@ static void selinux_nf_ip_exit(void)
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
-static int selinux_disabled;
-
-int selinux_disable(void)
+int selinux_disable(struct selinux_state *state)
{
- if (ss_initialized) {
+ if (state->initialized) {
/* Not permitted after initial policy load. */
return -EINVAL;
}
- if (selinux_disabled) {
+ if (state->disabled) {
/* Only do this once. */
return -EINVAL;
}
+ state->disabled = 1;
+
printk(KERN_INFO "SELinux: Disabled at runtime.\n");
- selinux_disabled = 1;
selinux_enabled = 0;
security_delete_hooks(selinux_hooks, ARRAY_SIZE(selinux_hooks));
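The hooks.c hunks above are mechanical: every avc_has_perm() and security_*() call now names &selinux_state explicitly, and selinux_init() seeds that state object instead of assigning scattered globals. A minimal, userspace-compilable sketch of the same pattern follows; the demo_* names are illustrative only and are not kernel symbols.

/*
 * Callers pass an explicit state handle instead of reading globals.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_state {
	bool enforcing;
	bool initialized;
};

/* Single instance, mirroring the kernel's global selinux_state. */
static struct demo_state demo_state;

static int demo_has_perm(struct demo_state *state, unsigned int ssid,
			 unsigned int tsid, unsigned int requested)
{
	/* A permissive state never denies in this toy model. */
	if (!state->enforcing)
		return 0;
	return (ssid == tsid) ? 0 : -13; /* -EACCES */
}

int main(void)
{
	demo_state.enforcing = true;
	demo_state.initialized = true;

	/* Every call site spells out which state it consults. */
	printf("same sid: %d\n", demo_has_perm(&demo_state, 1, 1, 0x1));
	printf("diff sid: %d\n", demo_has_perm(&demo_state, 1, 2, 0x1));
	return 0;
}

Threading the handle through the call chain costs nothing at runtime but means a second state instance could be hosted later without another tree-wide rewrite.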
diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c
index e3614ee..0a4b89d 100644
--- a/security/selinux/ibpkey.c
+++ b/security/selinux/ibpkey.c
@@ -152,7 +152,8 @@ static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid)
return 0;
}
- ret = security_ib_pkey_sid(subnet_prefix, pkey_num, sid);
+ ret = security_ib_pkey_sid(&selinux_state, subnet_prefix, pkey_num,
+ sid);
if (ret)
goto out;
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 57d61cf..ef899bc 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -20,12 +20,6 @@
#include "av_permissions.h"
#include "security.h"
-#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
-extern int selinux_enforcing;
-#else
-#define selinux_enforcing 1
-#endif
-
/*
* An entry in the AVC.
*/
@@ -58,6 +52,7 @@ struct selinux_audit_data {
u32 audited;
u32 denied;
int result;
+ struct selinux_state *state;
};
/*
@@ -102,7 +97,8 @@ static inline u32 avc_audit_required(u32 requested,
return audited;
}
-int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
+int slow_avc_audit(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
u32 requested, u32 audited, u32 denied, int result,
struct common_audit_data *a,
unsigned flags);
@@ -127,7 +123,8 @@ int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
* be performed under a lock, to allow the lock to be released
* before calling the auditing code.
*/
-static inline int avc_audit(u32 ssid, u32 tsid,
+static inline int avc_audit(struct selinux_state *state,
+ u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct av_decision *avd,
int result,
@@ -138,31 +135,35 @@ static inline int avc_audit(u32 ssid, u32 tsid,
audited = avc_audit_required(requested, avd, result, 0, &denied);
if (likely(!audited))
return 0;
- return slow_avc_audit(ssid, tsid, tclass,
+ return slow_avc_audit(state, ssid, tsid, tclass,
requested, audited, denied, result,
a, flags);
}
#define AVC_STRICT 1 /* Ignore permissive mode. */
#define AVC_EXTENDED_PERMS 2 /* update extended permissions */
-int avc_has_perm_noaudit(u32 ssid, u32 tsid,
+int avc_has_perm_noaudit(struct selinux_state *state,
+ u32 ssid, u32 tsid,
u16 tclass, u32 requested,
unsigned flags,
struct av_decision *avd);
-int avc_has_perm(u32 ssid, u32 tsid,
+int avc_has_perm(struct selinux_state *state,
+ u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct common_audit_data *auditdata);
-int avc_has_perm_flags(u32 ssid, u32 tsid,
+int avc_has_perm_flags(struct selinux_state *state,
+ u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct common_audit_data *auditdata,
int flags);
-int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
- u8 driver, u8 perm, struct common_audit_data *ad);
+int avc_has_extended_perms(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ u8 driver, u8 perm, struct common_audit_data *ad);
-u32 avc_policy_seqno(void);
+u32 avc_policy_seqno(struct selinux_state *state);
#define AVC_CALLBACK_GRANT 1
#define AVC_CALLBACK_TRY_REVOKE 2
@@ -177,8 +178,11 @@ u32 avc_policy_seqno(void);
int avc_add_callback(int (*callback)(u32 event), u32 events);
/* Exported to selinuxfs */
-int avc_get_hash_stats(char *page);
-extern unsigned int avc_cache_threshold;
+struct selinux_avc;
+int avc_get_hash_stats(struct selinux_avc *avc, char *page);
+unsigned int avc_get_cache_threshold(struct selinux_avc *avc);
+void avc_set_cache_threshold(struct selinux_avc *avc,
+ unsigned int cache_threshold);
/* Attempt to free avc node cache */
void avc_disable(void);
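The avc.h hunks drop the exported avc_cache_threshold variable in favour of avc_get_cache_threshold()/avc_set_cache_threshold() on an opaque struct selinux_avc handle. A standalone sketch of that accessor pattern, using demo_* placeholders rather than the kernel types:

#include <stdio.h>

struct demo_avc {
	unsigned int cache_threshold;
};

static unsigned int demo_get_cache_threshold(struct demo_avc *avc)
{
	return avc->cache_threshold;
}

static void demo_set_cache_threshold(struct demo_avc *avc,
				     unsigned int cache_threshold)
{
	avc->cache_threshold = cache_threshold;
}

int main(void)
{
	struct demo_avc avc = { .cache_threshold = 512 };

	demo_set_cache_threshold(&avc, 1024);
	printf("threshold: %u\n", demo_get_cache_threshold(&avc));
	return 0;
}

selinuxfs, the only user flagged by the "Exported to selinuxfs" comment, now goes through the setter, so the AVC keeps its internals private to avc.c.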
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h
index 3bcc727..88c384c 100644
--- a/security/selinux/include/avc_ss.h
+++ b/security/selinux/include/avc_ss.h
@@ -9,7 +9,8 @@
#include "flask.h"
-int avc_ss_reset(u32 seqno);
+struct selinux_avc;
+int avc_ss_reset(struct selinux_avc *avc, u32 seqno);
/* Class/perm mapping support */
struct security_class_mapping {
@@ -19,11 +20,5 @@ struct security_class_mapping {
extern struct security_class_mapping secclass_map[];
-/*
- * The security server must be initialized before
- * any labeling or access decisions can be provided.
- */
-extern int ss_initialized;
-
#endif /* _SELINUX_AVC_SS_H_ */
diff --git a/security/selinux/include/conditional.h b/security/selinux/include/conditional.h
index ff4fddc..0e30eca 100644
--- a/security/selinux/include/conditional.h
+++ b/security/selinux/include/conditional.h
@@ -13,10 +13,15 @@
#ifndef _SELINUX_CONDITIONAL_H_
#define _SELINUX_CONDITIONAL_H_
-int security_get_bools(int *len, char ***names, int **values);
+#include "security.h"
-int security_set_bools(int len, int *values);
+int security_get_bools(struct selinux_state *state,
+ int *len, char ***names, int **values);
-int security_get_bool_value(int index);
+int security_set_bools(struct selinux_state *state,
+ int len, int *values);
+
+int security_get_bool_value(struct selinux_state *state,
+ int index);
#endif
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 5bdddf0..2ac6edc 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -161,6 +161,4 @@ struct perf_event_security_struct {
u32 sid; /* SID of perf_event obj creator */
};
-extern unsigned int selinux_checkreqprot;
-
#endif /* _SELINUX_OBJSEC_H_ */
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 99203f1..feffe7e 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -14,6 +14,8 @@
#include <linux/magic.h>
#include <linux/types.h>
//#include "flask.h"
+#include <linux/refcount.h>
+#include <linux/workqueue.h>
#define SECSID_NULL 0x00000000 /* unspecified SID */
#define SECSID_WILD 0xffffffff /* wildcard SID */
@@ -79,14 +81,7 @@ enum {
};
#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
-extern char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX];
-
-extern int selinux_policycap_netpeer;
-extern int selinux_policycap_openperm;
-extern int selinux_policycap_extsockclass;
-extern int selinux_policycap_alwaysnetwork;
-extern int selinux_policycap_cgroupseclabel;
-extern int selinux_policycap_nnp_nosuid_transition;
+extern const char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX];
/*
* type_datum properties
@@ -98,13 +93,98 @@ extern int selinux_policycap_nnp_nosuid_transition;
/* limitation of boundary depth */
#define POLICYDB_BOUNDS_MAXDEPTH 4
-int security_mls_enabled(void);
+struct selinux_avc;
+struct selinux_ss;
-int security_load_policy(void *data, size_t len);
-int security_read_policy(void **data, size_t *len);
-size_t security_policydb_len(void);
+struct selinux_state {
+ bool disabled;
+#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
+ bool enforcing;
+#endif
+ bool checkreqprot;
+ bool initialized;
+ bool policycap[__POLICYDB_CAPABILITY_MAX];
+ struct selinux_avc *avc;
+ struct selinux_ss *ss;
+};
-int security_policycap_supported(unsigned int req_cap);
+void selinux_ss_init(struct selinux_ss **ss);
+void selinux_avc_init(struct selinux_avc **avc);
+
+extern struct selinux_state selinux_state;
+
+#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
+static inline bool enforcing_enabled(struct selinux_state *state)
+{
+ return state->enforcing;
+}
+
+static inline void enforcing_set(struct selinux_state *state, bool value)
+{
+ state->enforcing = value;
+}
+#else
+static inline bool enforcing_enabled(struct selinux_state *state)
+{
+ return true;
+}
+
+static inline void enforcing_set(struct selinux_state *state, bool value)
+{
+}
+#endif
+
+static inline bool selinux_policycap_netpeer(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return state->policycap[POLICYDB_CAPABILITY_NETPEER];
+}
+
+static inline bool selinux_policycap_openperm(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return state->policycap[POLICYDB_CAPABILITY_OPENPERM];
+}
+
+static inline bool selinux_policycap_extsockclass(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return state->policycap[POLICYDB_CAPABILITY_EXTSOCKCLASS];
+}
+
+static inline bool selinux_policycap_alwaysnetwork(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return state->policycap[POLICYDB_CAPABILITY_ALWAYSNETWORK];
+}
+
+static inline bool selinux_policycap_cgroupseclabel(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return state->policycap[POLICYDB_CAPABILITY_CGROUPSECLABEL];
+}
+
+static inline bool selinux_policycap_nnp_nosuid_transition(void)
+{
+ struct selinux_state *state = &selinux_state;
+
+ return state->policycap[POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION];
+}
+
+int security_mls_enabled(struct selinux_state *state);
+int security_load_policy(struct selinux_state *state,
+ void *data, size_t len);
+int security_read_policy(struct selinux_state *state,
+ void **data, size_t *len);
+size_t security_policydb_len(struct selinux_state *state);
+
+int security_policycap_supported(struct selinux_state *state,
+ unsigned int req_cap);
#define SEL_VEC_MAX 32
struct av_decision {
@@ -141,76 +221,100 @@ struct extended_perms {
/* definitions of av_decision.flags */
#define AVD_FLAGS_PERMISSIVE 0x0001
-void security_compute_av(u32 ssid, u32 tsid,
+void security_compute_av(struct selinux_state *state,
+ u32 ssid, u32 tsid,
u16 tclass, struct av_decision *avd,
struct extended_perms *xperms);
-void security_compute_xperms_decision(u32 ssid, u32 tsid, u16 tclass,
- u8 driver, struct extended_perms_decision *xpermd);
+void security_compute_xperms_decision(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
+ u8 driver,
+ struct extended_perms_decision *xpermd);
-void security_compute_av_user(u32 ssid, u32 tsid,
- u16 tclass, struct av_decision *avd);
+void security_compute_av_user(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd);
-int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
+int security_transition_sid(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
const struct qstr *qstr, u32 *out_sid);
-int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
+int security_transition_sid_user(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
const char *objname, u32 *out_sid);
-int security_member_sid(u32 ssid, u32 tsid,
- u16 tclass, u32 *out_sid);
+int security_member_sid(struct selinux_state *state, u32 ssid, u32 tsid,
+ u16 tclass, u32 *out_sid);
-int security_change_sid(u32 ssid, u32 tsid,
- u16 tclass, u32 *out_sid);
+int security_change_sid(struct selinux_state *state, u32 ssid, u32 tsid,
+ u16 tclass, u32 *out_sid);
-int security_sid_to_context(u32 sid, char **scontext,
- u32 *scontext_len);
+int security_sid_to_context(struct selinux_state *state, u32 sid,
+ char **scontext, u32 *scontext_len);
-int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len);
+int security_sid_to_context_force(struct selinux_state *state,
+ u32 sid, char **scontext, u32 *scontext_len);
-int security_context_to_sid(const char *scontext, u32 scontext_len,
+int security_context_to_sid(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
u32 *out_sid, gfp_t gfp);
-int security_context_str_to_sid(const char *scontext, u32 *out_sid, gfp_t gfp);
+int security_context_str_to_sid(struct selinux_state *state,
+ const char *scontext, u32 *out_sid, gfp_t gfp);
-int security_context_to_sid_default(const char *scontext, u32 scontext_len,
+int security_context_to_sid_default(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
u32 *out_sid, u32 def_sid, gfp_t gfp_flags);
-int security_context_to_sid_force(const char *scontext, u32 scontext_len,
+int security_context_to_sid_force(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
u32 *sid);
-int security_get_user_sids(u32 callsid, char *username,
+int security_get_user_sids(struct selinux_state *state,
+ u32 callsid, char *username,
u32 **sids, u32 *nel);
-int security_port_sid(u8 protocol, u16 port, u32 *out_sid);
+int security_port_sid(struct selinux_state *state,
+ u8 protocol, u16 port, u32 *out_sid);
-int security_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *out_sid);
+int security_ib_pkey_sid(struct selinux_state *state,
+ u64 subnet_prefix, u16 pkey_num, u32 *out_sid);
-int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid);
+int security_ib_endport_sid(struct selinux_state *state,
+ const char *dev_name, u8 port_num, u32 *out_sid);
-int security_netif_sid(char *name, u32 *if_sid);
+int security_netif_sid(struct selinux_state *state,
+ char *name, u32 *if_sid);
-int security_node_sid(u16 domain, void *addr, u32 addrlen,
- u32 *out_sid);
+int security_node_sid(struct selinux_state *state,
+ u16 domain, void *addr, u32 addrlen,
+ u32 *out_sid);
-int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
+int security_validate_transition(struct selinux_state *state,
+ u32 oldsid, u32 newsid, u32 tasksid,
u16 tclass);
-int security_validate_transition_user(u32 oldsid, u32 newsid, u32 tasksid,
+int security_validate_transition_user(struct selinux_state *state,
+ u32 oldsid, u32 newsid, u32 tasksid,
u16 tclass);
-int security_bounded_transition(u32 oldsid, u32 newsid);
+int security_bounded_transition(struct selinux_state *state,
+ u32 oldsid, u32 newsid);
-int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid);
+int security_sid_mls_copy(struct selinux_state *state,
+ u32 sid, u32 mls_sid, u32 *new_sid);
-int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
+int security_net_peersid_resolve(struct selinux_state *state,
+ u32 nlbl_sid, u32 nlbl_type,
u32 xfrm_sid,
u32 *peer_sid);
-int security_get_classes(char ***classes, int *nclasses);
-int security_get_permissions(char *class, char ***perms, int *nperms);
-int security_get_reject_unknown(void);
-int security_get_allow_unknown(void);
+int security_get_classes(struct selinux_state *state,
+ char ***classes, int *nclasses);
+int security_get_permissions(struct selinux_state *state,
+ char *class, char ***perms, int *nperms);
+int security_get_reject_unknown(struct selinux_state *state);
+int security_get_allow_unknown(struct selinux_state *state);
#define SECURITY_FS_USE_XATTR 1 /* use xattr */
#define SECURITY_FS_USE_TRANS 2 /* use transition SIDs, e.g. devpts/tmpfs */
@@ -221,27 +325,31 @@ int security_get_allow_unknown(void);
#define SECURITY_FS_USE_NATIVE 7 /* use native label support */
#define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */
-int security_fs_use(struct super_block *sb);
+int security_fs_use(struct selinux_state *state, struct super_block *sb);
-int security_genfs_sid(const char *fstype, char *name, u16 sclass,
- u32 *sid);
+int security_genfs_sid(struct selinux_state *state,
+ const char *fstype, char *name, u16 sclass,
+ u32 *sid);
#ifdef CONFIG_NETLABEL
-int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
+int security_netlbl_secattr_to_sid(struct selinux_state *state,
+ struct netlbl_lsm_secattr *secattr,
u32 *sid);
-int security_netlbl_sid_to_secattr(u32 sid,
+int security_netlbl_sid_to_secattr(struct selinux_state *state,
+ u32 sid,
struct netlbl_lsm_secattr *secattr);
#else
-static inline int security_netlbl_secattr_to_sid(
+static inline int security_netlbl_secattr_to_sid(struct selinux_state *state,
struct netlbl_lsm_secattr *secattr,
u32 *sid)
{
return -EIDRM;
}
-static inline int security_netlbl_sid_to_secattr(u32 sid,
- struct netlbl_lsm_secattr *secattr)
+static inline int security_netlbl_sid_to_secattr(struct selinux_state *state,
+ u32 sid,
+ struct netlbl_lsm_secattr *secattr)
{
return -ENOENT;
}
@@ -252,7 +360,7 @@ const char *security_get_initial_sid_context(u32 sid);
/*
* status notifier using mmap interface
*/
-extern struct page *selinux_kernel_status_page(void);
+extern struct page *selinux_kernel_status_page(struct selinux_state *state);
#define SELINUX_KERNEL_STATUS_VERSION 1
struct selinux_kernel_status {
@@ -266,10 +374,12 @@ struct selinux_kernel_status {
*/
} __packed;
-extern void selinux_status_update_setenforce(int enforcing);
-extern void selinux_status_update_policyload(int seqno);
+extern void selinux_status_update_setenforce(struct selinux_state *state,
+ int enforcing);
+extern void selinux_status_update_policyload(struct selinux_state *state,
+ int seqno);
extern void selinux_complete_init(void);
-extern int selinux_disable(void);
+extern int selinux_disable(struct selinux_state *state);
extern void exit_sel_fs(void);
extern struct path selinux_null;
extern struct vfsmount *selinuxfs_mount;
@@ -277,5 +387,9 @@ extern void selnl_notify_setenforce(int val);
extern void selnl_notify_policyload(u32 seqno);
extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
-#endif /* _SELINUX_SECURITY_H_ */
+extern void avtab_cache_init(void);
+extern void ebitmap_cache_init(void);
+extern void hashtab_cache_init(void);
+extern int security_sidtab_hash_stats(struct selinux_state *state, char *page);
+#endif /* _SELINUX_SECURITY_H_ */
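security.h now carries the struct selinux_state definition plus the enforcing_enabled()/enforcing_set() helpers, which collapse to a constant when CONFIG_SECURITY_SELINUX_DEVELOP is off. The sketch below is a userspace approximation of that arrangement; DEMO_DEVELOP stands in for the Kconfig option and none of the demo_* names exist in the kernel.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_DEVELOP 1

struct demo_state {
#if DEMO_DEVELOP
	bool enforcing;
#endif
	bool checkreqprot;
};

#if DEMO_DEVELOP
static inline bool enforcing_enabled(struct demo_state *state)
{
	return state->enforcing;
}

static inline void enforcing_set(struct demo_state *state, bool value)
{
	state->enforcing = value;
}
#else
static inline bool enforcing_enabled(struct demo_state *state)
{
	return true;	/* enforcing is fixed at build time */
}

static inline void enforcing_set(struct demo_state *state, bool value)
{
	/* no-op: permissive mode cannot be selected */
}
#endif

int main(void)
{
	struct demo_state state = { 0 };

	enforcing_set(&state, true);
	printf("enforcing: %d\n", enforcing_enabled(&state));
	return 0;
}

The per-capability selinux_policycap_*() wrappers in the header follow the same idea: callers ask the state object instead of reading one-off exported ints.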
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index e607b44..ac65f74 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -163,7 +163,7 @@ static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
ret = -ENOMEM;
goto out;
}
- ret = security_netif_sid(dev->name, &new->nsec.sid);
+ ret = security_netif_sid(&selinux_state, dev->name, &new->nsec.sid);
if (ret != 0)
goto out;
new->nsec.ns = ns;
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index aaba667..c99884a 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -60,7 +60,7 @@ static int selinux_netlbl_sidlookup_cached(struct sk_buff *skb,
{
int rc;
- rc = security_netlbl_secattr_to_sid(secattr, sid);
+ rc = security_netlbl_secattr_to_sid(&selinux_state, secattr, sid);
if (rc == 0 &&
(secattr->flags & NETLBL_SECATTR_CACHEABLE) &&
(secattr->flags & NETLBL_SECATTR_CACHE))
@@ -91,7 +91,8 @@ static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk)
secattr = netlbl_secattr_alloc(GFP_ATOMIC);
if (secattr == NULL)
return NULL;
- rc = security_netlbl_sid_to_secattr(sksec->sid, secattr);
+ rc = security_netlbl_sid_to_secattr(&selinux_state, sksec->sid,
+ secattr);
if (rc != 0) {
netlbl_secattr_free(secattr);
return NULL;
@@ -257,7 +258,8 @@ int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
if (secattr == NULL) {
secattr = &secattr_storage;
netlbl_secattr_init(secattr);
- rc = security_netlbl_sid_to_secattr(sid, secattr);
+ rc = security_netlbl_sid_to_secattr(&selinux_state, sid,
+ secattr);
if (rc != 0)
goto skbuff_setsid_return;
}
@@ -290,7 +292,8 @@ int selinux_netlbl_inet_conn_request(struct request_sock *req, u16 family)
return 0;
netlbl_secattr_init(&secattr);
- rc = security_netlbl_sid_to_secattr(req->secid, &secattr);
+ rc = security_netlbl_sid_to_secattr(&selinux_state, req->secid,
+ &secattr);
if (rc != 0)
goto inet_conn_request_return;
rc = netlbl_req_setattr(req, &secattr);
@@ -403,7 +406,8 @@ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
perm = RAWIP_SOCKET__RECVFROM;
}
- rc = avc_has_perm(sksec->sid, nlbl_sid, sksec->sclass, perm, ad);
+ rc = avc_has_perm(&selinux_state,
+ sksec->sid, nlbl_sid, sksec->sclass, perm, ad);
if (rc == 0)
return 0;
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index da923f8..6dd89b89 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -215,12 +215,12 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
goto out;
switch (family) {
case PF_INET:
- ret = security_node_sid(PF_INET,
+ ret = security_node_sid(&selinux_state, PF_INET,
addr, sizeof(struct in_addr), sid);
new->nsec.addr.ipv4 = *(__be32 *)addr;
break;
case PF_INET6:
- ret = security_node_sid(PF_INET6,
+ ret = security_node_sid(&selinux_state, PF_INET6,
addr, sizeof(struct in6_addr), sid);
new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
break;
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 3311cc3..9ed4c50 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -161,7 +161,7 @@ static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
new = kzalloc(sizeof(*new), GFP_ATOMIC);
if (new == NULL)
goto out;
- ret = security_port_sid(protocol, pnum, sid);
+ ret = security_port_sid(&selinux_state, protocol, pnum, sid);
if (ret != 0)
goto out;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 00eed84..dba17ea 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/string.h>
@@ -41,34 +42,6 @@
#include "objsec.h"
#include "conditional.h"
-unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
-
-static int __init checkreqprot_setup(char *str)
-{
- unsigned long checkreqprot;
- if (!kstrtoul(str, 0, &checkreqprot))
- selinux_checkreqprot = checkreqprot ? 1 : 0;
- return 1;
-}
-__setup("checkreqprot=", checkreqprot_setup);
-
-static DEFINE_MUTEX(sel_mutex);
-
-/* global data for booleans */
-static struct dentry *bool_dir;
-static int bool_num;
-static char **bool_pending_names;
-static int *bool_pending_values;
-
-/* global data for classes */
-static struct dentry *class_dir;
-static unsigned long last_class_ino;
-
-static char policy_opened;
-
-/* global data for policy capabilities */
-static struct dentry *policycap_dir;
-
enum sel_inos {
SEL_ROOT_INO = 2,
SEL_LOAD, /* load policy */
@@ -93,7 +66,51 @@ enum sel_inos {
SEL_INO_NEXT, /* The next inode number to use */
};
-static unsigned long sel_last_ino = SEL_INO_NEXT - 1;
+struct selinux_fs_info {
+ struct dentry *bool_dir;
+ unsigned int bool_num;
+ char **bool_pending_names;
+ unsigned int *bool_pending_values;
+ struct dentry *class_dir;
+ unsigned long last_class_ino;
+ bool policy_opened;
+ struct dentry *policycap_dir;
+ struct mutex mutex;
+ unsigned long last_ino;
+ struct selinux_state *state;
+ struct super_block *sb;
+};
+
+static int selinux_fs_info_create(struct super_block *sb)
+{
+ struct selinux_fs_info *fsi;
+
+ fsi = kzalloc(sizeof(*fsi), GFP_KERNEL);
+ if (!fsi)
+ return -ENOMEM;
+
+ mutex_init(&fsi->mutex);
+ fsi->last_ino = SEL_INO_NEXT - 1;
+ fsi->state = &selinux_state;
+ fsi->sb = sb;
+ sb->s_fs_info = fsi;
+ return 0;
+}
+
+static void selinux_fs_info_free(struct super_block *sb)
+{
+ struct selinux_fs_info *fsi = sb->s_fs_info;
+ int i;
+
+ if (fsi) {
+ for (i = 0; i < fsi->bool_num; i++)
+ kfree(fsi->bool_pending_names[i]);
+ kfree(fsi->bool_pending_names);
+ kfree(fsi->bool_pending_values);
+ }
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
+}
#define SEL_INITCON_INO_OFFSET 0x01000000
#define SEL_BOOL_INO_OFFSET 0x02000000
@@ -105,10 +122,12 @@ static unsigned long sel_last_ino = SEL_INO_NEXT - 1;
static ssize_t sel_read_enforce(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
char tmpbuf[TMPBUFLEN];
ssize_t length;
- length = scnprintf(tmpbuf, TMPBUFLEN, "%d", selinux_enforcing);
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%d",
+ enforcing_enabled(fsi->state));
return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}
@@ -117,9 +136,11 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char *page = NULL;
ssize_t length;
- int new_value;
+ int old_value, new_value;
if (count >= PAGE_SIZE)
return -ENOMEM;
@@ -138,23 +159,25 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
new_value = !!new_value;
- if (new_value != selinux_enforcing) {
- length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ old_value = enforcing_enabled(state);
+ if (new_value != old_value) {
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__SETENFORCE,
NULL);
if (length)
goto out;
audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS,
"enforcing=%d old_enforcing=%d auid=%u ses=%u",
- new_value, selinux_enforcing,
+ new_value, old_value,
from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
- selinux_enforcing = new_value;
- if (selinux_enforcing)
- avc_ss_reset(0);
- selnl_notify_setenforce(selinux_enforcing);
- selinux_status_update_setenforce(selinux_enforcing);
- if (!selinux_enforcing)
+ enforcing_set(state, new_value);
+ if (new_value)
+ avc_ss_reset(state->avc, 0);
+ selnl_notify_setenforce(new_value);
+ selinux_status_update_setenforce(state, new_value);
+ if (!new_value)
call_lsm_notifier(LSM_POLICY_CHANGE, NULL);
}
length = count;
@@ -175,11 +198,14 @@ static const struct file_operations sel_enforce_ops = {
static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char tmpbuf[TMPBUFLEN];
ssize_t length;
ino_t ino = file_inode(filp)->i_ino;
int handle_unknown = (ino == SEL_REJECT_UNKNOWN) ?
- security_get_reject_unknown() : !security_get_allow_unknown();
+ security_get_reject_unknown(state) :
+ !security_get_allow_unknown(state);
length = scnprintf(tmpbuf, TMPBUFLEN, "%d", handle_unknown);
return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
@@ -192,7 +218,8 @@ static const struct file_operations sel_handle_unknown_ops = {
static int sel_open_handle_status(struct inode *inode, struct file *filp)
{
- struct page *status = selinux_kernel_status_page();
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct page *status = selinux_kernel_status_page(fsi->state);
if (!status)
return -ENOMEM;
@@ -248,6 +275,7 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
char *page;
ssize_t length;
int new_value;
@@ -268,7 +296,7 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
goto out;
if (new_value) {
- length = selinux_disable();
+ length = selinux_disable(fsi->state);
if (length)
goto out;
audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS,
@@ -307,9 +335,9 @@ static const struct file_operations sel_policyvers_ops = {
};
/* declaration for sel_write_load */
-static int sel_make_bools(void);
-static int sel_make_classes(void);
-static int sel_make_policycap(void);
+static int sel_make_bools(struct selinux_fs_info *fsi);
+static int sel_make_classes(struct selinux_fs_info *fsi);
+static int sel_make_policycap(struct selinux_fs_info *fsi);
/* declaration for sel_make_class_dirs */
static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
@@ -318,11 +346,12 @@ static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
static ssize_t sel_read_mls(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
char tmpbuf[TMPBUFLEN];
ssize_t length;
length = scnprintf(tmpbuf, TMPBUFLEN, "%d",
- security_mls_enabled());
+ security_mls_enabled(fsi->state));
return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}
@@ -338,20 +367,23 @@ struct policy_load_memory {
static int sel_open_policy(struct inode *inode, struct file *filp)
{
+ struct selinux_fs_info *fsi = inode->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
struct policy_load_memory *plm = NULL;
int rc;
BUG_ON(filp->private_data);
- mutex_lock(&sel_mutex);
+ mutex_lock(&fsi->mutex);
- rc = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ rc = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
if (rc)
goto err;
rc = -EBUSY;
- if (policy_opened)
+ if (fsi->policy_opened)
goto err;
rc = -ENOMEM;
@@ -359,25 +391,25 @@ static int sel_open_policy(struct inode *inode, struct file *filp)
if (!plm)
goto err;
- if (i_size_read(inode) != security_policydb_len()) {
+ if (i_size_read(inode) != security_policydb_len(state)) {
inode_lock(inode);
- i_size_write(inode, security_policydb_len());
+ i_size_write(inode, security_policydb_len(state));
inode_unlock(inode);
}
- rc = security_read_policy(&plm->data, &plm->len);
+ rc = security_read_policy(state, &plm->data, &plm->len);
if (rc)
goto err;
- policy_opened = 1;
+ fsi->policy_opened = 1;
filp->private_data = plm;
- mutex_unlock(&sel_mutex);
+ mutex_unlock(&fsi->mutex);
return 0;
err:
- mutex_unlock(&sel_mutex);
+ mutex_unlock(&fsi->mutex);
if (plm)
vfree(plm->data);
@@ -387,11 +419,12 @@ static int sel_open_policy(struct inode *inode, struct file *filp)
static int sel_release_policy(struct inode *inode, struct file *filp)
{
+ struct selinux_fs_info *fsi = inode->i_sb->s_fs_info;
struct policy_load_memory *plm = filp->private_data;
BUG_ON(!plm);
- policy_opened = 0;
+ fsi->policy_opened = 0;
vfree(plm->data);
kfree(plm);
@@ -402,19 +435,21 @@ static int sel_release_policy(struct inode *inode, struct file *filp)
static ssize_t sel_read_policy(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
struct policy_load_memory *plm = filp->private_data;
int ret;
- mutex_lock(&sel_mutex);
+ mutex_lock(&fsi->mutex);
- ret = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ ret = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
if (ret)
goto out;
ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
out:
- mutex_unlock(&sel_mutex);
+ mutex_unlock(&fsi->mutex);
return ret;
}
@@ -468,16 +503,43 @@ static const struct file_operations sel_policy_ops = {
.llseek = generic_file_llseek,
};
+static int sel_make_policy_nodes(struct selinux_fs_info *fsi)
+{
+ int ret;
+
+ ret = sel_make_bools(fsi);
+ if (ret) {
+ pr_err("SELinux: failed to load policy booleans\n");
+ return ret;
+ }
+
+ ret = sel_make_classes(fsi);
+ if (ret) {
+ pr_err("SELinux: failed to load policy classes\n");
+ return ret;
+ }
+
+ ret = sel_make_policycap(fsi);
+ if (ret) {
+ pr_err("SELinux: failed to load policy capabilities\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static ssize_t sel_write_load(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
ssize_t length;
void *data = NULL;
- mutex_lock(&sel_mutex);
+ mutex_lock(&fsi->mutex);
- length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__LOAD_POLICY, NULL);
if (length)
goto out;
@@ -500,29 +562,15 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
if (copy_from_user(data, buf, count) != 0)
goto out;
- length = security_load_policy(data, count);
+ length = security_load_policy(fsi->state, data, count);
if (length) {
pr_warn_ratelimited("SELinux: failed to load policy\n");
goto out;
}
- length = sel_make_bools();
- if (length) {
- pr_err("SELinux: failed to load policy booleans\n");
+ length = sel_make_policy_nodes(fsi);
+ if (length)
goto out1;
- }
-
- length = sel_make_classes();
- if (length) {
- pr_err("SELinux: failed to load policy classes\n");
- goto out1;
- }
-
- length = sel_make_policycap();
- if (length) {
- pr_err("SELinux: failed to load policy capabilities\n");
- goto out1;
- }
length = count;
@@ -532,7 +580,7 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
out:
- mutex_unlock(&sel_mutex);
+ mutex_unlock(&fsi->mutex);
vfree(data);
return length;
}
@@ -544,20 +592,23 @@ static const struct file_operations sel_load_ops = {
static ssize_t sel_write_context(struct file *file, char *buf, size_t size)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char *canon = NULL;
u32 sid, len;
ssize_t length;
- length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__CHECK_CONTEXT, NULL);
if (length)
goto out;
- length = security_context_to_sid(buf, size, &sid, GFP_KERNEL);
+ length = security_context_to_sid(state, buf, size, &sid, GFP_KERNEL);
if (length)
goto out;
- length = security_sid_to_context(sid, &canon, &len);
+ length = security_sid_to_context(state, sid, &canon, &len);
if (length)
goto out;
@@ -578,21 +629,24 @@ static ssize_t sel_write_context(struct file *file, char *buf, size_t size)
static ssize_t sel_read_checkreqprot(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
char tmpbuf[TMPBUFLEN];
ssize_t length;
- length = scnprintf(tmpbuf, TMPBUFLEN, "%u", selinux_checkreqprot);
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%u", fsi->state->checkreqprot);
return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}
static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
char *page;
ssize_t length;
unsigned int new_value;
- length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__SETCHECKREQPROT,
NULL);
if (length)
@@ -613,7 +667,7 @@ static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
if (sscanf(page, "%u", &new_value) != 1)
goto out;
- selinux_checkreqprot = new_value ? 1 : 0;
+ fsi->state->checkreqprot = new_value ? 1 : 0;
length = count;
out:
kfree(page);
@@ -629,13 +683,16 @@ static ssize_t sel_write_validatetrans(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char *oldcon = NULL, *newcon = NULL, *taskcon = NULL;
char *req = NULL;
u32 osid, nsid, tsid;
u16 tclass;
int rc;
- rc = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ rc = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__VALIDATE_TRANS, NULL);
if (rc)
goto out;
@@ -673,19 +730,19 @@ static ssize_t sel_write_validatetrans(struct file *file,
if (sscanf(req, "%s %s %hu %s", oldcon, newcon, &tclass, taskcon) != 4)
goto out;
- rc = security_context_str_to_sid(oldcon, &osid, GFP_KERNEL);
+ rc = security_context_str_to_sid(state, oldcon, &osid, GFP_KERNEL);
if (rc)
goto out;
- rc = security_context_str_to_sid(newcon, &nsid, GFP_KERNEL);
+ rc = security_context_str_to_sid(state, newcon, &nsid, GFP_KERNEL);
if (rc)
goto out;
- rc = security_context_str_to_sid(taskcon, &tsid, GFP_KERNEL);
+ rc = security_context_str_to_sid(state, taskcon, &tsid, GFP_KERNEL);
if (rc)
goto out;
- rc = security_validate_transition_user(osid, nsid, tsid, tclass);
+ rc = security_validate_transition_user(state, osid, nsid, tsid, tclass);
if (!rc)
rc = count;
out:
@@ -755,13 +812,16 @@ static const struct file_operations transaction_ops = {
static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char *scon = NULL, *tcon = NULL;
u32 ssid, tsid;
u16 tclass;
struct av_decision avd;
ssize_t length;
- length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__COMPUTE_AV, NULL);
if (length)
goto out;
@@ -780,15 +840,15 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
+ length = security_context_str_to_sid(state, scon, &ssid, GFP_KERNEL);
if (length)
goto out;
- length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
+ length = security_context_str_to_sid(state, tcon, &tsid, GFP_KERNEL);
if (length)
goto out;
- security_compute_av_user(ssid, tsid, tclass, &avd);
+ security_compute_av_user(state, ssid, tsid, tclass, &avd);
length = scnprintf(buf, SIMPLE_TRANSACTION_LIMIT,
"%x %x %x %x %u %x",
@@ -803,6 +863,8 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char *scon = NULL, *tcon = NULL;
char *namebuf = NULL, *objname = NULL;
u32 ssid, tsid, newsid;
@@ -812,7 +874,8 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
u32 len;
int nargs;
- length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__COMPUTE_CREATE,
NULL);
if (length)
@@ -868,20 +931,20 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
objname = namebuf;
}
- length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
+ length = security_context_str_to_sid(state, scon, &ssid, GFP_KERNEL);
if (length)
goto out;
- length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
+ length = security_context_str_to_sid(state, tcon, &tsid, GFP_KERNEL);
if (length)
goto out;
- length = security_transition_sid_user(ssid, tsid, tclass,
+ length = security_transition_sid_user(state, ssid, tsid, tclass,
objname, &newsid);
if (length)
goto out;
- length = security_sid_to_context(newsid, &newcon, &len);
+ length = security_sid_to_context(state, newsid, &newcon, &len);
if (length)
goto out;
@@ -904,6 +967,8 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char *scon = NULL, *tcon = NULL;
u32 ssid, tsid, newsid;
u16 tclass;
@@ -911,7 +976,8 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
char *newcon = NULL;
u32 len;
- length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__COMPUTE_RELABEL,
NULL);
if (length)
@@ -931,19 +997,19 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
+ length = security_context_str_to_sid(state, scon, &ssid, GFP_KERNEL);
if (length)
goto out;
- length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
+ length = security_context_str_to_sid(state, tcon, &tsid, GFP_KERNEL);
if (length)
goto out;
- length = security_change_sid(ssid, tsid, tclass, &newsid);
+ length = security_change_sid(state, ssid, tsid, tclass, &newsid);
if (length)
goto out;
- length = security_sid_to_context(newsid, &newcon, &len);
+ length = security_sid_to_context(state, newsid, &newcon, &len);
if (length)
goto out;
@@ -962,6 +1028,8 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char *con = NULL, *user = NULL, *ptr;
u32 sid, *sids = NULL;
ssize_t length;
@@ -969,7 +1037,8 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
int i, rc;
u32 len, nsids;
- length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__COMPUTE_USER,
NULL);
if (length)
@@ -989,18 +1058,18 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s", con, user) != 2)
goto out;
- length = security_context_str_to_sid(con, &sid, GFP_KERNEL);
+ length = security_context_str_to_sid(state, con, &sid, GFP_KERNEL);
if (length)
goto out;
- length = security_get_user_sids(sid, user, &sids, &nsids);
+ length = security_get_user_sids(state, sid, user, &sids, &nsids);
if (length)
goto out;
length = sprintf(buf, "%u", nsids) + 1;
ptr = buf + length;
for (i = 0; i < nsids; i++) {
- rc = security_sid_to_context(sids[i], &newcon, &len);
+ rc = security_sid_to_context(state, sids[i], &newcon, &len);
if (rc) {
length = rc;
goto out;
@@ -1024,6 +1093,8 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char *scon = NULL, *tcon = NULL;
u32 ssid, tsid, newsid;
u16 tclass;
@@ -1031,7 +1102,8 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
char *newcon = NULL;
u32 len;
- length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__COMPUTE_MEMBER,
NULL);
if (length)
@@ -1051,19 +1123,19 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_str_to_sid(scon, &ssid, GFP_KERNEL);
+ length = security_context_str_to_sid(state, scon, &ssid, GFP_KERNEL);
if (length)
goto out;
- length = security_context_str_to_sid(tcon, &tsid, GFP_KERNEL);
+ length = security_context_str_to_sid(state, tcon, &tsid, GFP_KERNEL);
if (length)
goto out;
- length = security_member_sid(ssid, tsid, tclass, &newsid);
+ length = security_member_sid(state, ssid, tsid, tclass, &newsid);
if (length)
goto out;
- length = security_sid_to_context(newsid, &newcon, &len);
+ length = security_sid_to_context(state, newsid, &newcon, &len);
if (length)
goto out;
@@ -1097,6 +1169,7 @@ static struct inode *sel_make_inode(struct super_block *sb, int mode)
static ssize_t sel_read_bool(struct file *filep, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(filep)->i_sb->s_fs_info;
char *page = NULL;
ssize_t length;
ssize_t ret;
@@ -1104,10 +1177,11 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
const char *name = filep->f_path.dentry->d_name.name;
- mutex_lock(&sel_mutex);
+ mutex_lock(&fsi->mutex);
ret = -EINVAL;
- if (index >= bool_num || strcmp(name, bool_pending_names[index]))
+ if (index >= fsi->bool_num || strcmp(name,
+ fsi->bool_pending_names[index]))
goto out;
ret = -ENOMEM;
@@ -1115,16 +1189,16 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
if (!page)
goto out;
- cur_enforcing = security_get_bool_value(index);
+ cur_enforcing = security_get_bool_value(fsi->state, index);
if (cur_enforcing < 0) {
ret = cur_enforcing;
goto out;
}
length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
- bool_pending_values[index]);
+ fsi->bool_pending_values[index]);
ret = simple_read_from_buffer(buf, count, ppos, page, length);
out:
- mutex_unlock(&sel_mutex);
+ mutex_unlock(&fsi->mutex);
free_page((unsigned long)page);
return ret;
}
@@ -1132,22 +1206,25 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(filep)->i_sb->s_fs_info;
char *page = NULL;
ssize_t length;
int new_value;
unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
const char *name = filep->f_path.dentry->d_name.name;
- mutex_lock(&sel_mutex);
+ mutex_lock(&fsi->mutex);
- length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__SETBOOL,
NULL);
if (length)
goto out;
length = -EINVAL;
- if (index >= bool_num || strcmp(name, bool_pending_names[index]))
+ if (index >= fsi->bool_num || strcmp(name,
+ fsi->bool_pending_names[index]))
goto out;
length = -ENOMEM;
@@ -1173,11 +1250,11 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
if (new_value)
new_value = 1;
- bool_pending_values[index] = new_value;
+ fsi->bool_pending_values[index] = new_value;
length = count;
out:
- mutex_unlock(&sel_mutex);
+ mutex_unlock(&fsi->mutex);
kfree(page);
return length;
}
@@ -1192,13 +1269,15 @@ static ssize_t sel_commit_bools_write(struct file *filep,
const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(filep)->i_sb->s_fs_info;
char *page = NULL;
ssize_t length;
int new_value;
- mutex_lock(&sel_mutex);
+ mutex_lock(&fsi->mutex);
- length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ length = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__SETBOOL,
NULL);
if (length)
@@ -1225,14 +1304,15 @@ static ssize_t sel_commit_bools_write(struct file *filep,
goto out;
length = 0;
- if (new_value && bool_pending_values)
- length = security_set_bools(bool_num, bool_pending_values);
+ if (new_value && fsi->bool_pending_values)
+ length = security_set_bools(fsi->state, fsi->bool_num,
+ fsi->bool_pending_values);
if (!length)
length = count;
out:
- mutex_unlock(&sel_mutex);
+ mutex_unlock(&fsi->mutex);
kfree(page);
return length;
}
@@ -1250,12 +1330,12 @@ static void sel_remove_entries(struct dentry *de)
#define BOOL_DIR_NAME "booleans"
-static int sel_make_bools(void)
+static int sel_make_bools(struct selinux_fs_info *fsi)
{
int i, ret;
ssize_t len;
struct dentry *dentry = NULL;
- struct dentry *dir = bool_dir;
+ struct dentry *dir = fsi->bool_dir;
struct inode *inode = NULL;
struct inode_security_struct *isec;
char **names = NULL, *page;
@@ -1264,13 +1344,13 @@ static int sel_make_bools(void)
u32 sid;
/* remove any existing files */
- for (i = 0; i < bool_num; i++)
- kfree(bool_pending_names[i]);
- kfree(bool_pending_names);
- kfree(bool_pending_values);
- bool_num = 0;
- bool_pending_names = NULL;
- bool_pending_values = NULL;
+ for (i = 0; i < fsi->bool_num; i++)
+ kfree(fsi->bool_pending_names[i]);
+ kfree(fsi->bool_pending_names);
+ kfree(fsi->bool_pending_values);
+ fsi->bool_num = 0;
+ fsi->bool_pending_names = NULL;
+ fsi->bool_pending_values = NULL;
sel_remove_entries(dir);
@@ -1279,7 +1359,7 @@ static int sel_make_bools(void)
if (!page)
goto out;
- ret = security_get_bools(&num, &names, &values);
+ ret = security_get_bools(fsi->state, &num, &names, &values);
if (ret)
goto out;
@@ -1300,7 +1380,8 @@ static int sel_make_bools(void)
goto out;
isec = (struct inode_security_struct *)inode->i_security;
- ret = security_genfs_sid("selinuxfs", page, SECCLASS_FILE, &sid);
+ ret = security_genfs_sid(fsi->state, "selinuxfs", page,
+ SECCLASS_FILE, &sid);
if (ret) {
pr_warn_ratelimited("SELinux: no sid found, defaulting to security isid for %s\n",
page);
@@ -1313,9 +1394,9 @@ static int sel_make_bools(void)
inode->i_ino = i|SEL_BOOL_INO_OFFSET;
d_add(dentry, inode);
}
- bool_num = num;
- bool_pending_names = names;
- bool_pending_values = values;
+ fsi->bool_num = num;
+ fsi->bool_pending_names = names;
+ fsi->bool_pending_values = values;
free_page((unsigned long)page);
return 0;
@@ -1333,17 +1414,16 @@ static int sel_make_bools(void)
return ret;
}
-#define NULL_FILE_NAME "null"
-
-struct path selinux_null;
-
static ssize_t sel_read_avc_cache_threshold(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char tmpbuf[TMPBUFLEN];
ssize_t length;
- length = scnprintf(tmpbuf, TMPBUFLEN, "%u", avc_cache_threshold);
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
+ avc_get_cache_threshold(state->avc));
return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}
@@ -1352,11 +1432,14 @@ static ssize_t sel_write_avc_cache_threshold(struct file *file,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char *page;
ssize_t ret;
unsigned int new_value;
- ret = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+ ret = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__SETSECPARAM,
NULL);
if (ret)
@@ -1377,7 +1460,7 @@ static ssize_t sel_write_avc_cache_threshold(struct file *file,
if (sscanf(page, "%u", &new_value) != 1)
goto out;
- avc_cache_threshold = new_value;
+ avc_set_cache_threshold(state->avc, new_value);
ret = count;
out:
@@ -1388,6 +1471,8 @@ static ssize_t sel_write_avc_cache_threshold(struct file *file,
static ssize_t sel_read_avc_hash_stats(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
char *page;
ssize_t length;
@@ -1395,7 +1480,7 @@ static ssize_t sel_read_avc_hash_stats(struct file *filp, char __user *buf,
if (!page)
return -ENOMEM;
- length = avc_get_hash_stats(page);
+ length = avc_get_hash_stats(state->avc, page);
if (length >= 0)
length = simple_read_from_buffer(buf, count, ppos, page, length);
free_page((unsigned long)page);
@@ -1403,6 +1488,32 @@ static ssize_t sel_read_avc_hash_stats(struct file *filp, char __user *buf,
return length;
}
+static ssize_t sel_read_sidtab_hash_stats(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *page;
+ ssize_t length;
+
+ page = (char *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ length = security_sidtab_hash_stats(state, page);
+ if (length >= 0)
+ length = simple_read_from_buffer(buf, count, ppos, page,
+ length);
+ free_page((unsigned long)page);
+
+ return length;
+}
+
+static const struct file_operations sel_sidtab_hash_stats_ops = {
+ .read = sel_read_sidtab_hash_stats,
+ .llseek = generic_file_llseek,
+};
+
static const struct file_operations sel_avc_cache_threshold_ops = {
.read = sel_read_avc_cache_threshold,
.write = sel_write_avc_cache_threshold,
@@ -1486,6 +1597,8 @@ static const struct file_operations sel_avc_cache_stats_ops = {
static int sel_make_avc_files(struct dentry *dir)
{
+ struct super_block *sb = dir->d_sb;
+ struct selinux_fs_info *fsi = sb->s_fs_info;
int i;
static const struct tree_descr files[] = {
{ "cache_threshold",
@@ -1509,7 +1622,38 @@ static int sel_make_avc_files(struct dentry *dir)
return -ENOMEM;
inode->i_fop = files[i].ops;
- inode->i_ino = ++sel_last_ino;
+ inode->i_ino = ++fsi->last_ino;
+ d_add(dentry, inode);
+ }
+
+ return 0;
+}
+
+static int sel_make_ss_files(struct dentry *dir)
+{
+ struct super_block *sb = dir->d_sb;
+ struct selinux_fs_info *fsi = sb->s_fs_info;
+ int i;
+ static struct tree_descr files[] = {
+ { "sidtab_hash_stats", &sel_sidtab_hash_stats_ops, S_IRUGO },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(files); i++) {
+ struct inode *inode;
+ struct dentry *dentry;
+
+ dentry = d_alloc_name(dir, files[i].name);
+ if (!dentry)
+ return -ENOMEM;
+
+ inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
+ if (!inode) {
+ dput(dentry);
+ return -ENOMEM;
+ }
+
+ inode->i_fop = files[i].ops;
+ inode->i_ino = ++fsi->last_ino;
d_add(dentry, inode);
}
@@ -1519,12 +1663,13 @@ static int sel_make_avc_files(struct dentry *dir)
static ssize_t sel_read_initcon(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
char *con;
u32 sid, len;
ssize_t ret;
sid = file_inode(file)->i_ino&SEL_INO_MASK;
- ret = security_sid_to_context(sid, &con, &len);
+ ret = security_sid_to_context(fsi->state, sid, &con, &len);
if (ret)
return ret;
@@ -1612,12 +1757,13 @@ static const struct file_operations sel_perm_ops = {
static ssize_t sel_read_policycap(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
int value;
char tmpbuf[TMPBUFLEN];
ssize_t length;
unsigned long i_ino = file_inode(file)->i_ino;
- value = security_policycap_supported(i_ino & SEL_INO_MASK);
+ value = security_policycap_supported(fsi->state, i_ino & SEL_INO_MASK);
length = scnprintf(tmpbuf, TMPBUFLEN, "%d", value);
return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
@@ -1631,10 +1777,11 @@ static const struct file_operations sel_policycap_ops = {
static int sel_make_perm_files(char *objclass, int classvalue,
struct dentry *dir)
{
+ struct selinux_fs_info *fsi = dir->d_sb->s_fs_info;
int i, rc, nperms;
char **perms;
- rc = security_get_permissions(objclass, &perms, &nperms);
+ rc = security_get_permissions(fsi->state, objclass, &perms, &nperms);
if (rc)
return rc;
@@ -1668,6 +1815,8 @@ static int sel_make_perm_files(char *objclass, int classvalue,
static int sel_make_class_dir_entries(char *classname, int index,
struct dentry *dir)
{
+ struct super_block *sb = dir->d_sb;
+ struct selinux_fs_info *fsi = sb->s_fs_info;
struct dentry *dentry = NULL;
struct inode *inode = NULL;
int rc;
@@ -1684,7 +1833,7 @@ static int sel_make_class_dir_entries(char *classname, int index,
inode->i_ino = sel_class_to_ino(index);
d_add(dentry, inode);
- dentry = sel_make_dir(dir, "perms", &last_class_ino);
+ dentry = sel_make_dir(dir, "perms", &fsi->last_class_ino);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -1693,26 +1842,27 @@ static int sel_make_class_dir_entries(char *classname, int index,
return rc;
}
-static int sel_make_classes(void)
+static int sel_make_classes(struct selinux_fs_info *fsi)
{
+
int rc, nclasses, i;
char **classes;
/* delete any existing entries */
- sel_remove_entries(class_dir);
+ sel_remove_entries(fsi->class_dir);
- rc = security_get_classes(&classes, &nclasses);
+ rc = security_get_classes(fsi->state, &classes, &nclasses);
if (rc)
return rc;
/* +2 since classes are 1-indexed */
- last_class_ino = sel_class_to_ino(nclasses + 2);
+ fsi->last_class_ino = sel_class_to_ino(nclasses + 2);
for (i = 0; i < nclasses; i++) {
struct dentry *class_name_dir;
- class_name_dir = sel_make_dir(class_dir, classes[i],
- &last_class_ino);
+ class_name_dir = sel_make_dir(fsi->class_dir, classes[i],
+ &fsi->last_class_ino);
if (IS_ERR(class_name_dir)) {
rc = PTR_ERR(class_name_dir);
goto out;
@@ -1732,25 +1882,25 @@ static int sel_make_classes(void)
return rc;
}
-static int sel_make_policycap(void)
+static int sel_make_policycap(struct selinux_fs_info *fsi)
{
unsigned int iter;
struct dentry *dentry = NULL;
struct inode *inode = NULL;
- sel_remove_entries(policycap_dir);
+ sel_remove_entries(fsi->policycap_dir);
for (iter = 0; iter <= POLICYDB_CAPABILITY_MAX; iter++) {
if (iter < ARRAY_SIZE(selinux_policycap_names))
- dentry = d_alloc_name(policycap_dir,
+ dentry = d_alloc_name(fsi->policycap_dir,
selinux_policycap_names[iter]);
else
- dentry = d_alloc_name(policycap_dir, "unknown");
+ dentry = d_alloc_name(fsi->policycap_dir, "unknown");
if (dentry == NULL)
return -ENOMEM;
- inode = sel_make_inode(policycap_dir->d_sb, S_IFREG | S_IRUGO);
+ inode = sel_make_inode(fsi->sb, S_IFREG | 0444);
if (inode == NULL)
return -ENOMEM;
@@ -1789,8 +1939,11 @@ static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
return dentry;
}
+#define NULL_FILE_NAME "null"
+
static int sel_fill_super(struct super_block *sb, void *data, int silent)
{
+ struct selinux_fs_info *fsi;
int ret;
struct dentry *dentry;
struct inode *inode;
@@ -1818,14 +1971,20 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
S_IWUGO},
/* last one */ {""}
};
+
+ ret = selinux_fs_info_create(sb);
+ if (ret)
+ goto err;
+
ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);
if (ret)
goto err;
- bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &sel_last_ino);
- if (IS_ERR(bool_dir)) {
- ret = PTR_ERR(bool_dir);
- bool_dir = NULL;
+ fsi = sb->s_fs_info;
+ fsi->bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &fsi->last_ino);
+ if (IS_ERR(fsi->bool_dir)) {
+ ret = PTR_ERR(fsi->bool_dir);
+ fsi->bool_dir = NULL;
goto err;
}
@@ -1839,7 +1998,7 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
if (!inode)
goto err;
- inode->i_ino = ++sel_last_ino;
+ inode->i_ino = ++fsi->last_ino;
isec = (struct inode_security_struct *)inode->i_security;
isec->sid = SECINITSID_DEVNULL;
isec->sclass = SECCLASS_CHR_FILE;
@@ -1847,19 +2006,26 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3));
d_add(dentry, inode);
- selinux_null.dentry = dentry;
- dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino);
+ dentry = sel_make_dir(sb->s_root, "avc", &fsi->last_ino);
if (IS_ERR(dentry)) {
ret = PTR_ERR(dentry);
goto err;
}
ret = sel_make_avc_files(dentry);
+
+ dentry = sel_make_dir(sb->s_root, "ss", &fsi->last_ino);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ ret = sel_make_ss_files(dentry);
if (ret)
goto err;
- dentry = sel_make_dir(sb->s_root, "initial_contexts", &sel_last_ino);
+ dentry = sel_make_dir(sb->s_root, "initial_contexts", &fsi->last_ino);
if (IS_ERR(dentry)) {
ret = PTR_ERR(dentry);
goto err;
@@ -1869,23 +2035,31 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
if (ret)
goto err;
- class_dir = sel_make_dir(sb->s_root, "class", &sel_last_ino);
- if (IS_ERR(class_dir)) {
- ret = PTR_ERR(class_dir);
- class_dir = NULL;
+ fsi->class_dir = sel_make_dir(sb->s_root, "class", &fsi->last_ino);
+ if (IS_ERR(fsi->class_dir)) {
+ ret = PTR_ERR(fsi->class_dir);
+ fsi->class_dir = NULL;
goto err;
}
- policycap_dir = sel_make_dir(sb->s_root, "policy_capabilities", &sel_last_ino);
- if (IS_ERR(policycap_dir)) {
- ret = PTR_ERR(policycap_dir);
- policycap_dir = NULL;
+ fsi->policycap_dir = sel_make_dir(sb->s_root, "policy_capabilities",
+ &fsi->last_ino);
+ if (IS_ERR(fsi->policycap_dir)) {
+ ret = PTR_ERR(fsi->policycap_dir);
+ fsi->policycap_dir = NULL;
goto err;
}
+
+ ret = sel_make_policy_nodes(fsi);
+ if (ret)
+ goto err;
return 0;
err:
printk(KERN_ERR "SELinux: %s: failed while creating inodes\n",
__func__);
+
+ selinux_fs_info_free(sb);
+
return ret;
}
@@ -1895,16 +2069,25 @@ static struct dentry *sel_mount(struct file_system_type *fs_type,
return mount_single(fs_type, flags, data, sel_fill_super);
}
+static void sel_kill_sb(struct super_block *sb)
+{
+ selinux_fs_info_free(sb);
+ kill_litter_super(sb);
+}
+
static struct file_system_type sel_fs_type = {
.name = "selinuxfs",
.mount = sel_mount,
- .kill_sb = kill_litter_super,
+ .kill_sb = sel_kill_sb,
};
struct vfsmount *selinuxfs_mount;
+struct path selinux_null;
static int __init init_sel_fs(void)
{
+ struct qstr null_name = QSTR_INIT(NULL_FILE_NAME,
+ sizeof(NULL_FILE_NAME)-1);
int err;
if (!selinux_enabled)
@@ -1926,6 +2109,13 @@ static int __init init_sel_fs(void)
err = PTR_ERR(selinuxfs_mount);
selinuxfs_mount = NULL;
}
+ selinux_null.dentry = d_hash_and_lookup(selinux_null.mnt->mnt_root,
+ &null_name);
+ if (IS_ERR(selinux_null.dentry)) {
+ pr_err("selinuxfs: could not lookup null!\n");
+ err = PTR_ERR(selinux_null.dentry);
+ selinux_null.dentry = NULL;
+ }
return err;
}
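
Editor's note: the selinuxfs hunks above fold the old file-scope globals (bool_dir, class_dir, policycap_dir and the inode counters) into a per-superblock selinux_fs_info hung off sb->s_fs_info, allocated from sel_fill_super() and freed from sel_kill_sb(). A minimal userspace sketch of that "globals into an instance struct" pattern follows; the names are made up for illustration and are not the kernel API.

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for selinux_fs_info: all per-mount state
     * lives in one struct instead of file-scope globals. */
    struct fs_info {
        unsigned long last_ino;   /* inode counter, previously a global */
        char bool_dir[32];        /* directory "handles", previously globals */
        char class_dir[32];
    };

    /* Was: make_dir(..., &global_last_ino); now the counter comes from
     * the instance that owns the directory tree. */
    static void make_dir(struct fs_info *fsi, const char *name,
                         char *slot, size_t len)
    {
        snprintf(slot, len, "%s", name);
        fsi->last_ino++;                  /* each mount numbers its own inodes */
    }

    static struct fs_info *fill_super(void)
    {
        struct fs_info *fsi = calloc(1, sizeof(*fsi));
        if (!fsi)
            return NULL;
        make_dir(fsi, "booleans", fsi->bool_dir, sizeof(fsi->bool_dir));
        make_dir(fsi, "class", fsi->class_dir, sizeof(fsi->class_dir));
        return fsi;
    }

    static void kill_super(struct fs_info *fsi)
    {
        free(fsi);                        /* mirrors selinux_fs_info_free() in sel_kill_sb() */
    }

    int main(void)
    {
        struct fs_info *a = fill_super(), *b = fill_super();
        /* Two "mounts" no longer share inode counters or directory state. */
        printf("a: last_ino=%lu, b: last_ino=%lu\n", a->last_ino, b->last_ino);
        kill_super(a);
        kill_super(b);
        return 0;
    }
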
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 2c3c7d0..a2c9148 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -655,7 +655,8 @@ int avtab_write(struct policydb *p, struct avtab *a, void *fp)
return rc;
}
-void avtab_cache_init(void)
+
+void __init avtab_cache_init(void)
{
avtab_node_cachep = kmem_cache_create("avtab_node",
sizeof(struct avtab_node),
@@ -664,9 +665,3 @@ void avtab_cache_init(void)
sizeof(struct avtab_extended_perms),
0, SLAB_PANIC, NULL);
}
-
-void avtab_cache_destroy(void)
-{
- kmem_cache_destroy(avtab_node_cachep);
- kmem_cache_destroy(avtab_xperms_cachep);
-}
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 725853c..0d652fa 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -114,9 +114,6 @@ struct avtab_node *avtab_search_node(struct avtab *h, struct avtab_key *key);
struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified);
-void avtab_cache_init(void);
-void avtab_cache_destroy(void);
-
#define MAX_AVTAB_HASH_BITS 16
#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index 2260c44..69fd194 100644
--- a/security/selinux/ss/context.h
+++ b/security/selinux/ss/context.h
@@ -31,6 +31,7 @@ struct context {
u32 len; /* length of string in bytes */
struct mls_range range;
char *str; /* string representation if context cannot be mapped. */
+ u32 hash; /* a hash of the string representation */
};
static inline void mls_context_init(struct context *c)
@@ -136,12 +137,13 @@ static inline int context_cpy(struct context *dst, struct context *src)
kfree(dst->str);
return rc;
}
+ dst->hash = src->hash;
return 0;
}
static inline void context_destroy(struct context *c)
{
- c->user = c->role = c->type = 0;
+ c->user = c->role = c->type = c->hash = 0;
kfree(c->str);
c->str = NULL;
c->len = 0;
@@ -150,6 +152,8 @@ static inline void context_destroy(struct context *c)
static inline int context_cmp(struct context *c1, struct context *c2)
{
+ if (c1->hash && c2->hash && (c1->hash != c2->hash))
+ return 0;
if (c1->len && c2->len)
return (c1->len == c2->len && !strcmp(c1->str, c2->str));
if (c1->len || c2->len)
@@ -160,5 +164,10 @@ static inline int context_cmp(struct context *c1, struct context *c2)
mls_context_cmp(c1, c2));
}
+static inline unsigned int context_compute_hash(const char *s)
+{
+ return full_name_hash(NULL, s, strlen(s));
+}
+
#endif /* _SS_CONTEXT_H_ */
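
Editor's note: the context.h hunks cache a hash of each context's string form and let context_cmp() reject mismatched contexts before the field-by-field comparison; context_compute_hash() wraps full_name_hash(). A standalone sketch of the same fast path, with a plain FNV-1a hash standing in for full_name_hash(), which exists only in the kernel:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Stand-in for full_name_hash(): any stable string hash works here. */
    static uint32_t str_hash(const char *s)
    {
        uint32_t h = 2166136261u;            /* FNV-1a */
        while (*s) {
            h ^= (unsigned char)*s++;
            h *= 16777619u;
        }
        return h;
    }

    struct ctx {
        const char *str;   /* string representation */
        uint32_t hash;     /* cached hash of the string, 0 = not computed */
    };

    static int ctx_cmp(const struct ctx *a, const struct ctx *b)
    {
        /* Cheap reject first: different hashes cannot be equal contexts. */
        if (a->hash && b->hash && a->hash != b->hash)
            return 0;
        /* Hashes match (or are unset): fall back to the full comparison. */
        return strcmp(a->str, b->str) == 0;
    }

    int main(void)
    {
        struct ctx a = { "u:r:t:s0", 0 }, b = { "u:r:other_t:s0", 0 };
        a.hash = str_hash(a.str);
        b.hash = str_hash(b.str);
        printf("equal=%d\n", ctx_cmp(&a, &b));   /* 0: rejected by the hash check */
        return 0;
    }
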
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index b6a78b0..5ae8c61 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -523,14 +523,9 @@ int ebitmap_write(struct ebitmap *e, void *fp)
return 0;
}
-void ebitmap_cache_init(void)
+void __init ebitmap_cache_init(void)
{
ebitmap_node_cachep = kmem_cache_create("ebitmap_node",
sizeof(struct ebitmap_node),
0, SLAB_PANIC, NULL);
}
-
-void ebitmap_cache_destroy(void)
-{
- kmem_cache_destroy(ebitmap_node_cachep);
-}
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index edf4fa3..6aa7cf6 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -131,9 +131,6 @@ void ebitmap_destroy(struct ebitmap *e);
int ebitmap_read(struct ebitmap *e, void *fp);
int ebitmap_write(struct ebitmap *e, void *fp);
-void ebitmap_cache_init(void);
-void ebitmap_cache_destroy(void);
-
#ifdef CONFIG_NETLABEL
int ebitmap_netlbl_export(struct ebitmap *ebmap,
struct netlbl_lsm_catmap **catmap);
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 6bd6dcd..836aa5b 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -10,6 +10,8 @@
#include <linux/sched.h>
#include "hashtab.h"
+static struct kmem_cache *hashtab_node_cachep;
+
struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
u32 size)
@@ -58,7 +60,7 @@ int hashtab_insert(struct hashtab *h, void *key, void *datum)
if (cur && (h->keycmp(h, key, cur->key) == 0))
return -EEXIST;
- newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+ newnode = kmem_cache_zalloc(hashtab_node_cachep, GFP_KERNEL);
if (!newnode)
return -ENOMEM;
newnode->key = key;
@@ -107,7 +109,7 @@ void hashtab_destroy(struct hashtab *h)
while (cur) {
temp = cur;
cur = cur->next;
- kfree(temp);
+ kmem_cache_free(hashtab_node_cachep, temp);
}
h->htable[i] = NULL;
}
@@ -167,3 +169,10 @@ void hashtab_stat(struct hashtab *h, struct hashtab_info *info)
info->slots_used = slots_used;
info->max_chain_len = max_chain_len;
}
+
+void __init hashtab_cache_init(void)
+{
+ hashtab_node_cachep = kmem_cache_create("hashtab_node",
+ sizeof(struct hashtab_node),
+ 0, SLAB_PANIC, NULL);
+}
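
Editor's note: hashtab.c now draws its nodes from a dedicated hashtab_node slab cache, created once from hashtab_cache_init(), instead of generic kzalloc()/kfree(). Userspace has no kmem_cache, so the following is only a rough analogue: a one-type free list that recycles fixed-size nodes, which is the effect a per-type cache buys for objects allocated and freed in large numbers.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Rough userspace analogue of a kmem_cache for one object type. */
    struct node {
        void *key;
        void *datum;
        struct node *next;     /* doubles as the free-list link when unused */
    };

    static struct node *free_list;

    static struct node *node_alloc(void)
    {
        struct node *n = free_list;
        if (n)
            free_list = n->next;           /* reuse a recycled node */
        else
            n = malloc(sizeof(*n));        /* cache empty: fall back to the heap */
        if (n)
            memset(n, 0, sizeof(*n));      /* mirrors kmem_cache_zalloc() */
        return n;
    }

    static void node_free(struct node *n)
    {
        n->next = free_list;               /* mirrors kmem_cache_free() */
        free_list = n;
    }

    int main(void)
    {
        struct node *a = node_alloc();
        node_free(a);
        struct node *b = node_alloc();     /* comes straight back off the free list */
        printf("recycled=%d\n", a == b);
        node_free(b);
        return 0;
    }
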
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index ad982ce..18ba0c2 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -33,20 +33,20 @@
* Return the length in bytes for the MLS fields of the
* security context string representation of `context'.
*/
-int mls_compute_context_len(struct context *context)
+int mls_compute_context_len(struct policydb *p, struct context *context)
{
int i, l, len, head, prev;
char *nm;
struct ebitmap *e;
struct ebitmap_node *node;
- if (!policydb.mls_enabled)
+ if (!p->mls_enabled)
return 0;
len = 1; /* for the beginning ":" */
for (l = 0; l < 2; l++) {
int index_sens = context->range.level[l].sens;
- len += strlen(sym_name(&policydb, SYM_LEVELS, index_sens - 1));
+ len += strlen(sym_name(p, SYM_LEVELS, index_sens - 1));
/* categories */
head = -2;
@@ -56,17 +56,17 @@ int mls_compute_context_len(struct context *context)
if (i - prev > 1) {
/* one or more negative bits are skipped */
if (head != prev) {
- nm = sym_name(&policydb, SYM_CATS, prev);
+ nm = sym_name(p, SYM_CATS, prev);
len += strlen(nm) + 1;
}
- nm = sym_name(&policydb, SYM_CATS, i);
+ nm = sym_name(p, SYM_CATS, i);
len += strlen(nm) + 1;
head = i;
}
prev = i;
}
if (prev != head) {
- nm = sym_name(&policydb, SYM_CATS, prev);
+ nm = sym_name(p, SYM_CATS, prev);
len += strlen(nm) + 1;
}
if (l == 0) {
@@ -86,7 +86,8 @@ int mls_compute_context_len(struct context *context)
* the MLS fields of `context' into the string `*scontext'.
* Update `*scontext' to point to the end of the MLS fields.
*/
-void mls_sid_to_context(struct context *context,
+void mls_sid_to_context(struct policydb *p,
+ struct context *context,
char **scontext)
{
char *scontextp, *nm;
@@ -94,7 +95,7 @@ void mls_sid_to_context(struct context *context,
struct ebitmap *e;
struct ebitmap_node *node;
- if (!policydb.mls_enabled)
+ if (!p->mls_enabled)
return;
scontextp = *scontext;
@@ -103,7 +104,7 @@ void mls_sid_to_context(struct context *context,
scontextp++;
for (l = 0; l < 2; l++) {
- strcpy(scontextp, sym_name(&policydb, SYM_LEVELS,
+ strcpy(scontextp, sym_name(p, SYM_LEVELS,
context->range.level[l].sens - 1));
scontextp += strlen(scontextp);
@@ -119,7 +120,7 @@ void mls_sid_to_context(struct context *context,
*scontextp++ = '.';
else
*scontextp++ = ',';
- nm = sym_name(&policydb, SYM_CATS, prev);
+ nm = sym_name(p, SYM_CATS, prev);
strcpy(scontextp, nm);
scontextp += strlen(nm);
}
@@ -127,7 +128,7 @@ void mls_sid_to_context(struct context *context,
*scontextp++ = ':';
else
*scontextp++ = ',';
- nm = sym_name(&policydb, SYM_CATS, i);
+ nm = sym_name(p, SYM_CATS, i);
strcpy(scontextp, nm);
scontextp += strlen(nm);
head = i;
@@ -140,7 +141,7 @@ void mls_sid_to_context(struct context *context,
*scontextp++ = '.';
else
*scontextp++ = ',';
- nm = sym_name(&policydb, SYM_CATS, prev);
+ nm = sym_name(p, SYM_CATS, prev);
strcpy(scontextp, nm);
scontextp += strlen(nm);
}
@@ -217,9 +218,7 @@ int mls_context_isvalid(struct policydb *p, struct context *c)
/*
* Set the MLS fields in the security context structure
* `context' based on the string representation in
- * the string `*scontext'. Update `*scontext' to
- * point to the end of the string representation of
- * the MLS fields.
+ * the string `scontext'.
*
* This function modifies the string in place, inserting
* NULL characters to terminate the MLS fields.
@@ -234,22 +233,21 @@ int mls_context_isvalid(struct policydb *p, struct context *c)
*/
int mls_context_to_sid(struct policydb *pol,
char oldc,
- char **scontext,
+ char *scontext,
struct context *context,
struct sidtab *s,
u32 def_sid)
{
-
- char delim;
- char *scontextp, *p, *rngptr;
+ char *sensitivity, *cur_cat, *next_cat, *rngptr;
struct level_datum *levdatum;
struct cat_datum *catdatum, *rngdatum;
- int l, rc = -EINVAL;
+ int l, rc, i;
+ char *rangep[2];
if (!pol->mls_enabled) {
- if (def_sid != SECSID_NULL && oldc)
- *scontext += strlen(*scontext) + 1;
- return 0;
+ if ((def_sid != SECSID_NULL && oldc) || (*scontext) == '\0')
+ return 0;
+ return -EINVAL;
}
/*
@@ -260,113 +258,94 @@ int mls_context_to_sid(struct policydb *pol,
struct context *defcon;
if (def_sid == SECSID_NULL)
- goto out;
+ return -EINVAL;
defcon = sidtab_search(s, def_sid);
if (!defcon)
- goto out;
+ return -EINVAL;
- rc = mls_context_cpy(context, defcon);
- goto out;
+ return mls_context_cpy(context, defcon);
}
- /* Extract low sensitivity. */
- scontextp = p = *scontext;
- while (*p && *p != ':' && *p != '-')
- p++;
+ /*
+ * If we're dealing with a range, figure out where the two parts
+ * of the range begin.
+ */
+ rangep[0] = scontext;
+ rangep[1] = strchr(scontext, '-');
+ if (rangep[1]) {
+ rangep[1][0] = '\0';
+ rangep[1]++;
+ }
- delim = *p;
- if (delim != '\0')
- *p++ = '\0';
-
+ /* For each part of the range: */
for (l = 0; l < 2; l++) {
- levdatum = hashtab_search(pol->p_levels.table, scontextp);
- if (!levdatum) {
- rc = -EINVAL;
- goto out;
- }
+ /* Split sensitivity and category set. */
+ sensitivity = rangep[l];
+ if (sensitivity == NULL)
+ break;
+ next_cat = strchr(sensitivity, ':');
+ if (next_cat)
+ *(next_cat++) = '\0';
+ /* Parse sensitivity. */
+ levdatum = hashtab_search(pol->p_levels.table, sensitivity);
+ if (!levdatum)
+ return -EINVAL;
context->range.level[l].sens = levdatum->level->sens;
- if (delim == ':') {
- /* Extract category set. */
- while (1) {
- scontextp = p;
- while (*p && *p != ',' && *p != '-')
- p++;
- delim = *p;
- if (delim != '\0')
- *p++ = '\0';
+ /* Extract category set. */
+ while (next_cat != NULL) {
+ cur_cat = next_cat;
+ next_cat = strchr(next_cat, ',');
+ if (next_cat != NULL)
+ *(next_cat++) = '\0';
- /* Separate into range if exists */
- rngptr = strchr(scontextp, '.');
- if (rngptr != NULL) {
- /* Remove '.' */
- *rngptr++ = '\0';
- }
+ /* Separate into range if exists */
+ rngptr = strchr(cur_cat, '.');
+ if (rngptr != NULL) {
+ /* Remove '.' */
+ *rngptr++ = '\0';
+ }
- catdatum = hashtab_search(pol->p_cats.table,
- scontextp);
- if (!catdatum) {
- rc = -EINVAL;
- goto out;
- }
+ catdatum = hashtab_search(pol->p_cats.table, cur_cat);
+ if (!catdatum)
+ return -EINVAL;
- rc = ebitmap_set_bit(&context->range.level[l].cat,
- catdatum->value - 1, 1);
+ rc = ebitmap_set_bit(&context->range.level[l].cat,
+ catdatum->value - 1, 1);
+ if (rc)
+ return rc;
+
+ /* If range, set all categories in range */
+ if (rngptr == NULL)
+ continue;
+
+ rngdatum = hashtab_search(pol->p_cats.table, rngptr);
+ if (!rngdatum)
+ return -EINVAL;
+
+ if (catdatum->value >= rngdatum->value)
+ return -EINVAL;
+
+ for (i = catdatum->value; i < rngdatum->value; i++) {
+ rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1);
if (rc)
- goto out;
-
- /* If range, set all categories in range */
- if (rngptr) {
- int i;
-
- rngdatum = hashtab_search(pol->p_cats.table, rngptr);
- if (!rngdatum) {
- rc = -EINVAL;
- goto out;
- }
-
- if (catdatum->value >= rngdatum->value) {
- rc = -EINVAL;
- goto out;
- }
-
- for (i = catdatum->value; i < rngdatum->value; i++) {
- rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1);
- if (rc)
- goto out;
- }
- }
-
- if (delim != ',')
- break;
+ return rc;
}
}
- if (delim == '-') {
- /* Extract high sensitivity. */
- scontextp = p;
- while (*p && *p != ':')
- p++;
-
- delim = *p;
- if (delim != '\0')
- *p++ = '\0';
- } else
- break;
}
- if (l == 0) {
+ /* If we didn't see a '-', the range start is also the range end. */
+ if (rangep[1] == NULL) {
context->range.level[1].sens = context->range.level[0].sens;
rc = ebitmap_cpy(&context->range.level[1].cat,
&context->range.level[0].cat);
if (rc)
- goto out;
+ return rc;
}
- *scontext = ++p;
- rc = 0;
-out:
- return rc;
+
+ return 0;
}
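
Editor's note: the rewritten mls_context_to_sid() above replaces the hand-rolled character walk with strchr(): split once on '-' for the low/high halves of a range, then on ':' to separate sensitivity from categories, then on ',' and '.' for individual categories and category ranges, returning early on any lookup failure instead of funnelling through a single out label. A standalone sketch of just that tokenizing, with a made-up context string and no policy lookups:

    #include <stdio.h>
    #include <string.h>

    /* Tokenize one half of an MLS range: "<sens>[:<cat>,<cat>.<cat>,...]" */
    static void parse_level(char *level)
    {
        char *cats = strchr(level, ':');
        if (cats)
            *cats++ = '\0';                 /* cut off the sensitivity */
        printf("  sensitivity: %s\n", level);

        while (cats) {
            char *cur = cats;
            cats = strchr(cats, ',');
            if (cats)
                *cats++ = '\0';             /* isolate one category token */

            char *hi = strchr(cur, '.');    /* "c2.c5" denotes a category range */
            if (hi)
                *hi++ = '\0';
            if (hi)
                printf("  category range: %s..%s\n", cur, hi);
            else
                printf("  category: %s\n", cur);
        }
    }

    int main(void)
    {
        /* Writable buffer: parsing NUL-terminates tokens in place. */
        char scontext[] = "s0:c0,c2.c5-s15:c0.c1023";
        char *range[2];

        range[0] = scontext;
        range[1] = strchr(scontext, '-');   /* optional high end of the range */
        if (range[1])
            *range[1]++ = '\0';

        for (int l = 0; l < 2 && range[l]; l++) {
            printf("level %d:\n", l);
            parse_level(range[l]);
        }
        return 0;
    }
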
/*
@@ -375,23 +354,22 @@ int mls_context_to_sid(struct policydb *pol,
* the string `str'. This function will allocate temporary memory with the
* given constraints of gfp_mask.
*/
-int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
+int mls_from_string(struct policydb *p, char *str, struct context *context,
+ gfp_t gfp_mask)
{
- char *tmpstr, *freestr;
+ char *tmpstr;
int rc;
- if (!policydb.mls_enabled)
+ if (!p->mls_enabled)
return -EINVAL;
- /* we need freestr because mls_context_to_sid will change
- the value of tmpstr */
- tmpstr = freestr = kstrdup(str, gfp_mask);
+ tmpstr = kstrdup(str, gfp_mask);
if (!tmpstr) {
rc = -ENOMEM;
} else {
- rc = mls_context_to_sid(&policydb, ':', &tmpstr, context,
+ rc = mls_context_to_sid(p, ':', tmpstr, context,
NULL, SECSID_NULL);
- kfree(freestr);
+ kfree(tmpstr);
}
return rc;
@@ -417,10 +395,11 @@ int mls_range_set(struct context *context,
return rc;
}
-int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
+int mls_setup_user_range(struct policydb *p,
+ struct context *fromcon, struct user_datum *user,
struct context *usercon)
{
- if (policydb.mls_enabled) {
+ if (p->mls_enabled) {
struct mls_level *fromcon_sen = &(fromcon->range.level[0]);
struct mls_level *fromcon_clr = &(fromcon->range.level[1]);
struct mls_level *user_low = &(user->range.level[0]);
@@ -457,53 +436,52 @@ int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
/*
* Convert the MLS fields in the security context
- * structure `c' from the values specified in the
- * policy `oldp' to the values specified in the policy `newp'.
+ * structure `oldc' from the values specified in the
+ * policy `oldp' to the values specified in the policy `newp',
+ * storing the resulting context in `newc'.
*/
int mls_convert_context(struct policydb *oldp,
struct policydb *newp,
- struct context *c)
+ struct context *oldc,
+ struct context *newc)
{
struct level_datum *levdatum;
struct cat_datum *catdatum;
- struct ebitmap bitmap;
struct ebitmap_node *node;
int l, i;
- if (!policydb.mls_enabled)
+ if (!oldp->mls_enabled || !newp->mls_enabled)
return 0;
for (l = 0; l < 2; l++) {
levdatum = hashtab_search(newp->p_levels.table,
sym_name(oldp, SYM_LEVELS,
- c->range.level[l].sens - 1));
+ oldc->range.level[l].sens - 1));
if (!levdatum)
return -EINVAL;
- c->range.level[l].sens = levdatum->level->sens;
+ newc->range.level[l].sens = levdatum->level->sens;
- ebitmap_init(&bitmap);
- ebitmap_for_each_positive_bit(&c->range.level[l].cat, node, i) {
+ ebitmap_for_each_positive_bit(&oldc->range.level[l].cat,
+ node, i) {
int rc;
catdatum = hashtab_search(newp->p_cats.table,
sym_name(oldp, SYM_CATS, i));
if (!catdatum)
return -EINVAL;
- rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1);
+ rc = ebitmap_set_bit(&newc->range.level[l].cat,
+ catdatum->value - 1, 1);
if (rc)
return rc;
-
- cond_resched();
}
- ebitmap_destroy(&c->range.level[l].cat);
- c->range.level[l].cat = bitmap;
}
return 0;
}
-int mls_compute_sid(struct context *scontext,
+int mls_compute_sid(struct policydb *p,
+ struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 specified,
@@ -515,7 +493,7 @@ int mls_compute_sid(struct context *scontext,
struct class_datum *cladatum;
int default_range = 0;
- if (!policydb.mls_enabled)
+ if (!p->mls_enabled)
return 0;
switch (specified) {
@@ -524,12 +502,12 @@ int mls_compute_sid(struct context *scontext,
rtr.source_type = scontext->type;
rtr.target_type = tcontext->type;
rtr.target_class = tclass;
- r = hashtab_search(policydb.range_tr, &rtr);
+ r = hashtab_search(p->range_tr, &rtr);
if (r)
return mls_range_set(newcontext, r);
- if (tclass && tclass <= policydb.p_classes.nprim) {
- cladatum = policydb.class_val_to_struct[tclass - 1];
+ if (tclass && tclass <= p->p_classes.nprim) {
+ cladatum = p->class_val_to_struct[tclass - 1];
if (cladatum)
default_range = cladatum->default_range;
}
@@ -551,7 +529,7 @@ int mls_compute_sid(struct context *scontext,
/* Fallthrough */
case AVTAB_CHANGE:
- if ((tclass == policydb.process_class) || (sock == true))
+ if ((tclass == p->process_class) || (sock == true))
/* Use the process MLS attributes. */
return mls_context_cpy(newcontext, scontext);
else
@@ -577,10 +555,11 @@ int mls_compute_sid(struct context *scontext,
* NetLabel MLS sensitivity level field.
*
*/
-void mls_export_netlbl_lvl(struct context *context,
+void mls_export_netlbl_lvl(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr)
{
- if (!policydb.mls_enabled)
+ if (!p->mls_enabled)
return;
secattr->attr.mls.lvl = context->range.level[0].sens - 1;
@@ -597,10 +576,11 @@ void mls_export_netlbl_lvl(struct context *context,
* NetLabel MLS sensitivity level into the context.
*
*/
-void mls_import_netlbl_lvl(struct context *context,
+void mls_import_netlbl_lvl(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr)
{
- if (!policydb.mls_enabled)
+ if (!p->mls_enabled)
return;
context->range.level[0].sens = secattr->attr.mls.lvl + 1;
@@ -617,12 +597,13 @@ void mls_import_netlbl_lvl(struct context *context,
* MLS category field. Returns zero on success, negative values on failure.
*
*/
-int mls_export_netlbl_cat(struct context *context,
+int mls_export_netlbl_cat(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr)
{
int rc;
- if (!policydb.mls_enabled)
+ if (!p->mls_enabled)
return 0;
rc = ebitmap_netlbl_export(&context->range.level[0].cat,
@@ -645,12 +626,13 @@ int mls_export_netlbl_cat(struct context *context,
* negative values on failure.
*
*/
-int mls_import_netlbl_cat(struct context *context,
+int mls_import_netlbl_cat(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr)
{
int rc;
- if (!policydb.mls_enabled)
+ if (!p->mls_enabled)
return 0;
rc = ebitmap_netlbl_import(&context->range.level[0].cat,
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h
index 131d762..7954b1e 100644
--- a/security/selinux/ss/mls.h
+++ b/security/selinux/ss/mls.h
@@ -25,63 +25,76 @@
#include "context.h"
#include "policydb.h"
-int mls_compute_context_len(struct context *context);
-void mls_sid_to_context(struct context *context, char **scontext);
+int mls_compute_context_len(struct policydb *p, struct context *context);
+void mls_sid_to_context(struct policydb *p, struct context *context,
+ char **scontext);
int mls_context_isvalid(struct policydb *p, struct context *c);
int mls_range_isvalid(struct policydb *p, struct mls_range *r);
int mls_level_isvalid(struct policydb *p, struct mls_level *l);
int mls_context_to_sid(struct policydb *p,
char oldc,
- char **scontext,
+ char *scontext,
struct context *context,
struct sidtab *s,
u32 def_sid);
-int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
+int mls_from_string(struct policydb *p, char *str, struct context *context,
+ gfp_t gfp_mask);
int mls_range_set(struct context *context, struct mls_range *range);
int mls_convert_context(struct policydb *oldp,
struct policydb *newp,
- struct context *context);
+ struct context *oldc,
+ struct context *newc);
-int mls_compute_sid(struct context *scontext,
+int mls_compute_sid(struct policydb *p,
+ struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 specified,
struct context *newcontext,
bool sock);
-int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
+int mls_setup_user_range(struct policydb *p,
+ struct context *fromcon, struct user_datum *user,
struct context *usercon);
#ifdef CONFIG_NETLABEL
-void mls_export_netlbl_lvl(struct context *context,
+void mls_export_netlbl_lvl(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr);
-void mls_import_netlbl_lvl(struct context *context,
+void mls_import_netlbl_lvl(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr);
-int mls_export_netlbl_cat(struct context *context,
+int mls_export_netlbl_cat(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr);
-int mls_import_netlbl_cat(struct context *context,
+int mls_import_netlbl_cat(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr);
#else
-static inline void mls_export_netlbl_lvl(struct context *context,
+static inline void mls_export_netlbl_lvl(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr)
{
return;
}
-static inline void mls_import_netlbl_lvl(struct context *context,
+static inline void mls_import_netlbl_lvl(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr)
{
return;
}
-static inline int mls_export_netlbl_cat(struct context *context,
+static inline int mls_export_netlbl_cat(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr)
{
return -ENOMEM;
}
-static inline int mls_import_netlbl_cat(struct context *context,
+static inline int mls_import_netlbl_cat(struct policydb *p,
+ struct context *context,
struct netlbl_lsm_secattr *secattr)
{
return -ENOMEM;
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 9d9f6bb..57e608f 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -43,6 +43,7 @@
#include "conditional.h"
#include "mls.h"
#include "services.h"
+#include "flask.h"
#define _DEBUG_HASHES
@@ -912,13 +913,26 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
if (!c->context[0].user) {
printk(KERN_ERR "SELinux: SID %s was never defined.\n",
c->u.name);
+ sidtab_destroy(s);
+ goto out;
+ }
+ if (c->sid[0] == SECSID_NULL || c->sid[0] > SECINITSID_NUM) {
+ pr_err("SELinux: Initial SID %s out of range.\n",
+ c->u.name);
+ sidtab_destroy(s);
+ goto out;
+ }
+ rc = context_add_hash(p, &c->context[0]);
+ if (rc) {
+ sidtab_destroy(s);
goto out;
}
- rc = sidtab_insert(s, c->sid[0], &c->context[0]);
+ rc = sidtab_set_initial(s, c->sid[0], &c->context[0]);
if (rc) {
printk(KERN_ERR "SELinux: unable to load initial SID %s.\n",
c->u.name);
+ sidtab_destroy(s);
goto out;
}
}
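
Editor's note: the policydb_load_isids() hunk above rejects initial SIDs that are SECSID_NULL or beyond SECINITSID_NUM before they reach sidtab_set_initial(), and tears the sidtab down on every failure path. A tiny sketch of that bounds check, with illustrative constants rather than the kernel's flask.h values:

    #include <stdio.h>

    #define SECSID_NULL     0          /* illustrative values, not the kernel headers */
    #define SECINITSID_NUM  27

    static const char *initial_table[SECINITSID_NUM + 1];

    /* Mirrors the new check: an initial SID must be non-null and within
     * the fixed table before it is used as an index. */
    static int set_initial(unsigned int sid, const char *name)
    {
        if (sid == SECSID_NULL || sid > SECINITSID_NUM) {
            fprintf(stderr, "initial SID %u out of range\n", sid);
            return -1;
        }
        initial_table[sid] = name;
        return 0;
    }

    int main(void)
    {
        set_initial(1, "kernel");      /* accepted */
        set_initial(99, "bogus");      /* rejected instead of indexing past the table */
        return 0;
    }
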
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b275743..c21b0cf 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -71,7 +71,7 @@
#include "audit.h"
/* Policy capability names */
-char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX] = {
+const char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX] = {
"network_peer_controls",
"open_perms",
"extended_socket_class",
@@ -80,53 +80,32 @@ char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX] = {
"nnp_nosuid_transition"
};
-int selinux_policycap_netpeer;
-int selinux_policycap_openperm;
-int selinux_policycap_extsockclass;
-int selinux_policycap_alwaysnetwork;
-int selinux_policycap_cgroupseclabel;
-int selinux_policycap_nnp_nosuid_transition;
+static struct selinux_ss selinux_ss;
-static DEFINE_RWLOCK(policy_rwlock);
-
-static struct sidtab sidtab;
-struct policydb policydb;
-int ss_initialized;
-
-/*
- * The largest sequence number that has been used when
- * providing an access decision to the access vector cache.
- * The sequence number only changes when a policy change
- * occurs.
- */
-static u32 latest_granting;
+void selinux_ss_init(struct selinux_ss **ss)
+{
+ rwlock_init(&selinux_ss.policy_rwlock);
+ mutex_init(&selinux_ss.status_lock);
+ *ss = &selinux_ss;
+}
/* Forward declaration. */
-static int context_struct_to_string(struct context *context, char **scontext,
+static int context_struct_to_string(struct policydb *policydb,
+ struct context *context,
+ char **scontext,
u32 *scontext_len);
-static void context_struct_compute_av(struct context *scontext,
- struct context *tcontext,
- u16 tclass,
- struct av_decision *avd,
- struct extended_perms *xperms);
-
-struct selinux_mapping {
- u16 value; /* policy value */
- unsigned num_perms;
- u32 perms[sizeof(u32) * 8];
-};
-
-static struct selinux_mapping *current_mapping;
-static u16 current_mapping_size;
+static void context_struct_compute_av(struct policydb *policydb,
+ struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd,
+ struct extended_perms *xperms);
static int selinux_set_mapping(struct policydb *pol,
struct security_class_mapping *map,
- struct selinux_mapping **out_map_p,
- u16 *out_map_size)
+ struct selinux_map *out_map)
{
- struct selinux_mapping *out_map = NULL;
- size_t size = sizeof(struct selinux_mapping);
u16 i, j;
unsigned k;
bool print_unknown_handle = false;
@@ -139,15 +118,15 @@ static int selinux_set_mapping(struct policydb *pol,
i++;
/* Allocate space for the class records, plus one for class zero */
- out_map = kcalloc(++i, size, GFP_ATOMIC);
- if (!out_map)
+ out_map->mapping = kcalloc(++i, sizeof(*out_map->mapping), GFP_ATOMIC);
+ if (!out_map->mapping)
return -ENOMEM;
/* Store the raw class and permission values */
j = 0;
while (map[j].name) {
struct security_class_mapping *p_in = map + (j++);
- struct selinux_mapping *p_out = out_map + j;
+ struct selinux_mapping *p_out = out_map->mapping + j;
/* An empty class string skips ahead */
if (!strcmp(p_in->name, "")) {
@@ -157,8 +136,7 @@ static int selinux_set_mapping(struct policydb *pol,
p_out->value = string_to_security_class(pol, p_in->name);
if (!p_out->value) {
- printk(KERN_INFO
- "SELinux: Class %s not defined in policy.\n",
+ pr_info("SELinux: Class %s not defined in policy.\n",
p_in->name);
if (pol->reject_unknown)
goto err;
@@ -177,8 +155,7 @@ static int selinux_set_mapping(struct policydb *pol,
p_out->perms[k] = string_to_av_perm(pol, p_out->value,
p_in->perms[k]);
if (!p_out->perms[k]) {
- printk(KERN_INFO
- "SELinux: Permission %s in class %s not defined in policy.\n",
+ pr_info("SELinux: Permission %s in class %s not defined in policy.\n",
p_in->perms[k], p_in->name);
if (pol->reject_unknown)
goto err;
@@ -191,14 +168,14 @@ static int selinux_set_mapping(struct policydb *pol,
}
if (print_unknown_handle)
- printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n",
+ pr_info("SELinux: the above unknown classes and permissions will be %s\n",
pol->allow_unknown ? "allowed" : "denied");
- *out_map_p = out_map;
- *out_map_size = i;
+ out_map->size = i;
return 0;
err:
- kfree(out_map);
+ kfree(out_map->mapping);
+ out_map->mapping = NULL;
return -EINVAL;
}
@@ -206,10 +183,10 @@ static int selinux_set_mapping(struct policydb *pol,
* Get real, policy values from mapped values
*/
-static u16 unmap_class(u16 tclass)
+static u16 unmap_class(struct selinux_map *map, u16 tclass)
{
- if (tclass < current_mapping_size)
- return current_mapping[tclass].value;
+ if (tclass < map->size)
+ return map->mapping[tclass].value;
return tclass;
}
@@ -217,42 +194,44 @@ static u16 unmap_class(u16 tclass)
/*
* Get kernel value for class from its policy value
*/
-static u16 map_class(u16 pol_value)
+static u16 map_class(struct selinux_map *map, u16 pol_value)
{
u16 i;
- for (i = 1; i < current_mapping_size; i++) {
- if (current_mapping[i].value == pol_value)
+ for (i = 1; i < map->size; i++) {
+ if (map->mapping[i].value == pol_value)
return i;
}
return SECCLASS_NULL;
}
-static void map_decision(u16 tclass, struct av_decision *avd,
+static void map_decision(struct selinux_map *map,
+ u16 tclass, struct av_decision *avd,
int allow_unknown)
{
- if (tclass < current_mapping_size) {
- unsigned i, n = current_mapping[tclass].num_perms;
+ if (tclass < map->size) {
+ struct selinux_mapping *mapping = &map->mapping[tclass];
+ unsigned int i, n = mapping->num_perms;
u32 result;
for (i = 0, result = 0; i < n; i++) {
- if (avd->allowed & current_mapping[tclass].perms[i])
+ if (avd->allowed & mapping->perms[i])
result |= 1<<i;
- if (allow_unknown && !current_mapping[tclass].perms[i])
+ if (allow_unknown && !mapping->perms[i])
result |= 1<<i;
}
avd->allowed = result;
for (i = 0, result = 0; i < n; i++)
- if (avd->auditallow & current_mapping[tclass].perms[i])
+ if (avd->auditallow & mapping->perms[i])
result |= 1<<i;
avd->auditallow = result;
for (i = 0, result = 0; i < n; i++) {
- if (avd->auditdeny & current_mapping[tclass].perms[i])
+ if (avd->auditdeny & mapping->perms[i])
result |= 1<<i;
- if (!allow_unknown && !current_mapping[tclass].perms[i])
+ if (!allow_unknown && !mapping->perms[i])
result |= 1<<i;
}
/*
@@ -266,9 +245,11 @@ static void map_decision(u16 tclass, struct av_decision *avd,
}
}
-int security_mls_enabled(void)
+int security_mls_enabled(struct selinux_state *state)
{
- return policydb.mls_enabled;
+ struct policydb *p = &state->ss->policydb;
+
+ return p->mls_enabled;
}
/*
@@ -282,7 +263,8 @@ int security_mls_enabled(void)
* of the process performing the transition. All other callers of
* constraint_expr_eval should pass in NULL for xcontext.
*/
-static int constraint_expr_eval(struct context *scontext,
+static int constraint_expr_eval(struct policydb *policydb,
+ struct context *scontext,
struct context *tcontext,
struct context *xcontext,
struct constraint_expr *cexpr)
@@ -326,8 +308,8 @@ static int constraint_expr_eval(struct context *scontext,
case CEXPR_ROLE:
val1 = scontext->role;
val2 = tcontext->role;
- r1 = policydb.role_val_to_struct[val1 - 1];
- r2 = policydb.role_val_to_struct[val2 - 1];
+ r1 = policydb->role_val_to_struct[val1 - 1];
+ r2 = policydb->role_val_to_struct[val2 - 1];
switch (e->op) {
case CEXPR_DOM:
s[++sp] = ebitmap_get_bit(&r1->dominates,
@@ -472,7 +454,8 @@ static int dump_masked_av_helper(void *k, void *d, void *args)
return 0;
}
-static void security_dump_masked_av(struct context *scontext,
+static void security_dump_masked_av(struct policydb *policydb,
+ struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 permissions,
@@ -492,8 +475,8 @@ static void security_dump_masked_av(struct context *scontext,
if (!permissions)
return;
- tclass_name = sym_name(&policydb, SYM_CLASSES, tclass - 1);
- tclass_dat = policydb.class_val_to_struct[tclass - 1];
+ tclass_name = sym_name(policydb, SYM_CLASSES, tclass - 1);
+ tclass_dat = policydb->class_val_to_struct[tclass - 1];
common_dat = tclass_dat->comdatum;
/* init permission_names */
@@ -507,11 +490,11 @@ static void security_dump_masked_av(struct context *scontext,
goto out;
/* get scontext/tcontext in text form */
- if (context_struct_to_string(scontext,
+ if (context_struct_to_string(policydb, scontext,
&scontext_name, &length) < 0)
goto out;
- if (context_struct_to_string(tcontext,
+ if (context_struct_to_string(policydb, tcontext,
&tcontext_name, &length) < 0)
goto out;
@@ -550,7 +533,8 @@ static void security_dump_masked_av(struct context *scontext,
* security_boundary_permission - drops violated permissions
* on boundary constraint.
*/
-static void type_attribute_bounds_av(struct context *scontext,
+static void type_attribute_bounds_av(struct policydb *policydb,
+ struct context *scontext,
struct context *tcontext,
u16 tclass,
struct av_decision *avd)
@@ -562,14 +546,14 @@ static void type_attribute_bounds_av(struct context *scontext,
struct type_datum *target;
u32 masked = 0;
- source = flex_array_get_ptr(policydb.type_val_to_struct_array,
+ source = flex_array_get_ptr(policydb->type_val_to_struct_array,
scontext->type - 1);
BUG_ON(!source);
if (!source->bounds)
return;
- target = flex_array_get_ptr(policydb.type_val_to_struct_array,
+ target = flex_array_get_ptr(policydb->type_val_to_struct_array,
tcontext->type - 1);
BUG_ON(!target);
@@ -584,7 +568,7 @@ static void type_attribute_bounds_av(struct context *scontext,
tcontextp = &lo_tcontext;
}
- context_struct_compute_av(&lo_scontext,
+ context_struct_compute_av(policydb, &lo_scontext,
tcontextp,
tclass,
&lo_avd,
@@ -599,7 +583,7 @@ static void type_attribute_bounds_av(struct context *scontext,
avd->allowed &= ~masked;
/* audit masked permissions */
- security_dump_masked_av(scontext, tcontext,
+ security_dump_masked_av(policydb, scontext, tcontext,
tclass, masked, "bounds");
}
@@ -632,11 +616,12 @@ void services_compute_xperms_drivers(
* Compute access vectors and extended permissions based on a context
* structure pair for the permissions in a particular class.
*/
-static void context_struct_compute_av(struct context *scontext,
- struct context *tcontext,
- u16 tclass,
- struct av_decision *avd,
- struct extended_perms *xperms)
+static void context_struct_compute_av(struct policydb *policydb,
+ struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd,
+ struct extended_perms *xperms)
{
struct constraint_node *constraint;
struct role_allow *ra;
@@ -655,13 +640,13 @@ static void context_struct_compute_av(struct context *scontext,
xperms->len = 0;
}
- if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
+ if (unlikely(!tclass || tclass > policydb->p_classes.nprim)) {
if (printk_ratelimit())
- printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass);
+ pr_warn("SELinux: Invalid class %hu\n", tclass);
return;
}
- tclass_datum = policydb.class_val_to_struct[tclass - 1];
+ tclass_datum = policydb->class_val_to_struct[tclass - 1];
/*
* If a specific type enforcement rule was defined for
@@ -669,15 +654,18 @@ static void context_struct_compute_av(struct context *scontext,
*/
avkey.target_class = tclass;
avkey.specified = AVTAB_AV | AVTAB_XPERMS;
- sattr = flex_array_get(policydb.type_attr_map_array, scontext->type - 1);
+ sattr = flex_array_get(policydb->type_attr_map_array,
+ scontext->type - 1);
BUG_ON(!sattr);
- tattr = flex_array_get(policydb.type_attr_map_array, tcontext->type - 1);
+ tattr = flex_array_get(policydb->type_attr_map_array,
+ tcontext->type - 1);
BUG_ON(!tattr);
ebitmap_for_each_positive_bit(sattr, snode, i) {
ebitmap_for_each_positive_bit(tattr, tnode, j) {
avkey.source_type = i + 1;
avkey.target_type = j + 1;
- for (node = avtab_search_node(&policydb.te_avtab, &avkey);
+ for (node = avtab_search_node(&policydb->te_avtab,
+ &avkey);
node;
node = avtab_search_node_next(node, avkey.specified)) {
if (node->key.specified == AVTAB_ALLOWED)
@@ -691,7 +679,7 @@ static void context_struct_compute_av(struct context *scontext,
}
/* Check conditional av table for additional permissions */
- cond_compute_av(&policydb.te_cond_avtab, &avkey,
+ cond_compute_av(&policydb->te_cond_avtab, &avkey,
avd, xperms);
}
@@ -704,7 +692,7 @@ static void context_struct_compute_av(struct context *scontext,
constraint = tclass_datum->constraints;
while (constraint) {
if ((constraint->permissions & (avd->allowed)) &&
- !constraint_expr_eval(scontext, tcontext, NULL,
+ !constraint_expr_eval(policydb, scontext, tcontext, NULL,
constraint->expr)) {
avd->allowed &= ~(constraint->permissions);
}
@@ -716,16 +704,16 @@ static void context_struct_compute_av(struct context *scontext,
* role is changing, then check the (current_role, new_role)
* pair.
*/
- if (tclass == policydb.process_class &&
- (avd->allowed & policydb.process_trans_perms) &&
+ if (tclass == policydb->process_class &&
+ (avd->allowed & policydb->process_trans_perms) &&
scontext->role != tcontext->role) {
- for (ra = policydb.role_allow; ra; ra = ra->next) {
+ for (ra = policydb->role_allow; ra; ra = ra->next) {
if (scontext->role == ra->role &&
tcontext->role == ra->new_role)
break;
}
if (!ra)
- avd->allowed &= ~policydb.process_trans_perms;
+ avd->allowed &= ~policydb->process_trans_perms;
}
/*
@@ -733,41 +721,46 @@ static void context_struct_compute_av(struct context *scontext,
* constraint, lazy checks have to mask any violated
* permission and notice it to userspace via audit.
*/
- type_attribute_bounds_av(scontext, tcontext,
+ type_attribute_bounds_av(policydb, scontext, tcontext,
tclass, avd);
}
-static int security_validtrans_handle_fail(struct context *ocontext,
+static int security_validtrans_handle_fail(struct selinux_state *state,
+ struct context *ocontext,
struct context *ncontext,
struct context *tcontext,
u16 tclass)
{
+ struct policydb *p = &state->ss->policydb;
char *o = NULL, *n = NULL, *t = NULL;
u32 olen, nlen, tlen;
- if (context_struct_to_string(ocontext, &o, &olen))
+ if (context_struct_to_string(p, ocontext, &o, &olen))
goto out;
- if (context_struct_to_string(ncontext, &n, &nlen))
+ if (context_struct_to_string(p, ncontext, &n, &nlen))
goto out;
- if (context_struct_to_string(tcontext, &t, &tlen))
+ if (context_struct_to_string(p, tcontext, &t, &tlen))
goto out;
audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
"op=security_validate_transition seresult=denied"
" oldcontext=%s newcontext=%s taskcontext=%s tclass=%s",
- o, n, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
+ o, n, t, sym_name(p, SYM_CLASSES, tclass-1));
out:
kfree(o);
kfree(n);
kfree(t);
- if (!selinux_enforcing)
+ if (!enforcing_enabled(state))
return 0;
return -EPERM;
}
-static int security_compute_validatetrans(u32 oldsid, u32 newsid, u32 tasksid,
+static int security_compute_validatetrans(struct selinux_state *state,
+ u32 oldsid, u32 newsid, u32 tasksid,
u16 orig_tclass, bool user)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
struct context *ocontext;
struct context *ncontext;
struct context *tcontext;
@@ -776,41 +769,45 @@ static int security_compute_validatetrans(u32 oldsid, u32 newsid, u32 tasksid,
u16 tclass;
int rc = 0;
- if (!ss_initialized)
+
+ if (!state->initialized)
return 0;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
+
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
if (!user)
- tclass = unmap_class(orig_tclass);
+ tclass = unmap_class(&state->ss->map, orig_tclass);
else
tclass = orig_tclass;
- if (!tclass || tclass > policydb.p_classes.nprim) {
+ if (!tclass || tclass > policydb->p_classes.nprim) {
rc = -EINVAL;
goto out;
}
- tclass_datum = policydb.class_val_to_struct[tclass - 1];
+ tclass_datum = policydb->class_val_to_struct[tclass - 1];
- ocontext = sidtab_search(&sidtab, oldsid);
+ ocontext = sidtab_search(sidtab, oldsid);
if (!ocontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, oldsid);
rc = -EINVAL;
goto out;
}
- ncontext = sidtab_search(&sidtab, newsid);
+ ncontext = sidtab_search(sidtab, newsid);
if (!ncontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, newsid);
rc = -EINVAL;
goto out;
}
- tcontext = sidtab_search(&sidtab, tasksid);
+ tcontext = sidtab_search(sidtab, tasksid);
if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tasksid);
rc = -EINVAL;
goto out;
@@ -818,12 +815,13 @@ static int security_compute_validatetrans(u32 oldsid, u32 newsid, u32 tasksid,
constraint = tclass_datum->validatetrans;
while (constraint) {
- if (!constraint_expr_eval(ocontext, ncontext, tcontext,
- constraint->expr)) {
+ if (!constraint_expr_eval(policydb, ocontext, ncontext,
+ tcontext, constraint->expr)) {
if (user)
rc = -EPERM;
else
- rc = security_validtrans_handle_fail(ocontext,
+ rc = security_validtrans_handle_fail(state,
+ ocontext,
ncontext,
tcontext,
tclass);
@@ -833,22 +831,24 @@ static int security_compute_validatetrans(u32 oldsid, u32 newsid, u32 tasksid,
}
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
-int security_validate_transition_user(u32 oldsid, u32 newsid, u32 tasksid,
- u16 tclass)
+int security_validate_transition_user(struct selinux_state *state,
+ u32 oldsid, u32 newsid, u32 tasksid,
+ u16 tclass)
{
- return security_compute_validatetrans(oldsid, newsid, tasksid,
- tclass, true);
+ return security_compute_validatetrans(state, oldsid, newsid, tasksid,
+ tclass, true);
}
-int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
+int security_validate_transition(struct selinux_state *state,
+ u32 oldsid, u32 newsid, u32 tasksid,
u16 orig_tclass)
{
- return security_compute_validatetrans(oldsid, newsid, tasksid,
- orig_tclass, false);
+ return security_compute_validatetrans(state, oldsid, newsid, tasksid,
+ orig_tclass, false);
}
/*
@@ -860,30 +860,36 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
* @oldsid : current security identifier
* @newsid : destinated security identifier
*/
-int security_bounded_transition(u32 old_sid, u32 new_sid)
+int security_bounded_transition(struct selinux_state *state,
+ u32 old_sid, u32 new_sid)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
struct context *old_context, *new_context;
struct type_datum *type;
int index;
int rc;
- if (!ss_initialized)
+ if (!state->initialized)
return 0;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
+
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
rc = -EINVAL;
- old_context = sidtab_search(&sidtab, old_sid);
+ old_context = sidtab_search(sidtab, old_sid);
if (!old_context) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
+ pr_err("SELinux: %s: unrecognized SID %u\n",
__func__, old_sid);
goto out;
}
rc = -EINVAL;
- new_context = sidtab_search(&sidtab, new_sid);
+ new_context = sidtab_search(sidtab, new_sid);
if (!new_context) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
+ pr_err("SELinux: %s: unrecognized SID %u\n",
__func__, new_sid);
goto out;
}
@@ -895,7 +901,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
index = new_context->type;
while (true) {
- type = flex_array_get_ptr(policydb.type_val_to_struct_array,
+ type = flex_array_get_ptr(policydb->type_val_to_struct_array,
index - 1);
BUG_ON(!type);
@@ -917,9 +923,9 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
char *new_name = NULL;
u32 length;
- if (!context_struct_to_string(old_context,
+ if (!context_struct_to_string(policydb, old_context,
&old_name, &length) &&
- !context_struct_to_string(new_context,
+ !context_struct_to_string(policydb, new_context,
&new_name, &length)) {
audit_log(current->audit_context,
GFP_ATOMIC, AUDIT_SELINUX_ERR,
@@ -932,17 +938,17 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
kfree(old_name);
}
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
-static void avd_init(struct av_decision *avd)
+static void avd_init(struct selinux_state *state, struct av_decision *avd)
{
avd->allowed = 0;
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
- avd->seqno = latest_granting;
+ avd->seqno = state->ss->latest_granting;
avd->flags = 0;
}
@@ -1000,12 +1006,15 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
}
}
-void security_compute_xperms_decision(u32 ssid,
- u32 tsid,
- u16 orig_tclass,
- u8 driver,
- struct extended_perms_decision *xpermd)
+void security_compute_xperms_decision(struct selinux_state *state,
+ u32 ssid,
+ u32 tsid,
+ u16 orig_tclass,
+ u8 driver,
+ struct extended_perms_decision *xpermd)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
u16 tclass;
struct context *scontext, *tcontext;
struct avtab_key avkey;
@@ -1020,60 +1029,64 @@ void security_compute_xperms_decision(u32 ssid,
memset(xpermd->auditallow->p, 0, sizeof(xpermd->auditallow->p));
memset(xpermd->dontaudit->p, 0, sizeof(xpermd->dontaudit->p));
- read_lock(&policy_rwlock);
- if (!ss_initialized)
+ read_lock(&state->ss->policy_rwlock);
+ if (!state->initialized)
goto allow;
- scontext = sidtab_search(&sidtab, ssid);
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
+
+ scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
goto out;
}
- tcontext = sidtab_search(&sidtab, tsid);
+ tcontext = sidtab_search(sidtab, tsid);
if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
goto out;
}
- tclass = unmap_class(orig_tclass);
+ tclass = unmap_class(&state->ss->map, orig_tclass);
if (unlikely(orig_tclass && !tclass)) {
- if (policydb.allow_unknown)
+ if (policydb->allow_unknown)
goto allow;
goto out;
}
- if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
+ if (unlikely(!tclass || tclass > policydb->p_classes.nprim)) {
pr_warn_ratelimited("SELinux: Invalid class %hu\n", tclass);
goto out;
}
avkey.target_class = tclass;
avkey.specified = AVTAB_XPERMS;
- sattr = flex_array_get(policydb.type_attr_map_array,
+ sattr = flex_array_get(policydb->type_attr_map_array,
scontext->type - 1);
BUG_ON(!sattr);
- tattr = flex_array_get(policydb.type_attr_map_array,
+ tattr = flex_array_get(policydb->type_attr_map_array,
tcontext->type - 1);
BUG_ON(!tattr);
ebitmap_for_each_positive_bit(sattr, snode, i) {
ebitmap_for_each_positive_bit(tattr, tnode, j) {
avkey.source_type = i + 1;
avkey.target_type = j + 1;
- for (node = avtab_search_node(&policydb.te_avtab, &avkey);
+ for (node = avtab_search_node(&policydb->te_avtab,
+ &avkey);
node;
node = avtab_search_node_next(node, avkey.specified))
services_compute_xperms_decision(xpermd, node);
- cond_compute_xperms(&policydb.te_cond_avtab,
+ cond_compute_xperms(&policydb->te_cond_avtab,
&avkey, xpermd);
}
}
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return;
allow:
memset(xpermd->allowed->p, 0xff, sizeof(xpermd->allowed->p));
@@ -1091,94 +1104,109 @@ void security_compute_xperms_decision(u32 ssid,
* Compute a set of access vector decisions based on the
* SID pair (@ssid, @tsid) for the permissions in @tclass.
*/
-void security_compute_av(u32 ssid,
+void security_compute_av(struct selinux_state *state,
+ u32 ssid,
u32 tsid,
u16 orig_tclass,
struct av_decision *avd,
struct extended_perms *xperms)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
u16 tclass;
struct context *scontext = NULL, *tcontext = NULL;
- read_lock(&policy_rwlock);
- avd_init(avd);
+ read_lock(&state->ss->policy_rwlock);
+ avd_init(state, avd);
xperms->len = 0;
- if (!ss_initialized)
+ if (!state->initialized)
goto allow;
- scontext = sidtab_search(&sidtab, ssid);
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
+
+ scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
goto out;
}
/* permissive domain? */
- if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
+ if (ebitmap_get_bit(&policydb->permissive_map, scontext->type))
avd->flags |= AVD_FLAGS_PERMISSIVE;
- tcontext = sidtab_search(&sidtab, tsid);
+ tcontext = sidtab_search(sidtab, tsid);
if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
goto out;
}
- tclass = unmap_class(orig_tclass);
+ tclass = unmap_class(&state->ss->map, orig_tclass);
if (unlikely(orig_tclass && !tclass)) {
- if (policydb.allow_unknown)
+ if (policydb->allow_unknown)
goto allow;
goto out;
}
- context_struct_compute_av(scontext, tcontext, tclass, avd, xperms);
- map_decision(orig_tclass, avd, policydb.allow_unknown);
+ context_struct_compute_av(policydb, scontext, tcontext, tclass, avd,
+ xperms);
+ map_decision(&state->ss->map, orig_tclass, avd,
+ policydb->allow_unknown);
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return;
allow:
avd->allowed = 0xffffffff;
goto out;
}
-void security_compute_av_user(u32 ssid,
+void security_compute_av_user(struct selinux_state *state,
+ u32 ssid,
u32 tsid,
u16 tclass,
struct av_decision *avd)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
struct context *scontext = NULL, *tcontext = NULL;
- read_lock(&policy_rwlock);
- avd_init(avd);
- if (!ss_initialized)
+ read_lock(&state->ss->policy_rwlock);
+ avd_init(state, avd);
+ if (!state->initialized)
goto allow;
- scontext = sidtab_search(&sidtab, ssid);
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
+
+ scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
goto out;
}
/* permissive domain? */
- if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
+ if (ebitmap_get_bit(&policydb->permissive_map, scontext->type))
avd->flags |= AVD_FLAGS_PERMISSIVE;
- tcontext = sidtab_search(&sidtab, tsid);
+ tcontext = sidtab_search(sidtab, tsid);
if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
goto out;
}
if (unlikely(!tclass)) {
- if (policydb.allow_unknown)
+ if (policydb->allow_unknown)
goto allow;
goto out;
}
- context_struct_compute_av(scontext, tcontext, tclass, avd, NULL);
+ context_struct_compute_av(policydb, scontext, tcontext, tclass, avd,
+ NULL);
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return;
allow:
avd->allowed = 0xffffffff;
@@ -1192,7 +1220,9 @@ void security_compute_av_user(u32 ssid,
* to point to this string and set `*scontext_len' to
* the length of the string.
*/
-static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len)
+static int context_struct_to_string(struct policydb *p,
+ struct context *context,
+ char **scontext, u32 *scontext_len)
{
char *scontextp;
@@ -1211,10 +1241,10 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
}
/* Compute the size of the context. */
- *scontext_len += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1;
- *scontext_len += strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1;
- *scontext_len += strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)) + 1;
- *scontext_len += mls_compute_context_len(context);
+ *scontext_len += strlen(sym_name(p, SYM_USERS, context->user - 1)) + 1;
+ *scontext_len += strlen(sym_name(p, SYM_ROLES, context->role - 1)) + 1;
+ *scontext_len += strlen(sym_name(p, SYM_TYPES, context->type - 1)) + 1;
+ *scontext_len += mls_compute_context_len(p, context);
if (!scontext)
return 0;
@@ -1229,11 +1259,11 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
* Copy the user name, role name and type name into the context.
*/
scontextp += sprintf(scontextp, "%s:%s:%s",
- sym_name(&policydb, SYM_USERS, context->user - 1),
- sym_name(&policydb, SYM_ROLES, context->role - 1),
- sym_name(&policydb, SYM_TYPES, context->type - 1));
+ sym_name(p, SYM_USERS, context->user - 1),
+ sym_name(p, SYM_ROLES, context->role - 1),
+ sym_name(p, SYM_TYPES, context->type - 1));
- mls_sid_to_context(context, &scontextp);
+ mls_sid_to_context(p, context, &scontextp);
*scontextp = 0;
@@ -1242,6 +1272,17 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
#include "initial_sid_to_string.h"
+int security_sidtab_hash_stats(struct selinux_state *state, char *page)
+{
+ int rc;
+
+ read_lock(&state->ss->policy_rwlock);
+ rc = sidtab_hash_stats(state->ss->sidtab, page);
+ read_unlock(&state->ss->policy_rwlock);
+
+ return rc;
+}
+
const char *security_get_initial_sid_context(u32 sid)
{
if (unlikely(sid > SECINITSID_NUM))
@@ -1249,9 +1290,12 @@ const char *security_get_initial_sid_context(u32 sid)
return initial_sid_to_string[sid];
}
-static int security_sid_to_context_core(u32 sid, char **scontext,
+static int security_sid_to_context_core(struct selinux_state *state,
+ u32 sid, char **scontext,
u32 *scontext_len, int force)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
struct context *context;
int rc = 0;
@@ -1259,7 +1303,7 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
*scontext = NULL;
*scontext_len = 0;
- if (!ss_initialized) {
+ if (!state->initialized) {
if (sid <= SECINITSID_NUM) {
char *scontextp;
@@ -1275,25 +1319,28 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
*scontext = scontextp;
goto out;
}
- printk(KERN_ERR "SELinux: %s: called before initial "
+ pr_err("SELinux: %s: called before initial "
"load_policy on unknown SID %d\n", __func__, sid);
rc = -EINVAL;
goto out;
}
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
if (force)
- context = sidtab_search_force(&sidtab, sid);
+ context = sidtab_search_force(sidtab, sid);
else
- context = sidtab_search(&sidtab, sid);
+ context = sidtab_search(sidtab, sid);
if (!context) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, sid);
rc = -EINVAL;
goto out_unlock;
}
- rc = context_struct_to_string(context, scontext, scontext_len);
+ rc = context_struct_to_string(policydb, context, scontext,
+ scontext_len);
out_unlock:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
out:
return rc;
@@ -1309,14 +1356,18 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
* into a dynamically allocated string of the correct size. Set @scontext
* to point to this string and set @scontext_len to the length of the string.
*/
-int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len)
+int security_sid_to_context(struct selinux_state *state,
+ u32 sid, char **scontext, u32 *scontext_len)
{
- return security_sid_to_context_core(sid, scontext, scontext_len, 0);
+ return security_sid_to_context_core(state, sid, scontext,
+ scontext_len, 0);
}
-int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len)
+int security_sid_to_context_force(struct selinux_state *state, u32 sid,
+ char **scontext, u32 *scontext_len)
{
- return security_sid_to_context_core(sid, scontext, scontext_len, 1);
+ return security_sid_to_context_core(state, sid, scontext,
+ scontext_len, 1);
}
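/*
 * Illustrative caller sketch (not part of the patch): resolving a SID back
 * to its string form through the state-threaded API above. The security
 * server allocates the buffer, so the caller is expected to kfree() it.
 */
static void example_log_sid(struct selinux_state *state, u32 sid)
{
	char *ctx;
	u32 len;

	if (!security_sid_to_context(state, sid, &ctx, &len)) {
		pr_info("SELinux: sid %u maps to %s\n", sid, ctx);
		kfree(ctx);
	}
}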
/*
@@ -1325,7 +1376,6 @@ int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len)
static int string_to_context_struct(struct policydb *pol,
struct sidtab *sidtabp,
char *scontext,
- u32 scontext_len,
struct context *ctx,
u32 def_sid)
{
@@ -1386,15 +1436,12 @@ static int string_to_context_struct(struct policydb *pol,
ctx->type = typdatum->value;
- rc = mls_context_to_sid(pol, oldc, &p, ctx, sidtabp, def_sid);
+ rc = mls_context_to_sid(pol, oldc, p, ctx, sidtabp, def_sid);
if (rc)
goto out;
- rc = -EINVAL;
- if ((p - scontext) < scontext_len)
- goto out;
-
/* Check the validity of the new context. */
+ rc = -EINVAL;
if (!policydb_context_isvalid(pol, ctx))
goto out;
rc = 0;
@@ -1404,10 +1451,49 @@ static int string_to_context_struct(struct policydb *pol,
return rc;
}
-static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
+int context_add_hash(struct policydb *policydb,
+ struct context *context)
+{
+ int rc;
+ char *str;
+ int len;
+
+ if (context->str) {
+ context->hash = context_compute_hash(context->str);
+ } else {
+ rc = context_struct_to_string(policydb, context,
+ &str, &len);
+ if (rc)
+ return rc;
+ context->hash = context_compute_hash(str);
+ kfree(str);
+ }
+ return 0;
+}
+
+static int context_struct_to_sid(struct selinux_state *state,
+ struct context *context, u32 *sid)
+{
+ int rc;
+ struct sidtab *sidtab = state->ss->sidtab;
+ struct policydb *policydb = &state->ss->policydb;
+
+ if (!context->hash) {
+ rc = context_add_hash(policydb, context);
+ if (rc)
+ return rc;
+ }
+
+ return sidtab_context_to_sid(sidtab, context, sid);
+}
+
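/*
 * Minimal standalone sketch of the "hash once, cache it on the object"
 * pattern that context_add_hash() and context_struct_to_sid() above rely
 * on; every name below is hypothetical and the hash function is arbitrary.
 */
#include <stdint.h>

struct item {
	const char *key;
	uint32_t hash;			/* 0 means "not computed yet" */
};

static uint32_t fnv1a(const char *s)
{
	uint32_t h = 2166136261u;

	while (*s)
		h = (h ^ (unsigned char)*s++) * 16777619u;
	return h ? h : 1;		/* reserve 0 as the "unset" marker */
}

static uint32_t item_hash(struct item *it)
{
	if (!it->hash)			/* first use pays the cost ... */
		it->hash = fnv1a(it->key);
	return it->hash;		/* ... repeated uses hit the cache */
}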
+static int security_context_to_sid_core(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
u32 *sid, u32 def_sid, gfp_t gfp_flags,
int force)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
char *scontext2, *str = NULL;
struct context context;
int rc = 0;
@@ -1421,7 +1507,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
if (!scontext2)
return -ENOMEM;
- if (!ss_initialized) {
+ if (!state->initialized) {
int i;
for (i = 1; i < SECINITSID_NUM; i++) {
@@ -1442,20 +1528,21 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
if (!str)
goto out;
}
-
- read_lock(&policy_rwlock);
- rc = string_to_context_struct(&policydb, &sidtab, scontext2,
- scontext_len, &context, def_sid);
+ read_lock(&state->ss->policy_rwlock);
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
+ rc = string_to_context_struct(policydb, sidtab, scontext2,
+ &context, def_sid);
if (rc == -EINVAL && force) {
context.str = str;
context.len = strlen(str) + 1;
str = NULL;
} else if (rc)
goto out_unlock;
- rc = sidtab_context_to_sid(&sidtab, &context, sid);
+ rc = context_struct_to_sid(state, &context, sid);
context_destroy(&context);
out_unlock:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
out:
kfree(scontext2);
kfree(str);
@@ -1474,16 +1561,19 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
* Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
* memory is available, or 0 on success.
*/
-int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid,
+int security_context_to_sid(struct selinux_state *state,
+ const char *scontext, u32 scontext_len, u32 *sid,
gfp_t gfp)
{
- return security_context_to_sid_core(scontext, scontext_len,
+ return security_context_to_sid_core(state, scontext, scontext_len,
sid, SECSID_NULL, gfp, 0);
}
-int security_context_str_to_sid(const char *scontext, u32 *sid, gfp_t gfp)
+int security_context_str_to_sid(struct selinux_state *state,
+ const char *scontext, u32 *sid, gfp_t gfp)
{
- return security_context_to_sid(scontext, strlen(scontext), sid, gfp);
+ return security_context_to_sid(state, scontext, strlen(scontext),
+ sid, gfp);
}
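/*
 * Hypothetical caller sketch for the reworked string-to-SID entry points:
 * the only change for callers is the extra selinux_state argument.
 * &selinux_state is the single global instance used elsewhere in this
 * series, and the context string here is just an example.
 */
static int example_lookup_etc_sid(u32 *sid)
{
	return security_context_str_to_sid(&selinux_state,
					   "system_u:object_r:etc_t:s0",
					   sid, GFP_KERNEL);
}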
/**
@@ -1504,51 +1594,56 @@ int security_context_str_to_sid(const char *scontext, u32 *sid, gfp_t gfp)
* Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
* memory is available, or 0 on success.
*/
-int security_context_to_sid_default(const char *scontext, u32 scontext_len,
+int security_context_to_sid_default(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
u32 *sid, u32 def_sid, gfp_t gfp_flags)
{
- return security_context_to_sid_core(scontext, scontext_len,
+ return security_context_to_sid_core(state, scontext, scontext_len,
sid, def_sid, gfp_flags, 1);
}
-int security_context_to_sid_force(const char *scontext, u32 scontext_len,
+int security_context_to_sid_force(struct selinux_state *state,
+ const char *scontext, u32 scontext_len,
u32 *sid)
{
- return security_context_to_sid_core(scontext, scontext_len,
+ return security_context_to_sid_core(state, scontext, scontext_len,
sid, SECSID_NULL, GFP_KERNEL, 1);
}
static int compute_sid_handle_invalid_context(
+ struct selinux_state *state,
struct context *scontext,
struct context *tcontext,
u16 tclass,
struct context *newcontext)
{
+ struct policydb *policydb = &state->ss->policydb;
char *s = NULL, *t = NULL, *n = NULL;
u32 slen, tlen, nlen;
- if (context_struct_to_string(scontext, &s, &slen))
+ if (context_struct_to_string(policydb, scontext, &s, &slen))
goto out;
- if (context_struct_to_string(tcontext, &t, &tlen))
+ if (context_struct_to_string(policydb, tcontext, &t, &tlen))
goto out;
- if (context_struct_to_string(newcontext, &n, &nlen))
+ if (context_struct_to_string(policydb, newcontext, &n, &nlen))
goto out;
audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
"op=security_compute_sid invalid_context=%s"
" scontext=%s"
" tcontext=%s"
" tclass=%s",
- n, s, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
+ n, s, t, sym_name(policydb, SYM_CLASSES, tclass-1));
out:
kfree(s);
kfree(t);
kfree(n);
- if (!selinux_enforcing)
+ if (!enforcing_enabled(state))
return 0;
return -EACCES;
}
-static void filename_compute_type(struct policydb *p, struct context *newcontext,
+static void filename_compute_type(struct policydb *policydb,
+ struct context *newcontext,
u32 stype, u32 ttype, u16 tclass,
const char *objname)
{
@@ -1560,7 +1655,7 @@ static void filename_compute_type(struct policydb *p, struct context *newcontext
* like /dev or /var/run. This bitmap will quickly skip rule searches
* if the ttype does not contain any rules.
*/
- if (!ebitmap_get_bit(&p->filename_trans_ttypes, ttype))
+ if (!ebitmap_get_bit(&policydb->filename_trans_ttypes, ttype))
return;
ft.stype = stype;
@@ -1568,12 +1663,13 @@ static void filename_compute_type(struct policydb *p, struct context *newcontext
ft.tclass = tclass;
ft.name = objname;
- otype = hashtab_search(p->filename_trans, &ft);
+ otype = hashtab_search(policydb->filename_trans, &ft);
if (otype)
newcontext->type = otype->otype;
}
-static int security_compute_sid(u32 ssid,
+static int security_compute_sid(struct selinux_state *state,
+ u32 ssid,
u32 tsid,
u16 orig_tclass,
u32 specified,
@@ -1581,6 +1677,8 @@ static int security_compute_sid(u32 ssid,
u32 *out_sid,
bool kern)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
struct class_datum *cladatum = NULL;
struct context *scontext = NULL, *tcontext = NULL, newcontext;
struct role_trans *roletr = NULL;
@@ -1591,7 +1689,7 @@ static int security_compute_sid(u32 ssid,
int rc = 0;
bool sock;
- if (!ss_initialized) {
+ if (!state->initialized) {
switch (orig_tclass) {
case SECCLASS_PROCESS: /* kernel value */
*out_sid = ssid;
@@ -1605,33 +1703,37 @@ static int security_compute_sid(u32 ssid,
context_init(&newcontext);
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
if (kern) {
- tclass = unmap_class(orig_tclass);
+ tclass = unmap_class(&state->ss->map, orig_tclass);
sock = security_is_socket_class(orig_tclass);
} else {
tclass = orig_tclass;
- sock = security_is_socket_class(map_class(tclass));
+ sock = security_is_socket_class(map_class(&state->ss->map,
+ tclass));
}
- scontext = sidtab_search(&sidtab, ssid);
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
+
+ scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
rc = -EINVAL;
goto out_unlock;
}
- tcontext = sidtab_search(&sidtab, tsid);
+ tcontext = sidtab_search(sidtab, tsid);
if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
rc = -EINVAL;
goto out_unlock;
}
- if (tclass && tclass <= policydb.p_classes.nprim)
- cladatum = policydb.class_val_to_struct[tclass - 1];
+ if (tclass && tclass <= policydb->p_classes.nprim)
+ cladatum = policydb->class_val_to_struct[tclass - 1];
/* Set the user identity. */
switch (specified) {
@@ -1657,7 +1759,7 @@ static int security_compute_sid(u32 ssid,
} else if (cladatum && cladatum->default_role == DEFAULT_TARGET) {
newcontext.role = tcontext->role;
} else {
- if ((tclass == policydb.process_class) || (sock == true))
+ if ((tclass == policydb->process_class) || (sock == true))
newcontext.role = scontext->role;
else
newcontext.role = OBJECT_R_VAL;
@@ -1669,7 +1771,7 @@ static int security_compute_sid(u32 ssid,
} else if (cladatum && cladatum->default_type == DEFAULT_TARGET) {
newcontext.type = tcontext->type;
} else {
- if ((tclass == policydb.process_class) || (sock == true)) {
+ if ((tclass == policydb->process_class) || (sock == true)) {
/* Use the type of process. */
newcontext.type = scontext->type;
} else {
@@ -1683,11 +1785,11 @@ static int security_compute_sid(u32 ssid,
avkey.target_type = tcontext->type;
avkey.target_class = tclass;
avkey.specified = specified;
- avdatum = avtab_search(&policydb.te_avtab, &avkey);
+ avdatum = avtab_search(&policydb->te_avtab, &avkey);
/* If no permanent rule, also check for enabled conditional rules */
if (!avdatum) {
- node = avtab_search_node(&policydb.te_cond_avtab, &avkey);
+ node = avtab_search_node(&policydb->te_cond_avtab, &avkey);
for (; node; node = avtab_search_node_next(node, specified)) {
if (node->key.specified & AVTAB_ENABLED) {
avdatum = &node->datum;
@@ -1703,13 +1805,14 @@ static int security_compute_sid(u32 ssid,
/* if we have a objname this is a file trans check so check those rules */
if (objname)
- filename_compute_type(&policydb, &newcontext, scontext->type,
+ filename_compute_type(policydb, &newcontext, scontext->type,
tcontext->type, tclass, objname);
/* Check for class-specific changes. */
if (specified & AVTAB_TRANSITION) {
/* Look for a role transition rule. */
- for (roletr = policydb.role_tr; roletr; roletr = roletr->next) {
+ for (roletr = policydb->role_tr; roletr;
+ roletr = roletr->next) {
if ((roletr->role == scontext->role) &&
(roletr->type == tcontext->type) &&
(roletr->tclass == tclass)) {
@@ -1722,14 +1825,14 @@ static int security_compute_sid(u32 ssid,
/* Set the MLS attributes.
This is done last because it may allocate memory. */
- rc = mls_compute_sid(scontext, tcontext, tclass, specified,
+ rc = mls_compute_sid(policydb, scontext, tcontext, tclass, specified,
&newcontext, sock);
if (rc)
goto out_unlock;
/* Check the validity of the context. */
- if (!policydb_context_isvalid(&policydb, &newcontext)) {
- rc = compute_sid_handle_invalid_context(scontext,
+ if (!policydb_context_isvalid(policydb, &newcontext)) {
+ rc = compute_sid_handle_invalid_context(state, scontext,
tcontext,
tclass,
&newcontext);
@@ -1737,9 +1840,9 @@ static int security_compute_sid(u32 ssid,
goto out_unlock;
}
/* Obtain the sid for the context. */
- rc = sidtab_context_to_sid(&sidtab, &newcontext, out_sid);
+ rc = context_struct_to_sid(state, &newcontext, out_sid);
out_unlock:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
context_destroy(&newcontext);
out:
return rc;
@@ -1758,17 +1861,21 @@ static int security_compute_sid(u32 ssid,
* if insufficient memory is available, or %0 if the new SID was
* computed successfully.
*/
-int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
+int security_transition_sid(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
const struct qstr *qstr, u32 *out_sid)
{
- return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
+ return security_compute_sid(state, ssid, tsid, tclass,
+ AVTAB_TRANSITION,
qstr ? qstr->name : NULL, out_sid, true);
}
-int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
+int security_transition_sid_user(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass,
const char *objname, u32 *out_sid)
{
- return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
+ return security_compute_sid(state, ssid, tsid, tclass,
+ AVTAB_TRANSITION,
objname, out_sid, false);
}
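/*
 * Hypothetical caller sketch for the state-threaded transition API above:
 * compute the SID a new file created in @dir_sid by @task_sid would get.
 * SECCLASS_FILE is the kernel-private class value, as expected on the
 * kern=true path.
 */
static int example_new_file_sid(u32 task_sid, u32 dir_sid,
				const struct qstr *name, u32 *newsid)
{
	return security_transition_sid(&selinux_state, task_sid, dir_sid,
				       SECCLASS_FILE, name, newsid);
}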
@@ -1785,12 +1892,14 @@ int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
* if insufficient memory is available, or %0 if the SID was
* computed successfully.
*/
-int security_member_sid(u32 ssid,
+int security_member_sid(struct selinux_state *state,
+ u32 ssid,
u32 tsid,
u16 tclass,
u32 *out_sid)
{
- return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, NULL,
+ return security_compute_sid(state, ssid, tsid, tclass,
+ AVTAB_MEMBER, NULL,
out_sid, false);
}
@@ -1807,145 +1916,130 @@ int security_member_sid(u32 ssid,
* if insufficient memory is available, or %0 if the SID was
* computed successfully.
*/
-int security_change_sid(u32 ssid,
+int security_change_sid(struct selinux_state *state,
+ u32 ssid,
u32 tsid,
u16 tclass,
u32 *out_sid)
{
- return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, NULL,
+ return security_compute_sid(state,
+ ssid, tsid, tclass, AVTAB_CHANGE, NULL,
out_sid, false);
}
-/* Clone the SID into the new SID table. */
-static int clone_sid(u32 sid,
- struct context *context,
- void *arg)
+static inline int convert_context_handle_invalid_context(
+ struct selinux_state *state,
+ struct context *context)
{
- struct sidtab *s = arg;
-
- if (sid > SECINITSID_NUM)
- return sidtab_insert(s, sid, context);
- else
- return 0;
-}
-
-static inline int convert_context_handle_invalid_context(struct context *context)
-{
+ struct policydb *policydb = &state->ss->policydb;
char *s;
u32 len;
- if (selinux_enforcing)
+ if (enforcing_enabled(state))
return -EINVAL;
- if (!context_struct_to_string(context, &s, &len)) {
- printk(KERN_WARNING "SELinux: Context %s would be invalid if enforcing\n", s);
+ if (!context_struct_to_string(policydb, context, &s, &len)) {
+ pr_warn("SELinux: Context %s would be invalid if enforcing\n",
+ s);
kfree(s);
}
return 0;
}
struct convert_context_args {
+ struct selinux_state *state;
struct policydb *oldp;
struct policydb *newp;
};
/*
* Convert the values in the security context
- * structure `c' from the values specified
+ * structure `oldc' from the values specified
* in the policy `p->oldp' to the values specified
- * in the policy `p->newp'. Verify that the
- * context is valid under the new policy.
+ * in the policy `p->newp', storing the new context
+ * in `newc'. Verify that the context is valid
+ * under the new policy.
*/
-static int convert_context(u32 key,
- struct context *c,
- void *p)
+static int convert_context(struct context *oldc, struct context *newc, void *p)
{
struct convert_context_args *args;
- struct context oldc;
struct ocontext *oc;
- struct mls_range *range;
struct role_datum *role;
struct type_datum *typdatum;
struct user_datum *usrdatum;
char *s;
u32 len;
- int rc = 0;
-
- if (key <= SECINITSID_NUM)
- goto out;
+ int rc;
args = p;
- if (c->str) {
- struct context ctx;
-
- rc = -ENOMEM;
- s = kstrdup(c->str, GFP_KERNEL);
+ if (oldc->str) {
+ s = kstrdup(oldc->str, GFP_KERNEL);
if (!s)
- goto out;
+ return -ENOMEM;
rc = string_to_context_struct(args->newp, NULL, s,
- c->len, &ctx, SECSID_NULL);
- kfree(s);
- if (!rc) {
- printk(KERN_INFO "SELinux: Context %s became valid (mapped).\n",
- c->str);
- /* Replace string with mapped representation. */
- kfree(c->str);
- memcpy(c, &ctx, sizeof(*c));
- goto out;
- } else if (rc == -EINVAL) {
- /* Retain string representation for later mapping. */
- rc = 0;
- goto out;
- } else {
- /* Other error condition, e.g. ENOMEM. */
- printk(KERN_ERR "SELinux: Unable to map context %s, rc = %d.\n",
- c->str, -rc);
- goto out;
+ newc, SECSID_NULL);
+ if (rc == -EINVAL) {
+ /*
+ * Retain string representation for later mapping.
+ *
+ * IMPORTANT: We need to copy the contents of oldc->str
+ * back into s again because string_to_context_struct()
+ * may have garbled it.
+ */
+ memcpy(s, oldc->str, oldc->len);
+ context_init(newc);
+ newc->str = s;
+ newc->len = oldc->len;
+ newc->hash = oldc->hash;
+ return 0;
}
+ kfree(s);
+ if (rc) {
+ /* Other error condition, e.g. ENOMEM. */
+ pr_err("SELinux: Unable to map context %s, rc = %d.\n",
+ oldc->str, -rc);
+ return rc;
+ }
+ pr_info("SELinux: Context %s became valid (mapped).\n",
+ oldc->str);
+ return 0;
}
- rc = context_cpy(&oldc, c);
- if (rc)
- goto out;
+ context_init(newc);
/* Convert the user. */
rc = -EINVAL;
usrdatum = hashtab_search(args->newp->p_users.table,
- sym_name(args->oldp, SYM_USERS, c->user - 1));
+ sym_name(args->oldp,
+ SYM_USERS, oldc->user - 1));
if (!usrdatum)
goto bad;
- c->user = usrdatum->value;
+ newc->user = usrdatum->value;
/* Convert the role. */
rc = -EINVAL;
role = hashtab_search(args->newp->p_roles.table,
- sym_name(args->oldp, SYM_ROLES, c->role - 1));
+ sym_name(args->oldp, SYM_ROLES, oldc->role - 1));
if (!role)
goto bad;
- c->role = role->value;
+ newc->role = role->value;
/* Convert the type. */
rc = -EINVAL;
typdatum = hashtab_search(args->newp->p_types.table,
- sym_name(args->oldp, SYM_TYPES, c->type - 1));
+ sym_name(args->oldp,
+ SYM_TYPES, oldc->type - 1));
if (!typdatum)
goto bad;
- c->type = typdatum->value;
+ newc->type = typdatum->value;
/* Convert the MLS fields if dealing with MLS policies */
if (args->oldp->mls_enabled && args->newp->mls_enabled) {
- rc = mls_convert_context(args->oldp, args->newp, c);
+ rc = mls_convert_context(args->oldp, args->newp, oldc, newc);
if (rc)
goto bad;
- } else if (args->oldp->mls_enabled && !args->newp->mls_enabled) {
- /*
- * Switching between MLS and non-MLS policy:
- * free any storage used by the MLS fields in the
- * context for all existing entries in the sidtab.
- */
- mls_context_destroy(c);
} else if (!args->oldp->mls_enabled && args->newp->mls_enabled) {
/*
* Switching between non-MLS and MLS policy:
@@ -1959,76 +2053,64 @@ static int convert_context(u32 key,
oc = oc->next;
rc = -EINVAL;
if (!oc) {
- printk(KERN_ERR "SELinux: unable to look up"
+ pr_err("SELinux: unable to look up"
" the initial SIDs list\n");
goto bad;
}
- range = &oc->context[0].range;
- rc = mls_range_set(c, range);
+ rc = mls_range_set(newc, &oc->context[0].range);
if (rc)
goto bad;
}
/* Check the validity of the new context. */
- if (!policydb_context_isvalid(args->newp, c)) {
- rc = convert_context_handle_invalid_context(&oldc);
+ if (!policydb_context_isvalid(args->newp, newc)) {
+ rc = convert_context_handle_invalid_context(args->state, oldc);
if (rc)
goto bad;
}
- context_destroy(&oldc);
+ rc = context_add_hash(args->newp, newc);
+ if (rc)
+ goto bad;
- rc = 0;
-out:
- return rc;
+ return 0;
bad:
/* Map old representation to string and save it. */
- rc = context_struct_to_string(&oldc, &s, &len);
+ rc = context_struct_to_string(args->oldp, oldc, &s, &len);
if (rc)
return rc;
- context_destroy(&oldc);
- context_destroy(c);
- c->str = s;
- c->len = len;
- printk(KERN_INFO "SELinux: Context %s became invalid (unmapped).\n",
- c->str);
- rc = 0;
- goto out;
+ context_destroy(newc);
+ newc->str = s;
+ newc->len = len;
+ newc->hash = context_compute_hash(s);
+ pr_info("SELinux: Context %s became invalid (unmapped).\n",
+ newc->str);
+ return 0;
}
-static void security_load_policycaps(void)
+static void security_load_policycaps(struct selinux_state *state)
{
+ struct policydb *p = &state->ss->policydb;
unsigned int i;
struct ebitmap_node *node;
- selinux_policycap_netpeer = ebitmap_get_bit(&policydb.policycaps,
- POLICYDB_CAPABILITY_NETPEER);
- selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
- POLICYDB_CAPABILITY_OPENPERM);
- selinux_policycap_extsockclass = ebitmap_get_bit(&policydb.policycaps,
- POLICYDB_CAPABILITY_EXTSOCKCLASS);
- selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps,
- POLICYDB_CAPABILITY_ALWAYSNETWORK);
- selinux_policycap_cgroupseclabel =
- ebitmap_get_bit(&policydb.policycaps,
- POLICYDB_CAPABILITY_CGROUPSECLABEL);
- selinux_policycap_nnp_nosuid_transition =
- ebitmap_get_bit(&policydb.policycaps,
- POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION);
+ for (i = 0; i < ARRAY_SIZE(state->policycap); i++)
+ state->policycap[i] = ebitmap_get_bit(&p->policycaps, i);
for (i = 0; i < ARRAY_SIZE(selinux_policycap_names); i++)
pr_info("SELinux: policy capability %s=%d\n",
selinux_policycap_names[i],
- ebitmap_get_bit(&policydb.policycaps, i));
+ ebitmap_get_bit(&p->policycaps, i));
- ebitmap_for_each_positive_bit(&policydb.policycaps, node, i) {
+ ebitmap_for_each_positive_bit(&p->policycaps, node, i) {
if (i >= ARRAY_SIZE(selinux_policycap_names))
pr_info("SELinux: unknown policy capability %u\n",
i);
}
}
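/*
 * Illustrative read-side counterpart (helper name hypothetical): with
 * security_load_policycaps() filling state->policycap[], a capability
 * check becomes a bounds-checked array read instead of a per-capability
 * global variable.
 */
static inline bool example_policycap_enabled(struct selinux_state *state,
					     unsigned int cap)
{
	return cap < ARRAY_SIZE(state->policycap) && state->policycap[cap];
}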
-static int security_preserve_bools(struct policydb *p);
+static int security_preserve_bools(struct selinux_state *state,
+ struct policydb *newpolicydb);
/**
* security_load_policy - Load a security policy configuration.
@@ -2040,14 +2122,16 @@ static int security_preserve_bools(struct policydb *p);
* This function will flush the access vector cache after
* loading the new policy.
*/
-int security_load_policy(void *data, size_t len)
+int security_load_policy(struct selinux_state *state, void *data, size_t len)
{
+ struct policydb *policydb;
+ struct sidtab *oldsidtab, *newsidtab;
struct policydb *oldpolicydb, *newpolicydb;
- struct sidtab oldsidtab, newsidtab;
- struct selinux_mapping *oldmap, *map = NULL;
+ struct selinux_mapping *oldmapping;
+ struct selinux_map newmap;
+ struct sidtab_convert_params convert_params;
struct convert_context_args args;
u32 seqno;
- u16 map_size;
int rc = 0;
struct policy_file file = { data, len }, *fp = &file;
@@ -2058,123 +2142,126 @@ int security_load_policy(void *data, size_t len)
}
newpolicydb = oldpolicydb + 1;
- if (!ss_initialized) {
- avtab_cache_init();
- ebitmap_cache_init();
- rc = policydb_read(&policydb, fp);
+ policydb = &state->ss->policydb;
+
+ newsidtab = kmalloc(sizeof(*newsidtab), GFP_KERNEL);
+ if (!newsidtab) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (!state->initialized) {
+ rc = policydb_read(policydb, fp);
if (rc) {
- avtab_cache_destroy();
- ebitmap_cache_destroy();
+ kfree(newsidtab);
goto out;
}
- policydb.len = len;
- rc = selinux_set_mapping(&policydb, secclass_map,
- &current_mapping,
- &current_mapping_size);
+ policydb->len = len;
+ rc = selinux_set_mapping(policydb, secclass_map,
+ &state->ss->map);
if (rc) {
- policydb_destroy(&policydb);
- avtab_cache_destroy();
- ebitmap_cache_destroy();
+ kfree(newsidtab);
+ policydb_destroy(policydb);
goto out;
}
- rc = policydb_load_isids(&policydb, &sidtab);
+ rc = policydb_load_isids(policydb, newsidtab);
if (rc) {
- policydb_destroy(&policydb);
- avtab_cache_destroy();
- ebitmap_cache_destroy();
+ kfree(newsidtab);
+ policydb_destroy(policydb);
goto out;
}
- security_load_policycaps();
- ss_initialized = 1;
- seqno = ++latest_granting;
+ state->ss->sidtab = newsidtab;
+ security_load_policycaps(state);
+ state->initialized = 1;
+ seqno = ++state->ss->latest_granting;
selinux_complete_init();
- avc_ss_reset(seqno);
+ avc_ss_reset(state->avc, seqno);
selnl_notify_policyload(seqno);
- selinux_status_update_policyload(seqno);
+ selinux_status_update_policyload(state, seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
goto out;
}
-#if 0
- sidtab_hash_eval(&sidtab, "sids");
-#endif
-
rc = policydb_read(newpolicydb, fp);
- if (rc)
+ if (rc) {
+ kfree(newsidtab);
goto out;
+ }
newpolicydb->len = len;
/* If switching between different policy types, log MLS status */
- if (policydb.mls_enabled && !newpolicydb->mls_enabled)
- printk(KERN_INFO "SELinux: Disabling MLS support...\n");
- else if (!policydb.mls_enabled && newpolicydb->mls_enabled)
- printk(KERN_INFO "SELinux: Enabling MLS support...\n");
+ if (policydb->mls_enabled && !newpolicydb->mls_enabled)
+ pr_info("SELinux: Disabling MLS support...\n");
+ else if (!policydb->mls_enabled && newpolicydb->mls_enabled)
+ pr_info("SELinux: Enabling MLS support...\n");
- rc = policydb_load_isids(newpolicydb, &newsidtab);
+ rc = policydb_load_isids(newpolicydb, newsidtab);
if (rc) {
- printk(KERN_ERR "SELinux: unable to load the initial SIDs\n");
+ pr_err("SELinux: unable to load the initial SIDs\n");
policydb_destroy(newpolicydb);
+ kfree(newsidtab);
goto out;
}
- rc = selinux_set_mapping(newpolicydb, secclass_map, &map, &map_size);
+ rc = selinux_set_mapping(newpolicydb, secclass_map, &newmap);
if (rc)
goto err;
- rc = security_preserve_bools(newpolicydb);
+ rc = security_preserve_bools(state, newpolicydb);
if (rc) {
- printk(KERN_ERR "SELinux: unable to preserve booleans\n");
+ pr_err("SELinux: unable to preserve booleans\n");
goto err;
}
- /* Clone the SID table. */
- sidtab_shutdown(&sidtab);
-
- rc = sidtab_map(&sidtab, clone_sid, &newsidtab);
- if (rc)
- goto err;
+ oldsidtab = state->ss->sidtab;
/*
* Convert the internal representations of contexts
* in the new SID table.
*/
- args.oldp = &policydb;
+ args.state = state;
+ args.oldp = policydb;
args.newp = newpolicydb;
- rc = sidtab_map(&newsidtab, convert_context, &args);
+
+ convert_params.func = convert_context;
+ convert_params.args = &args;
+ convert_params.target = newsidtab;
+
+ rc = sidtab_convert(oldsidtab, &convert_params);
if (rc) {
- printk(KERN_ERR "SELinux: unable to convert the internal"
+ pr_err("SELinux: unable to convert the internal"
" representation of contexts in the new SID"
" table\n");
goto err;
}
/* Save the old policydb and SID table to free later. */
- memcpy(oldpolicydb, &policydb, sizeof(policydb));
- sidtab_set(&oldsidtab, &sidtab);
+ memcpy(oldpolicydb, policydb, sizeof(*policydb));
/* Install the new policydb and SID table. */
- write_lock_irq(&policy_rwlock);
- memcpy(&policydb, newpolicydb, sizeof(policydb));
- sidtab_set(&sidtab, &newsidtab);
- security_load_policycaps();
- oldmap = current_mapping;
- current_mapping = map;
- current_mapping_size = map_size;
- seqno = ++latest_granting;
- write_unlock_irq(&policy_rwlock);
+ write_lock_irq(&state->ss->policy_rwlock);
+ memcpy(policydb, newpolicydb, sizeof(*policydb));
+ state->ss->sidtab = newsidtab;
+ security_load_policycaps(state);
+ oldmapping = state->ss->map.mapping;
+ state->ss->map.mapping = newmap.mapping;
+ state->ss->map.size = newmap.size;
+ seqno = ++state->ss->latest_granting;
+ write_unlock_irq(&state->ss->policy_rwlock);
/* Free the old policydb and SID table. */
policydb_destroy(oldpolicydb);
- sidtab_destroy(&oldsidtab);
- kfree(oldmap);
+ sidtab_destroy(oldsidtab);
+ kfree(oldsidtab);
+ kfree(oldmapping);
- avc_ss_reset(seqno);
+ avc_ss_reset(state->avc, seqno);
selnl_notify_policyload(seqno);
- selinux_status_update_policyload(seqno);
+ selinux_status_update_policyload(state, seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
@@ -2182,8 +2269,9 @@ int security_load_policy(void *data, size_t len)
goto out;
err:
- kfree(map);
- sidtab_destroy(&newsidtab);
+ kfree(newmap.mapping);
+ sidtab_destroy(newsidtab);
+ kfree(newsidtab);
policydb_destroy(newpolicydb);
out:
@@ -2191,13 +2279,14 @@ int security_load_policy(void *data, size_t len)
return rc;
}
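/*
 * Conceptual sketch of the sidtab_convert() callback contract used above:
 * for every live entry the callback receives the old context and a
 * destination context to fill, and a non-zero return aborts the conversion
 * so the old table stays authoritative. The flat arrays here are
 * hypothetical; the real sidtab walks its own internal storage.
 */
static int example_convert_all(struct context *old, struct context *new,
			       unsigned int nel,
			       struct sidtab_convert_params *params)
{
	unsigned int i;
	int rc;

	for (i = 0; i < nel; i++) {
		rc = params->func(&old[i], &new[i], params->args);
		if (rc)
			return rc;
	}
	return 0;
}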
-size_t security_policydb_len(void)
+size_t security_policydb_len(struct selinux_state *state)
{
+ struct policydb *p = &state->ss->policydb;
size_t len;
- read_lock(&policy_rwlock);
- len = policydb.len;
- read_unlock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
+ len = p->len;
+ read_unlock(&state->ss->policy_rwlock);
return len;
}
@@ -2208,14 +2297,20 @@ size_t security_policydb_len(void)
* @port: port number
* @out_sid: security identifier
*/
-int security_port_sid(u8 protocol, u16 port, u32 *out_sid)
+int security_port_sid(struct selinux_state *state,
+ u8 protocol, u16 port, u32 *out_sid)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
struct ocontext *c;
int rc = 0;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
- c = policydb.ocontexts[OCON_PORT];
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
+
+ c = policydb->ocontexts[OCON_PORT];
while (c) {
if (c->u.port.protocol == protocol &&
c->u.port.low_port <= port &&
@@ -2226,8 +2321,7 @@ int security_port_sid(u8 protocol, u16 port, u32 *out_sid)
if (c) {
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(&sidtab,
- &c->context[0],
+ rc = context_struct_to_sid(state, &c->context[0],
&c->sid[0]);
if (rc)
goto out;
@@ -2238,7 +2332,7 @@ int security_port_sid(u8 protocol, u16 port, u32 *out_sid)
}
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
@@ -2248,14 +2342,18 @@ int security_port_sid(u8 protocol, u16 port, u32 *out_sid)
* @pkey_num: pkey number
* @out_sid: security identifier
*/
-int security_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *out_sid)
+int security_ib_pkey_sid(struct selinux_state *state,
+ u64 subnet_prefix, u16 pkey_num, u32 *out_sid)
{
+ struct policydb *policydb;
struct ocontext *c;
int rc = 0;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
- c = policydb.ocontexts[OCON_IBPKEY];
+ policydb = &state->ss->policydb;
+
+ c = policydb->ocontexts[OCON_IBPKEY];
while (c) {
if (c->u.ibpkey.low_pkey <= pkey_num &&
c->u.ibpkey.high_pkey >= pkey_num &&
@@ -2267,7 +2365,7 @@ int security_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *out_sid)
if (c) {
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(&sidtab,
+ rc = context_struct_to_sid(state,
&c->context[0],
&c->sid[0]);
if (rc)
@@ -2278,7 +2376,7 @@ int security_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *out_sid)
*out_sid = SECINITSID_UNLABELED;
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
@@ -2288,14 +2386,20 @@ int security_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *out_sid)
* @port: port number
* @out_sid: security identifier
*/
-int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid)
+int security_ib_endport_sid(struct selinux_state *state,
+ const char *dev_name, u8 port_num, u32 *out_sid)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
struct ocontext *c;
int rc = 0;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
- c = policydb.ocontexts[OCON_IBENDPORT];
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
+
+ c = policydb->ocontexts[OCON_IBENDPORT];
while (c) {
if (c->u.ibendport.port == port_num &&
!strncmp(c->u.ibendport.dev_name,
@@ -2308,8 +2412,7 @@ int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid)
if (c) {
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(&sidtab,
- &c->context[0],
+ rc = context_struct_to_sid(state, &c->context[0],
&c->sid[0]);
if (rc)
goto out;
@@ -2319,7 +2422,7 @@ int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid)
*out_sid = SECINITSID_UNLABELED;
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
@@ -2328,14 +2431,20 @@ int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid)
* @name: interface name
* @if_sid: interface SID
*/
-int security_netif_sid(char *name, u32 *if_sid)
+int security_netif_sid(struct selinux_state *state,
+ char *name, u32 *if_sid)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
int rc = 0;
struct ocontext *c;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
- c = policydb.ocontexts[OCON_NETIF];
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
+
+ c = policydb->ocontexts[OCON_NETIF];
while (c) {
if (strcmp(name, c->u.name) == 0)
break;
@@ -2344,13 +2453,11 @@ int security_netif_sid(char *name, u32 *if_sid)
if (c) {
if (!c->sid[0] || !c->sid[1]) {
- rc = sidtab_context_to_sid(&sidtab,
- &c->context[0],
- &c->sid[0]);
+ rc = context_struct_to_sid(state, &c->context[0],
+ &c->sid[0]);
if (rc)
goto out;
- rc = sidtab_context_to_sid(&sidtab,
- &c->context[1],
+ rc = context_struct_to_sid(state, &c->context[1],
&c->sid[1]);
if (rc)
goto out;
@@ -2360,7 +2467,7 @@ int security_netif_sid(char *name, u32 *if_sid)
*if_sid = SECINITSID_NETIF;
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
@@ -2384,15 +2491,19 @@ static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask)
* @addrlen: address length in bytes
* @out_sid: security identifier
*/
-int security_node_sid(u16 domain,
+int security_node_sid(struct selinux_state *state,
+ u16 domain,
void *addrp,
u32 addrlen,
u32 *out_sid)
{
+ struct policydb *policydb;
int rc;
struct ocontext *c;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
+
+ policydb = &state->ss->policydb;
switch (domain) {
case AF_INET: {
@@ -2404,7 +2515,7 @@ int security_node_sid(u16 domain,
addr = *((u32 *)addrp);
- c = policydb.ocontexts[OCON_NODE];
+ c = policydb->ocontexts[OCON_NODE];
while (c) {
if (c->u.node.addr == (addr & c->u.node.mask))
break;
@@ -2417,7 +2528,7 @@ int security_node_sid(u16 domain,
rc = -EINVAL;
if (addrlen != sizeof(u64) * 2)
goto out;
- c = policydb.ocontexts[OCON_NODE6];
+ c = policydb->ocontexts[OCON_NODE6];
while (c) {
if (match_ipv6_addrmask(addrp, c->u.node6.addr,
c->u.node6.mask))
@@ -2434,7 +2545,7 @@ int security_node_sid(u16 domain,
if (c) {
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(&sidtab,
+ rc = context_struct_to_sid(state,
&c->context[0],
&c->sid[0]);
if (rc)
@@ -2447,7 +2558,7 @@ int security_node_sid(u16 domain,
rc = 0;
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
@@ -2467,11 +2578,14 @@ int security_node_sid(u16 domain,
* number of elements in the array.
*/
-int security_get_user_sids(u32 fromsid,
+int security_get_user_sids(struct selinux_state *state,
+ u32 fromsid,
char *username,
u32 **sids,
u32 *nel)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
struct context *fromcon, usercon;
u32 *mysids = NULL, *mysids2, sid;
u32 mynel = 0, maxnel = SIDS_NEL;
@@ -2483,20 +2597,23 @@ int security_get_user_sids(u32 fromsid,
*sids = NULL;
*nel = 0;
- if (!ss_initialized)
+ if (!state->initialized)
goto out;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
+
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
context_init(&usercon);
rc = -EINVAL;
- fromcon = sidtab_search(&sidtab, fromsid);
+ fromcon = sidtab_search(sidtab, fromsid);
if (!fromcon)
goto out_unlock;
rc = -EINVAL;
- user = hashtab_search(policydb.p_users.table, username);
+ user = hashtab_search(policydb->p_users.table, username);
if (!user)
goto out_unlock;
@@ -2508,15 +2625,21 @@ int security_get_user_sids(u32 fromsid,
goto out_unlock;
ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
- role = policydb.role_val_to_struct[i];
+ role = policydb->role_val_to_struct[i];
usercon.role = i + 1;
ebitmap_for_each_positive_bit(&role->types, tnode, j) {
usercon.type = j + 1;
+ /*
+ * The same context struct is reused here so the hash
+ * must be reset.
+ */
+ usercon.hash = 0;
- if (mls_setup_user_range(fromcon, user, &usercon))
+ if (mls_setup_user_range(policydb, fromcon, user,
+ &usercon))
continue;
- rc = sidtab_context_to_sid(&sidtab, &usercon, &sid);
+ rc = context_struct_to_sid(state, &usercon, &sid);
if (rc)
goto out_unlock;
if (mynel < maxnel) {
@@ -2536,7 +2659,7 @@ int security_get_user_sids(u32 fromsid,
}
rc = 0;
out_unlock:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
if (rc || !mynel) {
kfree(mysids);
goto out;
@@ -2550,7 +2673,8 @@ int security_get_user_sids(u32 fromsid,
}
for (i = 0, j = 0; i < mynel; i++) {
struct av_decision dummy_avd;
- rc = avc_has_perm_noaudit(fromsid, mysids[i],
+ rc = avc_has_perm_noaudit(state,
+ fromsid, mysids[i],
SECCLASS_PROCESS, /* kernel value */
PROCESS__TRANSITION, AVC_STRICT,
&dummy_avd);
@@ -2579,11 +2703,13 @@ int security_get_user_sids(u32 fromsid,
*
* The caller must acquire the policy_rwlock before calling this function.
*/
-static inline int __security_genfs_sid(const char *fstype,
+static inline int __security_genfs_sid(struct selinux_state *state,
+ const char *fstype,
char *path,
u16 orig_sclass,
u32 *sid)
{
+ struct policydb *policydb = &state->ss->policydb;
int len;
u16 sclass;
struct genfs *genfs;
@@ -2593,10 +2719,10 @@ static inline int __security_genfs_sid(const char *fstype,
while (path[0] == '/' && path[1] == '/')
path++;
- sclass = unmap_class(orig_sclass);
+ sclass = unmap_class(&state->ss->map, orig_sclass);
*sid = SECINITSID_UNLABELED;
- for (genfs = policydb.genfs; genfs; genfs = genfs->next) {
+ for (genfs = policydb->genfs; genfs; genfs = genfs->next) {
cmp = strcmp(fstype, genfs->fstype);
if (cmp <= 0)
break;
@@ -2618,7 +2744,7 @@ static inline int __security_genfs_sid(const char *fstype,
goto out;
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]);
+ rc = context_struct_to_sid(state, &c->context[0], &c->sid[0]);
if (rc)
goto out;
}
@@ -2639,16 +2765,17 @@ static inline int __security_genfs_sid(const char *fstype,
* Acquire policy_rwlock before calling __security_genfs_sid() and release
* it afterward.
*/
-int security_genfs_sid(const char *fstype,
+int security_genfs_sid(struct selinux_state *state,
+ const char *fstype,
char *path,
u16 orig_sclass,
u32 *sid)
{
int retval;
- read_lock(&policy_rwlock);
- retval = __security_genfs_sid(fstype, path, orig_sclass, sid);
- read_unlock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
+ retval = __security_genfs_sid(state, fstype, path, orig_sclass, sid);
+ read_unlock(&state->ss->policy_rwlock);
return retval;
}
@@ -2656,16 +2783,21 @@ int security_genfs_sid(const char *fstype,
* security_fs_use - Determine how to handle labeling for a filesystem.
* @sb: superblock in question
*/
-int security_fs_use(struct super_block *sb)
+int security_fs_use(struct selinux_state *state, struct super_block *sb)
{
+ struct policydb *policydb;
+ struct sidtab *sidtab;
int rc = 0;
struct ocontext *c;
struct superblock_security_struct *sbsec = sb->s_security;
const char *fstype = sb->s_type->name;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
- c = policydb.ocontexts[OCON_FSUSE];
+ policydb = &state->ss->policydb;
+ sidtab = state->ss->sidtab;
+
+ c = policydb->ocontexts[OCON_FSUSE];
while (c) {
if (strcmp(fstype, c->u.name) == 0)
break;
@@ -2675,14 +2807,14 @@ int security_fs_use(struct super_block *sb)
if (c) {
sbsec->behavior = c->v.behavior;
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(&sidtab, &c->context[0],
+ rc = context_struct_to_sid(state, &c->context[0],
&c->sid[0]);
if (rc)
goto out;
}
sbsec->sid = c->sid[0];
} else {
- rc = __security_genfs_sid(fstype, "/", SECCLASS_DIR,
+ rc = __security_genfs_sid(state, fstype, "/", SECCLASS_DIR,
&sbsec->sid);
if (rc) {
sbsec->behavior = SECURITY_FS_USE_NONE;
@@ -2693,20 +2825,25 @@ int security_fs_use(struct super_block *sb)
}
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
-int security_get_bools(int *len, char ***names, int **values)
+int security_get_bools(struct selinux_state *state,
+ int *len, char ***names, int **values)
{
+ struct policydb *policydb;
int i, rc;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
+
+ policydb = &state->ss->policydb;
+
*names = NULL;
*values = NULL;
rc = 0;
- *len = policydb.p_bools.nprim;
+ *len = policydb->p_bools.nprim;
if (!*len)
goto out;
@@ -2721,16 +2858,17 @@ int security_get_bools(int *len, char ***names, int **values)
goto err;
for (i = 0; i < *len; i++) {
- (*values)[i] = policydb.bool_val_to_struct[i]->state;
+ (*values)[i] = policydb->bool_val_to_struct[i]->state;
rc = -ENOMEM;
- (*names)[i] = kstrdup(sym_name(&policydb, SYM_BOOLS, i), GFP_ATOMIC);
+ (*names)[i] = kstrdup(sym_name(policydb, SYM_BOOLS, i),
+ GFP_ATOMIC);
if (!(*names)[i])
goto err;
}
rc = 0;
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
err:
if (*names) {
@@ -2742,90 +2880,98 @@ int security_get_bools(int *len, char ***names, int **values)
}
-int security_set_bools(int len, int *values)
+int security_set_bools(struct selinux_state *state, int len, int *values)
{
+ struct policydb *policydb;
int i, rc;
int lenp, seqno = 0;
struct cond_node *cur;
- write_lock_irq(&policy_rwlock);
+ write_lock_irq(&state->ss->policy_rwlock);
+
+ policydb = &state->ss->policydb;
rc = -EFAULT;
- lenp = policydb.p_bools.nprim;
+ lenp = policydb->p_bools.nprim;
if (len != lenp)
goto out;
for (i = 0; i < len; i++) {
- if (!!values[i] != policydb.bool_val_to_struct[i]->state) {
+ if (!!values[i] != policydb->bool_val_to_struct[i]->state) {
audit_log(current->audit_context, GFP_ATOMIC,
AUDIT_MAC_CONFIG_CHANGE,
"bool=%s val=%d old_val=%d auid=%u ses=%u",
- sym_name(&policydb, SYM_BOOLS, i),
+ sym_name(policydb, SYM_BOOLS, i),
!!values[i],
- policydb.bool_val_to_struct[i]->state,
+ policydb->bool_val_to_struct[i]->state,
from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
}
if (values[i])
- policydb.bool_val_to_struct[i]->state = 1;
+ policydb->bool_val_to_struct[i]->state = 1;
else
- policydb.bool_val_to_struct[i]->state = 0;
+ policydb->bool_val_to_struct[i]->state = 0;
}
- for (cur = policydb.cond_list; cur; cur = cur->next) {
- rc = evaluate_cond_node(&policydb, cur);
+ for (cur = policydb->cond_list; cur; cur = cur->next) {
+ rc = evaluate_cond_node(policydb, cur);
if (rc)
goto out;
}
- seqno = ++latest_granting;
+ seqno = ++state->ss->latest_granting;
rc = 0;
out:
- write_unlock_irq(&policy_rwlock);
+ write_unlock_irq(&state->ss->policy_rwlock);
if (!rc) {
- avc_ss_reset(seqno);
+ avc_ss_reset(state->avc, seqno);
selnl_notify_policyload(seqno);
- selinux_status_update_policyload(seqno);
+ selinux_status_update_policyload(state, seqno);
selinux_xfrm_notify_policyload();
}
return rc;
}
-int security_get_bool_value(int index)
+int security_get_bool_value(struct selinux_state *state,
+ int index)
{
+ struct policydb *policydb;
int rc;
int len;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
+
+ policydb = &state->ss->policydb;
rc = -EFAULT;
- len = policydb.p_bools.nprim;
+ len = policydb->p_bools.nprim;
if (index >= len)
goto out;
- rc = policydb.bool_val_to_struct[index]->state;
+ rc = policydb->bool_val_to_struct[index]->state;
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
-static int security_preserve_bools(struct policydb *p)
+static int security_preserve_bools(struct selinux_state *state,
+ struct policydb *policydb)
{
int rc, nbools = 0, *bvalues = NULL, i;
char **bnames = NULL;
struct cond_bool_datum *booldatum;
struct cond_node *cur;
- rc = security_get_bools(&nbools, &bnames, &bvalues);
+ rc = security_get_bools(state, &nbools, &bnames, &bvalues);
if (rc)
goto out;
for (i = 0; i < nbools; i++) {
- booldatum = hashtab_search(p->p_bools.table, bnames[i]);
+ booldatum = hashtab_search(policydb->p_bools.table, bnames[i]);
if (booldatum)
booldatum->state = bvalues[i];
}
- for (cur = p->cond_list; cur; cur = cur->next) {
- rc = evaluate_cond_node(p, cur);
+ for (cur = policydb->cond_list; cur; cur = cur->next) {
+ rc = evaluate_cond_node(policydb, cur);
if (rc)
goto out;
}
@@ -2844,8 +2990,11 @@ static int security_preserve_bools(struct policydb *p)
* security_sid_mls_copy() - computes a new sid based on the given
* sid and the mls portion of mls_sid.
*/
-int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
+int security_sid_mls_copy(struct selinux_state *state,
+ u32 sid, u32 mls_sid, u32 *new_sid)
{
+ struct policydb *policydb = &state->ss->policydb;
+ struct sidtab *sidtab = state->ss->sidtab;
struct context *context1;
struct context *context2;
struct context newcon;
@@ -2854,27 +3003,27 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
int rc;
rc = 0;
- if (!ss_initialized || !policydb.mls_enabled) {
+ if (!state->initialized || !policydb->mls_enabled) {
*new_sid = sid;
goto out;
}
context_init(&newcon);
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
rc = -EINVAL;
- context1 = sidtab_search(&sidtab, sid);
+ context1 = sidtab_search(sidtab, sid);
if (!context1) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, sid);
goto out_unlock;
}
rc = -EINVAL;
- context2 = sidtab_search(&sidtab, mls_sid);
+ context2 = sidtab_search(sidtab, mls_sid);
if (!context2) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, mls_sid);
goto out_unlock;
}
@@ -2887,10 +3036,11 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
goto out_unlock;
/* Check the validity of the new context. */
- if (!policydb_context_isvalid(&policydb, &newcon)) {
- rc = convert_context_handle_invalid_context(&newcon);
+ if (!policydb_context_isvalid(policydb, &newcon)) {
+ rc = convert_context_handle_invalid_context(state, &newcon);
if (rc) {
- if (!context_struct_to_string(&newcon, &s, &len)) {
+ if (!context_struct_to_string(policydb, &newcon, &s,
+ &len)) {
audit_log(current->audit_context,
GFP_ATOMIC, AUDIT_SELINUX_ERR,
"op=security_sid_mls_copy "
@@ -2900,10 +3050,9 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
goto out_unlock;
}
}
-
- rc = sidtab_context_to_sid(&sidtab, &newcon, new_sid);
+ rc = context_struct_to_sid(state, &newcon, new_sid);
out_unlock:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
context_destroy(&newcon);
out:
return rc;
@@ -2929,10 +3078,13 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
* multiple, inconsistent labels | -<errno> | SECSID_NULL
*
*/
-int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
+int security_net_peersid_resolve(struct selinux_state *state,
+ u32 nlbl_sid, u32 nlbl_type,
u32 xfrm_sid,
u32 *peer_sid)
{
+ struct policydb *policydb = &state->ss->policydb;
+ struct sidtab *sidtab = state->ss->sidtab;
int rc;
struct context *nlbl_ctx;
struct context *xfrm_ctx;
@@ -2954,25 +3106,27 @@ int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
return 0;
}
- /* we don't need to check ss_initialized here since the only way both
+ /*
+ * We don't need to check initialized here since the only way both
* nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
- * security server was initialized and ss_initialized was true */
- if (!policydb.mls_enabled)
+ * security server was initialized and state->initialized was true.
+ */
+ if (!policydb->mls_enabled)
return 0;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
rc = -EINVAL;
- nlbl_ctx = sidtab_search(&sidtab, nlbl_sid);
+ nlbl_ctx = sidtab_search(sidtab, nlbl_sid);
if (!nlbl_ctx) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, nlbl_sid);
goto out;
}
rc = -EINVAL;
- xfrm_ctx = sidtab_search(&sidtab, xfrm_sid);
+ xfrm_ctx = sidtab_search(sidtab, xfrm_sid);
if (!xfrm_ctx) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, xfrm_sid);
goto out;
}
@@ -2987,7 +3141,7 @@ int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
* expressive */
*peer_sid = xfrm_sid;
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
@@ -3004,19 +3158,21 @@ static int get_classes_callback(void *k, void *d, void *args)
return 0;
}
-int security_get_classes(char ***classes, int *nclasses)
+int security_get_classes(struct selinux_state *state,
+ char ***classes, int *nclasses)
{
+ struct policydb *policydb = &state->ss->policydb;
int rc;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
rc = -ENOMEM;
- *nclasses = policydb.p_classes.nprim;
+ *nclasses = policydb->p_classes.nprim;
*classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
if (!*classes)
goto out;
- rc = hashtab_map(policydb.p_classes.table, get_classes_callback,
+ rc = hashtab_map(policydb->p_classes.table, get_classes_callback,
*classes);
if (rc) {
int i;
@@ -3026,7 +3182,7 @@ int security_get_classes(char ***classes, int *nclasses)
}
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
@@ -3043,17 +3199,19 @@ static int get_permissions_callback(void *k, void *d, void *args)
return 0;
}
-int security_get_permissions(char *class, char ***perms, int *nperms)
+int security_get_permissions(struct selinux_state *state,
+ char *class, char ***perms, int *nperms)
{
+ struct policydb *policydb = &state->ss->policydb;
int rc, i;
struct class_datum *match;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
rc = -EINVAL;
- match = hashtab_search(policydb.p_classes.table, class);
+ match = hashtab_search(policydb->p_classes.table, class);
if (!match) {
- printk(KERN_ERR "SELinux: %s: unrecognized class %s\n",
+ pr_err("SELinux: %s: unrecognized class %s\n",
__func__, class);
goto out;
}
@@ -3077,25 +3235,25 @@ int security_get_permissions(char *class, char ***perms, int *nperms)
goto err;
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
err:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
for (i = 0; i < *nperms; i++)
kfree((*perms)[i]);
kfree(*perms);
return rc;
}
-int security_get_reject_unknown(void)
+int security_get_reject_unknown(struct selinux_state *state)
{
- return policydb.reject_unknown;
+ return state->ss->policydb.reject_unknown;
}
-int security_get_allow_unknown(void)
+int security_get_allow_unknown(struct selinux_state *state)
{
- return policydb.allow_unknown;
+ return state->ss->policydb.allow_unknown;
}
/**
@@ -3108,13 +3266,15 @@ int security_get_allow_unknown(void)
* supported, false (0) if it isn't supported.
*
*/
-int security_policycap_supported(unsigned int req_cap)
+int security_policycap_supported(struct selinux_state *state,
+ unsigned int req_cap)
{
+ struct policydb *policydb = &state->ss->policydb;
int rc;
- read_lock(&policy_rwlock);
- rc = ebitmap_get_bit(&policydb.policycaps, req_cap);
- read_unlock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
+ rc = ebitmap_get_bit(&policydb->policycaps, req_cap);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
@@ -3136,6 +3296,8 @@ void selinux_audit_rule_free(void *vrule)
int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
{
+ struct selinux_state *state = &selinux_state;
+ struct policydb *policydb = &state->ss->policydb;
struct selinux_audit_rule *tmprule;
struct role_datum *roledatum;
struct type_datum *typedatum;
@@ -3145,7 +3307,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
*rule = NULL;
- if (!ss_initialized)
+ if (!state->initialized)
return -EOPNOTSUPP;
switch (field) {
@@ -3178,15 +3340,15 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
context_init(&tmprule->au_ctxt);
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
- tmprule->au_seqno = latest_granting;
+ tmprule->au_seqno = state->ss->latest_granting;
switch (field) {
case AUDIT_SUBJ_USER:
case AUDIT_OBJ_USER:
rc = -EINVAL;
- userdatum = hashtab_search(policydb.p_users.table, rulestr);
+ userdatum = hashtab_search(policydb->p_users.table, rulestr);
if (!userdatum)
goto out;
tmprule->au_ctxt.user = userdatum->value;
@@ -3194,7 +3356,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
case AUDIT_SUBJ_ROLE:
case AUDIT_OBJ_ROLE:
rc = -EINVAL;
- roledatum = hashtab_search(policydb.p_roles.table, rulestr);
+ roledatum = hashtab_search(policydb->p_roles.table, rulestr);
if (!roledatum)
goto out;
tmprule->au_ctxt.role = roledatum->value;
@@ -3202,7 +3364,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
case AUDIT_SUBJ_TYPE:
case AUDIT_OBJ_TYPE:
rc = -EINVAL;
- typedatum = hashtab_search(policydb.p_types.table, rulestr);
+ typedatum = hashtab_search(policydb->p_types.table, rulestr);
if (!typedatum)
goto out;
tmprule->au_ctxt.type = typedatum->value;
@@ -3211,14 +3373,15 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
case AUDIT_SUBJ_CLR:
case AUDIT_OBJ_LEV_LOW:
case AUDIT_OBJ_LEV_HIGH:
- rc = mls_from_string(rulestr, &tmprule->au_ctxt, GFP_ATOMIC);
+ rc = mls_from_string(policydb, rulestr, &tmprule->au_ctxt,
+ GFP_ATOMIC);
if (rc)
goto out;
break;
}
rc = 0;
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
if (rc) {
selinux_audit_rule_free(tmprule);
@@ -3258,6 +3421,7 @@ int selinux_audit_rule_known(struct audit_krule *rule)
int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule,
struct audit_context *actx)
{
+ struct selinux_state *state = &selinux_state;
struct context *ctxt;
struct mls_level *level;
struct selinux_audit_rule *rule = vrule;
@@ -3268,14 +3432,14 @@ int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule,
return -ENOENT;
}
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
- if (rule->au_seqno < latest_granting) {
+ if (rule->au_seqno < state->ss->latest_granting) {
match = -ESTALE;
goto out;
}
- ctxt = sidtab_search(&sidtab, sid);
+ ctxt = sidtab_search(state->ss->sidtab, sid);
if (unlikely(!ctxt)) {
WARN_ONCE(1, "selinux_audit_rule_match: unrecognized SID %d\n",
sid);
@@ -3359,7 +3523,7 @@ int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule,
}
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return match;
}
@@ -3433,19 +3597,22 @@ static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
* failure.
*
*/
-int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
+int security_netlbl_secattr_to_sid(struct selinux_state *state,
+ struct netlbl_lsm_secattr *secattr,
u32 *sid)
{
+ struct policydb *policydb = &state->ss->policydb;
+ struct sidtab *sidtab = state->ss->sidtab;
int rc;
struct context *ctx;
struct context ctx_new;
- if (!ss_initialized) {
+ if (!state->initialized) {
*sid = SECSID_NULL;
return 0;
}
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
if (secattr->flags & NETLBL_SECATTR_CACHE)
*sid = *(u32 *)secattr->cache->data;
@@ -3453,7 +3620,7 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
*sid = secattr->attr.secid;
else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
rc = -EIDRM;
- ctx = sidtab_search(&sidtab, SECINITSID_NETMSG);
+ ctx = sidtab_search(sidtab, SECINITSID_NETMSG);
if (ctx == NULL)
goto out;
@@ -3461,17 +3628,17 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
ctx_new.user = ctx->user;
ctx_new.role = ctx->role;
ctx_new.type = ctx->type;
- mls_import_netlbl_lvl(&ctx_new, secattr);
+ mls_import_netlbl_lvl(policydb, &ctx_new, secattr);
if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
- rc = mls_import_netlbl_cat(&ctx_new, secattr);
+ rc = mls_import_netlbl_cat(policydb, &ctx_new, secattr);
if (rc)
goto out;
}
rc = -EIDRM;
- if (!mls_context_isvalid(&policydb, &ctx_new))
+ if (!mls_context_isvalid(policydb, &ctx_new))
goto out_free;
- rc = sidtab_context_to_sid(&sidtab, &ctx_new, sid);
+ rc = context_struct_to_sid(state, &ctx_new, sid);
if (rc)
goto out_free;
@@ -3481,12 +3648,12 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
} else
*sid = SECSID_NULL;
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return 0;
out_free:
ebitmap_destroy(&ctx_new.range.level[0].cat);
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
@@ -3500,33 +3667,35 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
* Returns zero on success, negative values on failure.
*
*/
-int security_netlbl_sid_to_secattr(u32 sid, struct netlbl_lsm_secattr *secattr)
+int security_netlbl_sid_to_secattr(struct selinux_state *state,
+ u32 sid, struct netlbl_lsm_secattr *secattr)
{
+ struct policydb *policydb = &state->ss->policydb;
int rc;
struct context *ctx;
- if (!ss_initialized)
+ if (!state->initialized)
return 0;
- read_lock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
rc = -ENOENT;
- ctx = sidtab_search(&sidtab, sid);
+ ctx = sidtab_search(state->ss->sidtab, sid);
if (ctx == NULL)
goto out;
rc = -ENOMEM;
- secattr->domain = kstrdup(sym_name(&policydb, SYM_TYPES, ctx->type - 1),
+ secattr->domain = kstrdup(sym_name(policydb, SYM_TYPES, ctx->type - 1),
GFP_ATOMIC);
if (secattr->domain == NULL)
goto out;
secattr->attr.secid = sid;
secattr->flags |= NETLBL_SECATTR_DOMAIN_CPY | NETLBL_SECATTR_SECID;
- mls_export_netlbl_lvl(ctx, secattr);
- rc = mls_export_netlbl_cat(ctx, secattr);
+ mls_export_netlbl_lvl(policydb, ctx, secattr);
+ rc = mls_export_netlbl_cat(policydb, ctx, secattr);
out:
- read_unlock(&policy_rwlock);
+ read_unlock(&state->ss->policy_rwlock);
return rc;
}
#endif /* CONFIG_NETLABEL */
@@ -3537,15 +3706,17 @@ int security_netlbl_sid_to_secattr(u32 sid, struct netlbl_lsm_secattr *secattr)
* @len: length of data in bytes
*
*/
-int security_read_policy(void **data, size_t *len)
+int security_read_policy(struct selinux_state *state,
+ void **data, size_t *len)
{
+ struct policydb *policydb = &state->ss->policydb;
int rc;
struct policy_file fp;
- if (!ss_initialized)
+ if (!state->initialized)
return -EINVAL;
- *len = security_policydb_len();
+ *len = security_policydb_len(state);
*data = vmalloc_user(*len);
if (!*data)
@@ -3554,9 +3725,9 @@ int security_read_policy(void **data, size_t *len)
fp.data = *data;
fp.len = *len;
- read_lock(&policy_rwlock);
- rc = policydb_write(&policydb, &fp);
- read_unlock(&policy_rwlock);
+ read_lock(&state->ss->policy_rwlock);
+ rc = policydb_write(policydb, &fp);
+ read_unlock(&state->ss->policy_rwlock);
if (rc)
return rc;
diff --git a/security/selinux/ss/services.h b/security/selinux/ss/services.h
index 356bdd3..fc40640 100644
--- a/security/selinux/ss/services.h
+++ b/security/selinux/ss/services.h
@@ -8,9 +8,30 @@
#define _SS_SERVICES_H_
#include "policydb.h"
-#include "sidtab.h"
+#include "context.h"
-extern struct policydb policydb;
+/* Mapping for a single class */
+struct selinux_mapping {
+ u16 value; /* policy value for class */
+ unsigned int num_perms; /* number of permissions in class */
+ u32 perms[sizeof(u32) * 8]; /* policy values for permissions */
+};
+
+/* Map for all of the classes, with array size */
+struct selinux_map {
+ struct selinux_mapping *mapping; /* indexed by class */
+ u16 size; /* array size of mapping */
+};
+
+struct selinux_ss {
+ struct sidtab *sidtab;
+ struct policydb policydb;
+ rwlock_t policy_rwlock;
+ u32 latest_granting;
+ struct selinux_map map;
+ struct page *status_page;
+ struct mutex status_lock;
+};
void services_compute_xperms_drivers(struct extended_perms *xperms,
struct avtab_node *node);
@@ -18,5 +39,6 @@ void services_compute_xperms_drivers(struct extended_perms *xperms,
void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
struct avtab_node *node);
-#endif /* _SS_SERVICES_H_ */
+int context_add_hash(struct policydb *policydb, struct context *context);
+#endif /* _SS_SERVICES_H_ */
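
The selinux_mapping/selinux_map structures introduced above describe how kernel-visible security class and permission numbers translate to the values of the currently loaded policy. A minimal user-space sketch of consuming such a table follows; the ex_* names, the helper functions, and the invented two-permission class are illustrative assumptions, not what services.c actually defines.

#include <stdint.h>
#include <stdio.h>

struct ex_mapping {
        uint16_t value;                 /* policy value for the class */
        unsigned int num_perms;         /* permissions defined for the class */
        uint32_t perms[32];             /* policy bit for each kernel bit */
};

struct ex_map {
        struct ex_mapping *mapping;     /* indexed by kernel class number */
        uint16_t size;
};

/* kernel class number -> policy class value (0 if unknown) */
static uint16_t ex_map_class(const struct ex_map *m, uint16_t tclass)
{
        return tclass < m->size ? m->mapping[tclass].value : 0;
}

/* kernel access-vector bits -> policy access-vector bits */
static uint32_t ex_map_perms(const struct ex_map *m, uint16_t tclass, uint32_t av)
{
        const struct ex_mapping *mp = &m->mapping[tclass];
        uint32_t out = 0;
        unsigned int i;

        for (i = 0; i < mp->num_perms; i++)
                if (av & (1u << i))
                        out |= mp->perms[i];
        return out;
}

int main(void)
{
        struct ex_mapping classes[2] = {
                { 0 },                                          /* class 0 unused */
                { .value = 7, .num_perms = 2, .perms = { 0x10, 0x20 } },
        };
        struct ex_map m = { .mapping = classes, .size = 2 };

        printf("class 1 -> policy class %u\n", (unsigned int)ex_map_class(&m, 1));
        printf("av 0x3 -> policy av 0x%x\n", (unsigned int)ex_map_perms(&m, 1, 0x3));
        return 0;
}
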
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index 5be31b7..d9d8599 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -2,108 +2,226 @@
/*
* Implementation of the SID table type.
*
- * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
+ * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
+ *
+ * Copyright (C) 2018 Red Hat, Inc.
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
-#include <linux/errno.h>
+#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
-#define SIDTAB_HASH(sid) \
-(sid & SIDTAB_HASH_MASK)
+#define index_to_sid(index) (index + SECINITSID_NUM + 1)
+#define sid_to_index(sid) (sid - (SECINITSID_NUM + 1))
int sidtab_init(struct sidtab *s)
{
- int i;
+ u32 i;
- s->htable = kmalloc_array(SIDTAB_SIZE, sizeof(*s->htable), GFP_ATOMIC);
- if (!s->htable)
- return -ENOMEM;
- for (i = 0; i < SIDTAB_SIZE; i++)
- s->htable[i] = NULL;
- s->nel = 0;
- s->next_sid = 1;
- s->shutdown = 0;
+ memset(s->roots, 0, sizeof(s->roots));
+
+ for (i = 0; i < SECINITSID_NUM; i++)
+ s->isids[i].set = 0;
+
+ s->count = 0;
+ s->convert = NULL;
+ hash_init(s->context_to_sid);
+
spin_lock_init(&s->lock);
return 0;
}
-int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
+static u32 context_to_sid(struct sidtab *s, struct context *context)
{
- int hvalue;
- struct sidtab_node *prev, *cur, *newnode;
+ struct sidtab_entry_leaf *entry;
+ u32 sid = 0;
- if (!s)
- return -ENOMEM;
+ rcu_read_lock();
+ hash_for_each_possible_rcu(s->context_to_sid, entry, list,
+ context->hash) {
+ if (context_cmp(&entry->context, context)) {
+ sid = entry->sid;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return sid;
+}
- hvalue = SIDTAB_HASH(sid);
- prev = NULL;
- cur = s->htable[hvalue];
- while (cur && sid > cur->sid) {
- prev = cur;
- cur = cur->next;
+int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
+{
+ struct sidtab_isid_entry *entry;
+ int rc;
+
+ if (sid == 0 || sid > SECINITSID_NUM)
+ return -EINVAL;
+
+ entry = &s->isids[sid - 1];
+
+ rc = context_cpy(&entry->leaf.context, context);
+ if (rc)
+ return rc;
+
+ entry->set = 1;
+
+ /*
+ * Multiple initial sids may map to the same context. Check that this
+ * context is not already represented in the context_to_sid hashtable
+ * to avoid duplicate entries and long linked lists upon hash
+ * collision.
+ */
+ if (!context_to_sid(s, context)) {
+ entry->leaf.sid = sid;
+ hash_add(s->context_to_sid, &entry->leaf.list, context->hash);
}
- if (cur && sid == cur->sid)
- return -EEXIST;
-
- newnode = kmalloc(sizeof(*newnode), GFP_ATOMIC);
- if (!newnode)
- return -ENOMEM;
-
- newnode->sid = sid;
- if (context_cpy(&newnode->context, context)) {
- kfree(newnode);
- return -ENOMEM;
- }
-
- if (prev) {
- newnode->next = prev->next;
- wmb();
- prev->next = newnode;
- } else {
- newnode->next = s->htable[hvalue];
- wmb();
- s->htable[hvalue] = newnode;
- }
-
- s->nel++;
- if (sid >= s->next_sid)
- s->next_sid = sid + 1;
return 0;
}
+int sidtab_hash_stats(struct sidtab *sidtab, char *page)
+{
+ int i;
+ int chain_len = 0;
+ int slots_used = 0;
+ int entries = 0;
+ int max_chain_len = 0;
+ int cur_bucket = 0;
+ struct sidtab_entry_leaf *entry;
+
+ rcu_read_lock();
+ hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
+ entries++;
+ if (i == cur_bucket) {
+ chain_len++;
+ if (chain_len == 1)
+ slots_used++;
+ } else {
+ cur_bucket = i;
+ if (chain_len > max_chain_len)
+ max_chain_len = chain_len;
+ chain_len = 0;
+ }
+ }
+ rcu_read_unlock();
+
+ if (chain_len > max_chain_len)
+ max_chain_len = chain_len;
+
+ return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
+ "longest chain: %d\n", entries,
+ slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
+}
+
+static u32 sidtab_level_from_count(u32 count)
+{
+ u32 capacity = SIDTAB_LEAF_ENTRIES;
+ u32 level = 0;
+
+ while (count > capacity) {
+ capacity <<= SIDTAB_INNER_SHIFT;
+ ++level;
+ }
+ return level;
+}
+
+static int sidtab_alloc_roots(struct sidtab *s, u32 level)
+{
+ u32 l;
+
+ if (!s->roots[0].ptr_leaf) {
+ s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!s->roots[0].ptr_leaf)
+ return -ENOMEM;
+ }
+ for (l = 1; l <= level; ++l)
+ if (!s->roots[l].ptr_inner) {
+ s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!s->roots[l].ptr_inner)
+ return -ENOMEM;
+ s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
+ }
+ return 0;
+}
+
+static struct sidtab_entry_leaf *sidtab_do_lookup(struct sidtab *s, u32 index,
+ int alloc)
+{
+ union sidtab_entry_inner *entry;
+ u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;
+
+ /* find the level of the subtree we need */
+ level = sidtab_level_from_count(index + 1);
+ capacity_shift = level * SIDTAB_INNER_SHIFT;
+
+ /* allocate roots if needed */
+ if (alloc && sidtab_alloc_roots(s, level) != 0)
+ return NULL;
+
+ /* lookup inside the subtree */
+ entry = &s->roots[level];
+ while (level != 0) {
+ capacity_shift -= SIDTAB_INNER_SHIFT;
+ --level;
+
+ entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
+ leaf_index &= ((u32)1 << capacity_shift) - 1;
+
+ if (!entry->ptr_inner) {
+ if (alloc)
+ entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!entry->ptr_inner)
+ return NULL;
+ }
+ }
+ if (!entry->ptr_leaf) {
+ if (alloc)
+ entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!entry->ptr_leaf)
+ return NULL;
+ }
+ return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
+}
+
+static struct context *sidtab_lookup(struct sidtab *s, u32 index)
+{
+ /* read entries only after reading count */
+ u32 count = smp_load_acquire(&s->count);
+
+ if (index >= count)
+ return NULL;
+
+ return &sidtab_do_lookup(s, index, 0)->context;
+}
+
+static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
+{
+ return s->isids[sid - 1].set ? &s->isids[sid - 1].leaf.context : NULL;
+}
+
static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
{
- int hvalue;
- struct sidtab_node *cur;
+ struct context *context;
- if (!s)
- return NULL;
-
- hvalue = SIDTAB_HASH(sid);
- cur = s->htable[hvalue];
- while (cur && sid > cur->sid)
- cur = cur->next;
-
- if (force && cur && sid == cur->sid && cur->context.len)
- return &cur->context;
-
- if (!cur || sid != cur->sid || cur->context.len) {
- /* Remap invalid SIDs to the unlabeled SID. */
- sid = SECINITSID_UNLABELED;
- hvalue = SIDTAB_HASH(sid);
- cur = s->htable[hvalue];
- while (cur && sid > cur->sid)
- cur = cur->next;
- if (!cur || sid != cur->sid)
- return NULL;
+ if (sid != 0) {
+ if (sid > SECINITSID_NUM)
+ context = sidtab_lookup(s, sid_to_index(sid));
+ else
+ context = sidtab_lookup_initial(s, sid);
+ if (context && (!context->len || force))
+ return context;
}
- return &cur->context;
+ return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}
struct context *sidtab_search(struct sidtab *s, u32 sid)
@@ -116,192 +234,252 @@ struct context *sidtab_search_force(struct sidtab *s, u32 sid)
return sidtab_search_core(s, sid, 1);
}
-int sidtab_map(struct sidtab *s,
- int (*apply) (u32 sid,
- struct context *context,
- void *args),
- void *args)
+int sidtab_context_to_sid(struct sidtab *s, struct context *context,
+ u32 *sid)
{
- int i, rc = 0;
- struct sidtab_node *cur;
+ unsigned long flags;
+ u32 count;
+ struct sidtab_convert_params *convert;
+ struct sidtab_entry_leaf *dst, *dst_convert;
+ int rc;
- if (!s)
- goto out;
+ *sid = context_to_sid(s, context);
+ if (*sid)
+ return 0;
- for (i = 0; i < SIDTAB_SIZE; i++) {
- cur = s->htable[i];
- while (cur) {
- rc = apply(cur->sid, &cur->context, args);
- if (rc)
- goto out;
- cur = cur->next;
+ /* lock-free search failed: lock, re-search, and insert if not found */
+ spin_lock_irqsave(&s->lock, flags);
+
+ rc = 0;
+ *sid = context_to_sid(s, context);
+ if (*sid)
+ goto out_unlock;
+
+ /* read entries only after reading count */
+ count = smp_load_acquire(&s->count);
+ convert = s->convert;
+
+ /* bail out if we already reached max entries */
+ rc = -EOVERFLOW;
+ if (count >= SIDTAB_MAX)
+ goto out_unlock;
+
+ /* insert context into new entry */
+ rc = -ENOMEM;
+ dst = sidtab_do_lookup(s, count, 1);
+ if (!dst)
+ goto out_unlock;
+
+ dst->sid = index_to_sid(count);
+
+ rc = context_cpy(&dst->context, context);
+ if (rc)
+ goto out_unlock;
+
+ /*
+ * if we are building a new sidtab, we need to convert the context
+ * and insert it there as well
+ */
+ if (convert) {
+ rc = -ENOMEM;
+ dst_convert = sidtab_do_lookup(convert->target, count, 1);
+ if (!dst_convert) {
+ context_destroy(&dst->context);
+ goto out_unlock;
}
+
+ rc = convert->func(context, &dst_convert->context,
+ convert->args);
+ if (rc) {
+ context_destroy(&dst->context);
+ goto out_unlock;
+ }
+ dst_convert->sid = index_to_sid(count);
+ convert->target->count = count + 1;
+
+ hash_add_rcu(convert->target->context_to_sid,
+ &dst_convert->list, dst_convert->context.hash);
}
-out:
+
+ if (context->len)
+ pr_info("SELinux: Context %s is not valid (left unmapped).\n",
+ context->str);
+
+ *sid = index_to_sid(count);
+
+ /* write entries before updating count */
+ smp_store_release(&s->count, count + 1);
+ hash_add_rcu(s->context_to_sid, &dst->list, dst->context.hash);
+
+ rc = 0;
+out_unlock:
+ spin_unlock_irqrestore(&s->lock, flags);
return rc;
}
-static void sidtab_update_cache(struct sidtab *s, struct sidtab_node *n, int loc)
+static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
- BUG_ON(loc >= SIDTAB_CACHE_LEN);
+ struct sidtab_entry_leaf *entry;
+ u32 i;
- while (loc > 0) {
- s->cache[loc] = s->cache[loc - 1];
- loc--;
+ for (i = 0; i < count; i++) {
+ entry = sidtab_do_lookup(s, i, 0);
+ entry->sid = index_to_sid(i);
+
+ hash_add_rcu(s->context_to_sid, &entry->list,
+ entry->context.hash);
+
}
- s->cache[0] = n;
}
-static inline u32 sidtab_search_context(struct sidtab *s,
- struct context *context)
+static int sidtab_convert_tree(union sidtab_entry_inner *edst,
+ union sidtab_entry_inner *esrc,
+ u32 *pos, u32 count, u32 level,
+ struct sidtab_convert_params *convert)
{
- int i;
- struct sidtab_node *cur;
+ int rc;
+ u32 i;
- for (i = 0; i < SIDTAB_SIZE; i++) {
- cur = s->htable[i];
- while (cur) {
- if (context_cmp(&cur->context, context)) {
- sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1);
- return cur->sid;
- }
- cur = cur->next;
+ if (level != 0) {
+ if (!edst->ptr_inner) {
+ edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_KERNEL);
+ if (!edst->ptr_inner)
+ return -ENOMEM;
}
+ i = 0;
+ while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
+ rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
+ &esrc->ptr_inner->entries[i],
+ pos, count, level - 1,
+ convert);
+ if (rc)
+ return rc;
+ i++;
+ }
+ } else {
+ if (!edst->ptr_leaf) {
+ edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_KERNEL);
+ if (!edst->ptr_leaf)
+ return -ENOMEM;
+ }
+ i = 0;
+ while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
+ rc = convert->func(&esrc->ptr_leaf->entries[i].context,
+ &edst->ptr_leaf->entries[i].context,
+ convert->args);
+ if (rc)
+ return rc;
+ (*pos)++;
+ i++;
+ }
+ cond_resched();
}
+
return 0;
}
-static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context)
+int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
- int i;
- struct sidtab_node *node;
-
- for (i = 0; i < SIDTAB_CACHE_LEN; i++) {
- node = s->cache[i];
- if (unlikely(!node))
- return 0;
- if (context_cmp(&node->context, context)) {
- sidtab_update_cache(s, node, i);
- return node->sid;
- }
- }
- return 0;
-}
-
-int sidtab_context_to_sid(struct sidtab *s,
- struct context *context,
- u32 *out_sid)
-{
- u32 sid;
- int ret = 0;
unsigned long flags;
+ u32 count, level, pos;
+ int rc;
- *out_sid = SECSID_NULL;
+ spin_lock_irqsave(&s->lock, flags);
- sid = sidtab_search_cache(s, context);
- if (!sid)
- sid = sidtab_search_context(s, context);
- if (!sid) {
- spin_lock_irqsave(&s->lock, flags);
- /* Rescan now that we hold the lock. */
- sid = sidtab_search_context(s, context);
- if (sid)
- goto unlock_out;
- /* No SID exists for the context. Allocate a new one. */
- if (s->next_sid == UINT_MAX || s->shutdown) {
- ret = -ENOMEM;
- goto unlock_out;
- }
- sid = s->next_sid++;
- if (context->len)
- printk(KERN_INFO
- "SELinux: Context %s is not valid (left unmapped).\n",
- context->str);
- ret = sidtab_insert(s, sid, context);
- if (ret)
- s->next_sid--;
-unlock_out:
+ /* concurrent policy loads are not allowed */
+ if (s->convert) {
spin_unlock_irqrestore(&s->lock, flags);
+ return -EBUSY;
}
- if (ret)
- return ret;
+ count = s->count;
+ level = sidtab_level_from_count(count);
- *out_sid = sid;
+ /* allocate last leaf in the new sidtab (to avoid race with
+ * live convert)
+ */
+ rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
+ if (rc) {
+ spin_unlock_irqrestore(&s->lock, flags);
+ return rc;
+ }
+
+ /* set count in case no new entries are added during conversion */
+ params->target->count = count;
+
+ /* enable live convert of new entries */
+ s->convert = params;
+
+ /* we can safely convert the tree outside the lock */
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ pr_info("SELinux: Converting %u SID table entries...\n", count);
+
+ /* convert all entries not covered by live convert */
+ pos = 0;
+ rc = sidtab_convert_tree(&params->target->roots[level],
+ &s->roots[level], &pos, count, level, params);
+ if (rc) {
+ /* we need to keep the old table - disable live convert */
+ spin_lock_irqsave(&s->lock, flags);
+ s->convert = NULL;
+ spin_unlock_irqrestore(&s->lock, flags);
+ return rc;
+ }
+ /*
+ * The hashtable can also be modified in sidtab_context_to_sid()
+ * so we must re-acquire the lock here.
+ */
+ spin_lock_irqsave(&s->lock, flags);
+ sidtab_convert_hashtable(params->target, count);
+ spin_unlock_irqrestore(&s->lock, flags);
+
return 0;
}
-void sidtab_hash_eval(struct sidtab *h, char *tag)
+static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
- int i, chain_len, slots_used, max_chain_len;
- struct sidtab_node *cur;
+ u32 i;
- slots_used = 0;
- max_chain_len = 0;
- for (i = 0; i < SIDTAB_SIZE; i++) {
- cur = h->htable[i];
- if (cur) {
- slots_used++;
- chain_len = 0;
- while (cur) {
- chain_len++;
- cur = cur->next;
- }
+ if (level != 0) {
+ struct sidtab_node_inner *node = entry.ptr_inner;
- if (chain_len > max_chain_len)
- max_chain_len = chain_len;
- }
+ if (!node)
+ return;
+
+ for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
+ sidtab_destroy_tree(node->entries[i], level - 1);
+ kfree(node);
+ } else {
+ struct sidtab_node_leaf *node = entry.ptr_leaf;
+
+ if (!node)
+ return;
+
+ for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
+ context_destroy(&node->entries[i].context);
+ kfree(node);
}
-
- printk(KERN_DEBUG "%s: %d entries and %d/%d buckets used, longest "
- "chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE,
- max_chain_len);
}
void sidtab_destroy(struct sidtab *s)
{
- int i;
- struct sidtab_node *cur, *temp;
+ u32 i, level;
- if (!s)
- return;
+ for (i = 0; i < SECINITSID_NUM; i++)
+ if (s->isids[i].set)
+ context_destroy(&s->isids[i].leaf.context);
- for (i = 0; i < SIDTAB_SIZE; i++) {
- cur = s->htable[i];
- while (cur) {
- temp = cur;
- cur = cur->next;
- context_destroy(&temp->context);
- kfree(temp);
- }
- s->htable[i] = NULL;
- }
- kfree(s->htable);
- s->htable = NULL;
- s->nel = 0;
- s->next_sid = 1;
-}
+ level = SIDTAB_MAX_LEVEL;
+ while (level && !s->roots[level].ptr_inner)
+ --level;
-void sidtab_set(struct sidtab *dst, struct sidtab *src)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&src->lock, flags);
- dst->htable = src->htable;
- dst->nel = src->nel;
- dst->next_sid = src->next_sid;
- dst->shutdown = 0;
- for (i = 0; i < SIDTAB_CACHE_LEN; i++)
- dst->cache[i] = NULL;
- spin_unlock_irqrestore(&src->lock, flags);
-}
-
-void sidtab_shutdown(struct sidtab *s)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&s->lock, flags);
- s->shutdown = 1;
- spin_unlock_irqrestore(&s->lock, flags);
+ sidtab_destroy_tree(s->roots[level], level);
+ /*
+ * The context_to_sid hashtable's objects are all shared
+ * with the isids array and context tree, and so don't need
+ * to be cleaned up here.
+ */
}
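
The new sidtab_do_lookup() above walks a tree of page-sized nodes instead of a hash chain: the SID's index selects one slot per inner level and a final leaf slot. A stand-alone sketch of that index arithmetic, using illustrative LEAF_ENTRIES/INNER_SHIFT values rather than the real SIDTAB_* constants, and printing the slots a given index resolves to:

#include <stdio.h>

#define LEAF_ENTRIES    128     /* example value, stands in for SIDTAB_LEAF_ENTRIES */
#define INNER_SHIFT     9       /* example value, stands in for SIDTAB_INNER_SHIFT */

static unsigned int level_from_count(unsigned int count)
{
        unsigned int capacity = LEAF_ENTRIES;
        unsigned int level = 0;

        while (count > capacity) {
                capacity <<= INNER_SHIFT;
                ++level;
        }
        return level;
}

int main(void)
{
        unsigned int index = 70000;
        unsigned int level = level_from_count(index + 1);
        unsigned int shift = level * INNER_SHIFT;
        unsigned int leaf_index = index / LEAF_ENTRIES;

        printf("index %u needs %u inner level(s)\n", index, level);
        while (level != 0) {
                shift -= INNER_SHIFT;
                --level;
                printf("  inner slot %u\n", leaf_index >> shift);
                leaf_index &= (1u << shift) - 1;        /* keep offset within subtree */
        }
        printf("  leaf slot %u\n", index % LEAF_ENTRIES);
        return 0;
}
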
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
index a1a1d26..8564ec0 100644
--- a/security/selinux/ss/sidtab.h
+++ b/security/selinux/ss/sidtab.h
@@ -1,56 +1,112 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * A security identifier table (sidtab) is a hash table
+ * A security identifier table (sidtab) is a lookup table
* of security context structures indexed by SID value.
*
- * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
+ * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
+ *
+ * Copyright (C) 2018 Red Hat, Inc.
*/
#ifndef _SS_SIDTAB_H_
#define _SS_SIDTAB_H_
-#include "context.h"
+#include <linux/spinlock_types.h>
+#include <linux/log2.h>
+#include <linux/hashtable.h>
-struct sidtab_node {
- u32 sid; /* security identifier */
- struct context context; /* security context structure */
- struct sidtab_node *next;
+#include "context.h"
+#include "flask.h"
+
+struct sidtab_entry_leaf {
+ u32 sid;
+ struct context context;
+ struct hlist_node list;
};
-#define SIDTAB_HASH_BITS 7
-#define SIDTAB_HASH_BUCKETS (1 << SIDTAB_HASH_BITS)
-#define SIDTAB_HASH_MASK (SIDTAB_HASH_BUCKETS-1)
+struct sidtab_node_inner;
+struct sidtab_node_leaf;
-#define SIDTAB_SIZE SIDTAB_HASH_BUCKETS
+union sidtab_entry_inner {
+ struct sidtab_node_inner *ptr_inner;
+ struct sidtab_node_leaf *ptr_leaf;
+};
+
+/* align node size to page boundary */
+#define SIDTAB_NODE_ALLOC_SHIFT PAGE_SHIFT
+#define SIDTAB_NODE_ALLOC_SIZE PAGE_SIZE
+
+#define size_to_shift(size) ((size) == 1 ? 1 : (const_ilog2((size) - 1) + 1))
+
+#define SIDTAB_INNER_SHIFT \
+ (SIDTAB_NODE_ALLOC_SHIFT - size_to_shift(sizeof(union sidtab_entry_inner)))
+#define SIDTAB_INNER_ENTRIES ((size_t)1 << SIDTAB_INNER_SHIFT)
+#define SIDTAB_LEAF_ENTRIES \
+ (SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry_leaf))
+
+#define SIDTAB_MAX_BITS 32
+#define SIDTAB_MAX U32_MAX
+/* ensure enough tree levels for SIDTAB_MAX entries */
+#define SIDTAB_MAX_LEVEL \
+ DIV_ROUND_UP(SIDTAB_MAX_BITS - size_to_shift(SIDTAB_LEAF_ENTRIES), \
+ SIDTAB_INNER_SHIFT)
+
+struct sidtab_node_leaf {
+ struct sidtab_entry_leaf entries[SIDTAB_LEAF_ENTRIES];
+};
+
+struct sidtab_node_inner {
+ union sidtab_entry_inner entries[SIDTAB_INNER_ENTRIES];
+};
+
+struct sidtab_isid_entry {
+ int set;
+ struct sidtab_entry_leaf leaf;
+};
+
+struct sidtab_convert_params {
+ int (*func)(struct context *oldc, struct context *newc, void *args);
+ void *args;
+ struct sidtab *target;
+};
+
+#define SIDTAB_HASH_BITS CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS
+#define SIDTAB_HASH_BUCKETS (1 << SIDTAB_HASH_BITS)
struct sidtab {
- struct sidtab_node **htable;
- unsigned int nel; /* number of elements */
- unsigned int next_sid; /* next SID to allocate */
- unsigned char shutdown;
-#define SIDTAB_CACHE_LEN 3
- struct sidtab_node *cache[SIDTAB_CACHE_LEN];
+ /*
+ * lock-free read access only for as many items as a prior read of
+ * 'count'
+ */
+ union sidtab_entry_inner roots[SIDTAB_MAX_LEVEL + 1];
+ /*
+ * access atomically via {READ|WRITE}_ONCE(); only increment under
+ * spinlock
+ */
+ u32 count;
+ /* access only under spinlock */
+ struct sidtab_convert_params *convert;
spinlock_t lock;
+
+ /* index == SID - 1 (no entry for SECSID_NULL) */
+ struct sidtab_isid_entry isids[SECINITSID_NUM];
+
+ /* Hash table for fast reverse context-to-sid lookups. */
+ DECLARE_HASHTABLE(context_to_sid, SIDTAB_HASH_BITS);
};
int sidtab_init(struct sidtab *s);
-int sidtab_insert(struct sidtab *s, u32 sid, struct context *context);
+int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context);
struct context *sidtab_search(struct sidtab *s, u32 sid);
struct context *sidtab_search_force(struct sidtab *s, u32 sid);
-int sidtab_map(struct sidtab *s,
- int (*apply) (u32 sid,
- struct context *context,
- void *args),
- void *args);
+int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
-int sidtab_context_to_sid(struct sidtab *s,
- struct context *context,
- u32 *sid);
+int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
-void sidtab_hash_eval(struct sidtab *h, char *tag);
void sidtab_destroy(struct sidtab *s);
-void sidtab_set(struct sidtab *dst, struct sidtab *src);
-void sidtab_shutdown(struct sidtab *s);
+
+int sidtab_hash_stats(struct sidtab *sidtab, char *page);
#endif /* _SS_SIDTAB_H_ */
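
The comments in struct sidtab above encode a publication contract: an entry becomes visible to lock-free readers only once 'count' has been raised with release semantics, and readers must bound their accesses by an acquire load of 'count'. A user-space C11 analogue of that pattern (the explicit atomics here stand in for the kernel's smp_load_acquire()/smp_store_release()) could look like this sketch:

#include <stdatomic.h>
#include <stdio.h>

#define N 16

static int entries[N];
static atomic_uint count;

/* writer side; in the kernel this runs under sidtab->lock */
static void publish(int value)
{
        unsigned int c = atomic_load_explicit(&count, memory_order_relaxed);

        entries[c] = value;                             /* fill the entry first ...   */
        atomic_store_explicit(&count, c + 1,            /* ... then publish the count */
                              memory_order_release);
}

/* lock-free reader side */
static int lookup(unsigned int index, int *value)
{
        unsigned int c = atomic_load_explicit(&count, memory_order_acquire);

        if (index >= c)
                return -1;                              /* entry not published yet */
        *value = entries[index];
        return 0;
}

int main(void)
{
        int v;

        publish(42);
        if (!lookup(0, &v))
                printf("entry 0 = %d\n", v);
        return 0;
}
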
diff --git a/security/selinux/ss/status.c b/security/selinux/ss/status.c
index d982365..a121de4 100644
--- a/security/selinux/ss/status.c
+++ b/security/selinux/ss/status.c
@@ -35,8 +35,6 @@
* In most cases, application shall confirm the kernel status is not
* changed without any system call invocations.
*/
-static struct page *selinux_status_page;
-static DEFINE_MUTEX(selinux_status_lock);
/*
* selinux_kernel_status_page
@@ -44,21 +42,21 @@ static DEFINE_MUTEX(selinux_status_lock);
* It returns a reference to selinux_status_page. If the status page is
* not allocated yet, it also tries to allocate it at the first time.
*/
-struct page *selinux_kernel_status_page(void)
+struct page *selinux_kernel_status_page(struct selinux_state *state)
{
struct selinux_kernel_status *status;
struct page *result = NULL;
- mutex_lock(&selinux_status_lock);
- if (!selinux_status_page) {
- selinux_status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ mutex_lock(&state->ss->status_lock);
+ if (!state->ss->status_page) {
+ state->ss->status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
- if (selinux_status_page) {
- status = page_address(selinux_status_page);
+ if (state->ss->status_page) {
+ status = page_address(state->ss->status_page);
status->version = SELINUX_KERNEL_STATUS_VERSION;
status->sequence = 0;
- status->enforcing = selinux_enforcing;
+ status->enforcing = enforcing_enabled(state);
/*
* NOTE: the next policyload event shall set
* a positive value on the status->policyload,
@@ -66,11 +64,12 @@ struct page *selinux_kernel_status_page(void)
* So, application can know it was updated.
*/
status->policyload = 0;
- status->deny_unknown = !security_get_allow_unknown();
+ status->deny_unknown =
+ !security_get_allow_unknown(state);
}
}
- result = selinux_status_page;
- mutex_unlock(&selinux_status_lock);
+ result = state->ss->status_page;
+ mutex_unlock(&state->ss->status_lock);
return result;
}
@@ -80,13 +79,14 @@ struct page *selinux_kernel_status_page(void)
*
* It updates status of the current enforcing/permissive mode.
*/
-void selinux_status_update_setenforce(int enforcing)
+void selinux_status_update_setenforce(struct selinux_state *state,
+ int enforcing)
{
struct selinux_kernel_status *status;
- mutex_lock(&selinux_status_lock);
- if (selinux_status_page) {
- status = page_address(selinux_status_page);
+ mutex_lock(&state->ss->status_lock);
+ if (state->ss->status_page) {
+ status = page_address(state->ss->status_page);
status->sequence++;
smp_wmb();
@@ -96,7 +96,7 @@ void selinux_status_update_setenforce(int enforcing)
smp_wmb();
status->sequence++;
}
- mutex_unlock(&selinux_status_lock);
+ mutex_unlock(&state->ss->status_lock);
}
/*
@@ -105,22 +105,23 @@ void selinux_status_update_setenforce(int enforcing)
* It updates status of the times of policy reloaded, and current
* setting of deny_unknown.
*/
-void selinux_status_update_policyload(int seqno)
+void selinux_status_update_policyload(struct selinux_state *state,
+ int seqno)
{
struct selinux_kernel_status *status;
- mutex_lock(&selinux_status_lock);
- if (selinux_status_page) {
- status = page_address(selinux_status_page);
+ mutex_lock(&state->ss->status_lock);
+ if (state->ss->status_page) {
+ status = page_address(state->ss->status_page);
status->sequence++;
smp_wmb();
status->policyload = seqno;
- status->deny_unknown = !security_get_allow_unknown();
+ status->deny_unknown = !security_get_allow_unknown(state);
smp_wmb();
status->sequence++;
}
- mutex_unlock(&selinux_status_lock);
+ mutex_unlock(&state->ss->status_lock);
}
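
selinux_kernel_status_page() maintains a page that userspace can map (normally through /sys/fs/selinux/status, e.g. via libselinux's selinux_status_open()) and poll without system calls; each update bumps the sequence counter to an odd value while fields are being rewritten. A hedged reader sketch follows: the mount path and the struct layout are assumptions for illustration, and a production reader would also insert the appropriate read barriers between the loads.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* assumed to mirror the selinux_kernel_status layout updated above */
struct status_page {
        uint32_t version;
        uint32_t sequence;      /* odd while an update is in flight */
        uint32_t enforcing;
        uint32_t policyload;
        uint32_t deny_unknown;
};

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        volatile struct status_page *s;
        uint32_t seq, enforcing, policyload;
        int fd;

        fd = open("/sys/fs/selinux/status", O_RDONLY);
        if (fd < 0)
                return 1;
        s = mmap(NULL, psz, PROT_READ, MAP_SHARED, fd, 0);
        if (s == MAP_FAILED)
                return 1;

        do {                                    /* seqcount-style retry loop */
                seq = s->sequence;
                while (seq & 1)                 /* writer mid-update: re-read */
                        seq = s->sequence;
                enforcing = s->enforcing;
                policyload = s->policyload;
        } while (s->sequence != seq);           /* retry if an update raced us */

        printf("enforcing=%u policyload=%u\n", enforcing, policyload);
        munmap((void *)s, psz);
        close(fd);
        return 0;
}
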
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 56e354f..fa1430c 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -101,11 +101,13 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
ctx->ctx_len = str_len;
memcpy(ctx->ctx_str, &uctx[1], str_len);
ctx->ctx_str[str_len] = '\0';
- rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp);
+ rc = security_context_to_sid(&selinux_state, ctx->ctx_str, str_len,
+ &ctx->ctx_sid, gfp);
if (rc)
goto err;
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
+ rc = avc_has_perm(&selinux_state,
+ tsec->sid, ctx->ctx_sid,
SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL);
if (rc)
goto err;
@@ -141,7 +143,8 @@ static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
if (!ctx)
return 0;
- return avc_has_perm(tsec->sid, ctx->ctx_sid,
+ return avc_has_perm(&selinux_state,
+ tsec->sid, ctx->ctx_sid,
SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
NULL);
}
@@ -163,7 +166,8 @@ int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
if (!selinux_authorizable_ctx(ctx))
return -EINVAL;
- rc = avc_has_perm(fl_secid, ctx->ctx_sid,
+ rc = avc_has_perm(&selinux_state,
+ fl_secid, ctx->ctx_sid,
SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL);
return (rc == -EACCES ? -ESRCH : rc);
}
@@ -202,7 +206,8 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
/* We don't need a separate SA Vs. policy polmatch check since the SA
* is now of the same label as the flow and a flow Vs. policy polmatch
* check had already happened in selinux_xfrm_policy_lookup() above. */
- return (avc_has_perm(fl->flowi_secid, state_sid,
+ return (avc_has_perm(&selinux_state,
+ fl->flowi_secid, state_sid,
SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
NULL) ? 0 : 1);
}
@@ -352,7 +357,8 @@ int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
if (secid == 0)
return -EINVAL;
- rc = security_sid_to_context(secid, &ctx_str, &str_len);
+ rc = security_sid_to_context(&selinux_state, secid, &ctx_str,
+ &str_len);
if (rc)
return rc;
@@ -420,7 +426,8 @@ int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
/* This check even when there's no association involved is intended,
* according to Trent Jaeger, to make sure a process can't engage in
* non-IPsec communication unless explicitly allowed by policy. */
- return avc_has_perm(sk_sid, peer_sid,
+ return avc_has_perm(&selinux_state,
+ sk_sid, peer_sid,
SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad);
}
@@ -463,6 +470,6 @@ int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
/* This check even when there's no association involved is intended,
* according to Trent Jaeger, to make sure a process can't engage in
* non-IPsec communication unless explicitly allowed by policy. */
- return avc_has_perm(sk_sid, SECINITSID_UNLABELED,
+ return avc_has_perm(&selinux_state, sk_sid, SECINITSID_UNLABELED,
SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad);
}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 3096883..93da68b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -720,6 +720,10 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
LONG_MAX - runtime->buffer_size)
runtime->boundary *= 2;
+ /* clear the buffer for avoiding possible kernel info leaks */
+ if (runtime->dma_area && !substream->ops->copy_user)
+ memset(runtime->dma_area, 0, runtime->dma_bytes);
+
snd_pcm_timer_resolution_change(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 3449b72..ebdb2b4 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -88,6 +88,9 @@ static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
+#define MAX_SLAVE_INSTANCES 1000
+static int num_slaves;
+
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
@@ -266,6 +269,10 @@ int snd_timer_open(struct snd_timer_instance **ti,
err = -EINVAL;
goto unlock;
}
+ if (num_slaves >= MAX_SLAVE_INSTANCES) {
+ err = -EBUSY;
+ goto unlock;
+ }
timeri = snd_timer_instance_new(owner, NULL);
if (!timeri) {
err = -ENOMEM;
@@ -275,6 +282,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
+ num_slaves++;
err = snd_timer_check_slave(timeri);
if (err < 0) {
snd_timer_close_locked(timeri, &card_dev_to_put);
@@ -364,6 +372,8 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri,
struct snd_timer_instance *slave, *tmp;
list_del(&timeri->open_list);
+ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+ num_slaves--;
/* force to stop the timer */
snd_timer_stop(timeri);
diff --git a/sound/firewire/motu/motu-proc.c b/sound/firewire/motu/motu-proc.c
index 4edc064..706f1e98 100644
--- a/sound/firewire/motu/motu-proc.c
+++ b/sound/firewire/motu/motu-proc.c
@@ -17,7 +17,7 @@ static const char *const clock_names[] = {
[SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT] = "S/PDIF on optical interface",
[SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_A] = "S/PDIF on optical interface A",
[SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_B] = "S/PDIF on optical interface B",
- [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PCIF on coaxial interface",
+ [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PDIF on coaxial interface",
[SND_MOTU_CLOCK_SOURCE_AESEBU_ON_XLR] = "AESEBU on XLR interface",
[SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC] = "Word clock on BNC interface",
};
diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
index 70559e5..7d4e18c 100644
--- a/sound/isa/cs423x/cs4236.c
+++ b/sound/isa/cs423x/cs4236.c
@@ -293,7 +293,8 @@ static int snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev)
} else {
mpu_port[dev] = pnp_port_start(pdev, 0);
if (mpu_irq[dev] >= 0 &&
- pnp_irq_valid(pdev, 0) && pnp_irq(pdev, 0) >= 0) {
+ pnp_irq_valid(pdev, 0) &&
+ pnp_irq(pdev, 0) != (resource_size_t)-1) {
mpu_irq[dev] = pnp_irq(pdev, 0);
} else {
mpu_irq[dev] = -1; /* disable interrupt */
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 8fcb421..fa261b2 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -883,7 +883,7 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
return -EAGAIN; /* give a chance to retry */
}
- dev_WARN(chip->card->dev,
+ dev_err(chip->card->dev,
"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
bus->last_cmd[addr]);
chip->single_cmd = 1;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 9876d8d..92f5f45 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1300,13 +1300,14 @@ struct scp_msg {
static void dspio_clear_response_queue(struct hda_codec *codec)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
unsigned int dummy = 0;
- int status = -1;
+ int status;
/* clear all from the response queue */
do {
status = dspio_read(codec, &dummy);
- } while (status == 0);
+ } while (status == 0 && time_before(jiffies, timeout));
}
static int dspio_get_response_data(struct hda_codec *codec)
@@ -4424,12 +4425,14 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
struct ca0132_spec *spec = codec->spec;
codec_dbg(codec, "ca0132_process_dsp_response\n");
+ snd_hda_power_up_pm(codec);
if (spec->wait_scp) {
if (dspio_get_response_data(codec) >= 0)
spec->wait_scp = 0;
}
dspio_clear_response_queue(codec);
+ snd_hda_power_down_pm(codec);
}
static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 41e3c77..5a7afbe 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -378,6 +378,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0672:
alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
break;
+ case 0x10ec0222:
case 0x10ec0623:
alc_update_coef_idx(codec, 0x19, 1<<13, 0);
break;
@@ -396,6 +397,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
break;
case 0x10ec0899:
case 0x10ec0900:
+ case 0x10ec0b00:
case 0x10ec1168:
case 0x10ec1220:
alc_update_coef_idx(codec, 0x7, 1<<1, 0);
@@ -2389,6 +2391,7 @@ static int patch_alc882(struct hda_codec *codec)
case 0x10ec0882:
case 0x10ec0885:
case 0x10ec0900:
+ case 0x10ec0b00:
case 0x10ec1220:
break;
default:
@@ -8398,6 +8401,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882),
HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882),
+ HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),
HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882),
HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882),
{} /* terminator */
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 057c2f3..41ea8e7 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -661,6 +661,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
unsigned long flags;
unsigned char mclk_change;
unsigned int i, old_rate;
+ bool call_set_rate = false;
if (rate > ice->hw_rates->list[ice->hw_rates->count - 1])
return -EINVAL;
@@ -684,7 +685,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
* setting clock rate for internal clock mode */
old_rate = ice->get_rate(ice);
if (force || (old_rate != rate))
- ice->set_rate(ice, rate);
+ call_set_rate = true;
else if (rate == ice->cur_rate) {
spin_unlock_irqrestore(&ice->reg_lock, flags);
return 0;
@@ -692,12 +693,14 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
}
ice->cur_rate = rate;
+ spin_unlock_irqrestore(&ice->reg_lock, flags);
+
+ if (call_set_rate)
+ ice->set_rate(ice, rate);
/* setting master clock */
mclk_change = ice->set_mclk(ice, rate);
- spin_unlock_irqrestore(&ice->reg_lock, flags);
-
if (mclk_change && ice->gpio.i2s_mclk_changed)
ice->gpio.i2s_mclk_changed(ice);
if (ice->gpio.set_pro_rate)
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 1cd20b8..82ee8f4 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -297,6 +297,7 @@ static bool rt5677_volatile_register(struct device *dev, unsigned int reg)
case RT5677_I2C_MASTER_CTRL7:
case RT5677_I2C_MASTER_CTRL8:
case RT5677_HAP_GENE_CTRL2:
+ case RT5677_PWR_ANLG2: /* Modified by DSP firmware */
case RT5677_PWR_DSP_ST:
case RT5677_PRIV_DATA:
case RT5677_ASRC_22:
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index fd2731d..0e8008d 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2791,7 +2791,7 @@ static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
if (target % Fref == 0) {
fll_div->theta = 0;
- fll_div->lambda = 0;
+ fll_div->lambda = 1;
} else {
gcd_fll = gcd(target, fratio * Fref);
@@ -2861,7 +2861,7 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
return -EINVAL;
}
- if (fll_div.theta || fll_div.lambda)
+ if (fll_div.theta)
fll1 |= WM8962_FLL_FRAC;
/* Stop the FLL while we reconfigure */
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 41cb1fe..4051962 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -422,6 +422,9 @@ static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
dmic_constraints);
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
}
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 2d5cf26..72301bc 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1921,6 +1921,7 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
int count = hdr->count;
int i;
bool abi_match;
+ int ret;
if (tplg->pass != SOC_TPLG_PASS_PCM_DAI)
return 0;
@@ -1957,7 +1958,12 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
}
/* create the FE DAIs and DAI links */
- soc_tplg_pcm_create(tplg, _pcm);
+ ret = soc_tplg_pcm_create(tplg, _pcm);
+ if (ret < 0) {
+ if (!abi_match)
+ kfree(_pcm);
+ return ret;
+ }
/* offset by version-specific struct size and
* real priv data size
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index ad14d6b..51ee791 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1143,6 +1143,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
+ case USB_ID(0x05a7, 0x1020): /* Bose Companion 5 */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
index 5b2cd5e..5dbb0dd 100644
--- a/tools/lib/subcmd/Makefile
+++ b/tools/lib/subcmd/Makefile
@@ -28,7 +28,9 @@
endif
endif
-ifeq ($(CC_NO_CLANG), 0)
+ifeq ($(DEBUG),1)
+ CFLAGS += -O0
+else ifeq ($(CC_NO_CLANG), 0)
CFLAGS += -O3
else
CFLAGS += -O6
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 8107f06..a0ac01c 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -115,6 +115,7 @@
LIB_TARGET = libtraceevent.a libtraceevent.so.$(EVENT_PARSE_VERSION)
LIB_INSTALL = libtraceevent.a libtraceevent.so*
+LIB_INSTALL := $(addprefix $(OUTPUT),$(LIB_INSTALL))
INCLUDES = -I. -I $(srctree)/tools/include $(CONFIG_INCLUDES)
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 5e10ba7..569bcef 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1492,8 +1492,10 @@ static int copy_filter_type(struct event_filter *filter,
if (strcmp(str, "TRUE") == 0 || strcmp(str, "FALSE") == 0) {
/* Add trivial event */
arg = allocate_arg();
- if (arg == NULL)
+ if (arg == NULL) {
+ free(str);
return -1;
+ }
arg->type = FILTER_ARG_BOOLEAN;
if (strcmp(str, "TRUE") == 0)
@@ -1502,8 +1504,11 @@ static int copy_filter_type(struct event_filter *filter,
arg->boolean.value = 0;
filter_type = add_filter_type(filter, event->id);
- if (filter_type == NULL)
+ if (filter_type == NULL) {
+ free(str);
+ free_arg(arg);
return -1;
+ }
filter_type->filter = arg;
diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
index e0b8593..0a0e911 100644
--- a/tools/objtool/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
@@ -333,7 +333,7 @@
06: CLTS
07: SYSRET (o64)
08: INVD
-09: WBINVD
+09: WBINVD | WBNOINVD (F3)
0a:
0b: UD2 (1B)
0c:
@@ -364,7 +364,7 @@
# a ModR/M byte.
1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
-1c:
+1c: Grp20 (1A),(1C)
1d:
1e:
1f: NOP Ev
@@ -792,6 +792,8 @@
f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
+f9: MOVDIRI My,Gy
EndTable
Table: 3-byte opcode 2 (0x0f 0x3a)
@@ -943,9 +945,9 @@
EndTable
GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
3: LIDT Ms
4: SMSW Mw/Rv
5: rdpkru (110),(11B) | wrpkru (111),(11B)
@@ -1020,7 +1022,7 @@
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE | ptwrite Ey (F3),(11B)
5: XRSTOR | lfence (11B)
-6: XSAVEOPT | clwb (66) | mfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
7: clflush | clflushopt (66) | sfence (11B)
EndTable
@@ -1051,6 +1053,10 @@
6: vscatterpf1qps/d Wx (66),(ev)
EndTable
+GrpTable: Grp20
+0: cldemote Mb
+EndTable
+
# AMD's Prefetch Group
GrpTable: GrpP
0: PREFETCH
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 4ddb072..fd4dd12 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -342,6 +342,13 @@ static int report__setup_sample_type(struct report *rep)
PERF_SAMPLE_BRANCH_ANY))
rep->nonany_branch_mode = true;
+#ifndef HAVE_LIBUNWIND_SUPPORT
+ if (dwarf_callchain_users) {
+ ui__warning("Please install libunwind development packages "
+ "during the perf build.\n");
+ }
+#endif
+
return 0;
}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 7678952..09c4380 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -355,7 +355,7 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
"selected. Hence, no address to lookup the source line number.\n");
return -EINVAL;
}
- if (PRINT_FIELD(BRSTACKINSN) &&
+ if (PRINT_FIELD(BRSTACKINSN) && !allow_user_set &&
!(perf_evlist__combined_branch_type(session->evlist) &
PERF_SAMPLE_BRANCH_ANY)) {
pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 89c8e16..94fe546 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -104,6 +104,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
if (perf_evlist__mmap(evlist, 128, true) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
+ err = -1;
goto out_delete_evlist;
}
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index f5acda1..289ef63 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -321,20 +321,50 @@ bool die_is_func_def(Dwarf_Die *dw_die)
}
/**
+ * die_entrypc - Returns entry PC (the lowest address) of a DIE
+ * @dw_die: a DIE
+ * @addr: where to store entry PC
+ *
+ * Since dwarf_entrypc() does not return entry PC if the DIE has only address
+ * range, we have to use this to retrieve the lowest address from the address
+ * range attribute.
+ */
+int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
+{
+ Dwarf_Addr base, end;
+
+ if (!addr)
+ return -EINVAL;
+
+ if (dwarf_entrypc(dw_die, addr) == 0)
+ return 0;
+
+ return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
+}
+
+/**
* die_is_func_instance - Ensure that this DIE is an instance of a subprogram
* @dw_die: a DIE
*
* Ensure that this DIE is an instance (which has an entry address).
- * This returns true if @dw_die is a function instance. If not, you need to
- * call die_walk_instances() to find actual instances.
+ * This returns true if @dw_die is a function instance. If not, the @dw_die
+ * must be a prototype. You can use die_walk_instances() to find actual
+ * instances.
**/
bool die_is_func_instance(Dwarf_Die *dw_die)
{
Dwarf_Addr tmp;
+ Dwarf_Attribute attr_mem;
+ int tag = dwarf_tag(dw_die);
- /* Actually gcc optimizes non-inline as like as inlined */
- return !dwarf_func_inline(dw_die) && dwarf_entrypc(dw_die, &tmp) == 0;
+ if (tag != DW_TAG_subprogram &&
+ tag != DW_TAG_inlined_subroutine)
+ return false;
+
+ return dwarf_entrypc(dw_die, &tmp) == 0 ||
+ dwarf_attr(dw_die, DW_AT_ranges, &attr_mem) != NULL;
}
+
/**
* die_get_data_member_location - Get the data-member offset
* @mb_die: a DIE of a member of a data structure
@@ -611,6 +641,9 @@ static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
Dwarf_Die *origin;
int tmp;
+ if (!die_is_func_instance(inst))
+ return DIE_FIND_CB_CONTINUE;
+
attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem);
if (attr == NULL)
return DIE_FIND_CB_CONTINUE;
@@ -682,15 +715,14 @@ static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
fname = die_get_call_file(in_die);
lineno = die_get_call_lineno(in_die);
- if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
+ if (fname && lineno > 0 && die_entrypc(in_die, &addr) == 0) {
lw->retval = lw->callback(fname, lineno, addr, lw->data);
if (lw->retval != 0)
return DIE_FIND_CB_END;
}
+ if (!lw->recursive)
+ return DIE_FIND_CB_SIBLING;
}
- if (!lw->recursive)
- /* Don't need to search recursively */
- return DIE_FIND_CB_SIBLING;
if (addr) {
fname = dwarf_decl_file(in_die);
@@ -723,7 +755,7 @@ static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive,
/* Handle function declaration line */
fname = dwarf_decl_file(sp_die);
if (fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
- dwarf_entrypc(sp_die, &addr) == 0) {
+ die_entrypc(sp_die, &addr) == 0) {
lw.retval = callback(fname, lineno, addr, data);
if (lw.retval != 0)
goto done;
@@ -737,6 +769,10 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
{
struct __line_walk_param *lw = data;
+ /*
+ * Since inlined function can include another inlined function in
+ * the same file, we need to walk in it recursively.
+ */
lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data);
if (lw->retval != 0)
return DWARF_CB_ABORT;
@@ -761,11 +797,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
Dwarf_Lines *lines;
Dwarf_Line *line;
Dwarf_Addr addr;
- const char *fname, *decf = NULL;
+ const char *fname, *decf = NULL, *inf = NULL;
int lineno, ret = 0;
int decl = 0, inl;
Dwarf_Die die_mem, *cu_die;
size_t nlines, i;
+ bool flag;
/* Get the CU die */
if (dwarf_tag(rt_die) != DW_TAG_compile_unit) {
@@ -796,6 +833,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
"Possible error in debuginfo.\n");
continue;
}
+ /* Skip end-of-sequence */
+ if (dwarf_lineendsequence(line, &flag) != 0 || flag)
+ continue;
+ /* Skip Non statement line-info */
+ if (dwarf_linebeginstatement(line, &flag) != 0 || !flag)
+ continue;
/* Filter lines based on address */
if (rt_die != cu_die) {
/*
@@ -805,13 +848,21 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
*/
if (!dwarf_haspc(rt_die, addr))
continue;
+
if (die_find_inlinefunc(rt_die, addr, &die_mem)) {
+ /* Call-site check */
+ inf = die_get_call_file(&die_mem);
+ if ((inf && !strcmp(inf, decf)) &&
+ die_get_call_lineno(&die_mem) == lineno)
+ goto found;
+
dwarf_decl_line(&die_mem, &inl);
if (inl != decl ||
decf != dwarf_decl_file(&die_mem))
continue;
}
}
+found:
/* Get source line */
fname = dwarf_linesrc(line, NULL, NULL);
@@ -826,8 +877,9 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
*/
if (rt_die != cu_die)
/*
- * Don't need walk functions recursively, because nested
- * inlined functions don't have lines of the specified DIE.
+ * Don't need walk inlined functions recursively, because
+ * inner inlined functions don't have the lines of the
+ * specified function.
*/
ret = __die_walk_funclines(rt_die, false, callback, data);
else {
@@ -1002,7 +1054,7 @@ static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die,
bool first = true;
const char *name;
- ret = dwarf_entrypc(sp_die, &entry);
+ ret = die_entrypc(sp_die, &entry);
if (ret)
return ret;
@@ -1065,7 +1117,7 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
bool first = true;
const char *name;
- ret = dwarf_entrypc(sp_die, &entry);
+ ret = die_entrypc(sp_die, &entry);
if (ret)
return ret;
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 8ac53bf1..ee15fac4 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -41,6 +41,9 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
/* Get DW_AT_linkage_name (should be NULL for C binary) */
const char *die_get_linkage_name(Dwarf_Die *dw_die);
+/* Get the lowest PC in DIE (including range list) */
+int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr);
+
/* Ensure that this DIE is a subprogram and definition (not declaration) */
bool die_is_func_def(Dwarf_Die *dw_die);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 29e2bb3..096c52f 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1253,8 +1253,15 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
if (get_config_terms(head_config, &config_terms))
return -ENOMEM;
- if (perf_pmu__config(pmu, &attr, head_config, parse_state->error))
+ if (perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
+ struct perf_evsel_config_term *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &config_terms, list) {
+ list_del_init(&pos->list);
+ free(pos);
+ }
return -EINVAL;
+ }
evsel = __add_event(list, &parse_state->idx, &attr,
get_config_name(head_config), pmu,
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index c9319f8..f732e3a 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -34,7 +34,7 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
static inline const char *perf_reg_name(int id __maybe_unused)
{
- return NULL;
+ return "unknown";
}
static inline int perf_reg_value(u64 *valp __maybe_unused,
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index a5731de..30a5e92 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -767,6 +767,16 @@ static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
return 0;
}
+/* Return innermost DIE */
+static int find_inner_scope_cb(Dwarf_Die *fn_die, void *data)
+{
+ struct find_scope_param *fsp = data;
+
+ memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
+ fsp->found = true;
+ return 1;
+}
+
/* Find an appropriate scope fits to given conditions */
static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
{
@@ -778,8 +788,13 @@ static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
.die_mem = die_mem,
.found = false,
};
+ int ret;
- cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp);
+ ret = cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb,
+ &fsp);
+ if (!ret && !fsp.found)
+ cu_walk_functions_at(&pf->cu_die, pf->addr,
+ find_inner_scope_cb, &fsp);
return fsp.found ? die_mem : NULL;
}
@@ -953,7 +968,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
ret = find_probe_point_lazy(in_die, pf);
else {
/* Get probe address */
- if (dwarf_entrypc(in_die, &addr) != 0) {
+ if (die_entrypc(in_die, &addr) != 0) {
pr_warning("Failed to get entry address of %s.\n",
dwarf_diename(in_die));
return -ENOENT;
@@ -1005,7 +1020,7 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
param->retval = find_probe_point_by_line(pf);
} else if (die_is_func_instance(sp_die)) {
/* Instances always have the entry address */
- dwarf_entrypc(sp_die, &pf->addr);
+ die_entrypc(sp_die, &pf->addr);
/* But in some case the entry address is 0 */
if (pf->addr == 0) {
pr_debug("%s has no entry PC. Skipped\n",
@@ -1417,6 +1432,18 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
return DIE_FIND_CB_END;
}
+static bool available_var_finder_overlap(struct available_var_finder *af)
+{
+ int i;
+
+ for (i = 0; i < af->nvls; i++) {
+ if (af->pf.addr == af->vls[i].point.address)
+ return true;
+ }
+ return false;
+
+}
+
/* Add a found vars into available variables list */
static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
{
@@ -1427,6 +1454,14 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
Dwarf_Die die_mem;
int ret;
+ /*
+ * For some reason (e.g. different column assigned to same address),
+ * this callback can be called with the address which already passed.
+ * Ignore it first.
+ */
+ if (available_var_finder_overlap(af))
+ return 0;
+
/* Check number of tevs */
if (af->nvls == af->max_vls) {
pr_warning("Too many( > %d) probe point found.\n", af->max_vls);
@@ -1570,7 +1605,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
/* Get function entry information */
func = basefunc = dwarf_diename(&spdie);
if (!func ||
- dwarf_entrypc(&spdie, &baseaddr) != 0 ||
+ die_entrypc(&spdie, &baseaddr) != 0 ||
dwarf_decl_line(&spdie, &baseline) != 0) {
lineno = 0;
goto post;
@@ -1587,7 +1622,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
&indie)) {
/* There is an inline function */
- if (dwarf_entrypc(&indie, &_addr) == 0 &&
+ if (die_entrypc(&indie, &_addr) == 0 &&
_addr == addr) {
/*
* addr is at an inline function entry.
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 9005fbe..23092fd 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -109,7 +109,6 @@ static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
return ret;
}
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
- va_end(ap_saved);
if (len > strbuf_avail(sb)) {
pr_debug("this should not happen, your vsnprintf is broken");
va_end(ap_saved);
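
Note on the strbuf.c hunk (sketch, not part of the patch): every path through strbuf_addv() already ends ap_saved exactly once further down, so the extra call after the retried vsnprintf() meant the same va_copy'd list could be ended twice, which is undefined behaviour. The intended discipline, one va_end() per va_copy() on every path and only after its last use, looks like this in a standalone example (function names here are made up for illustration):

#include <stdarg.h>
#include <stdio.h>

/* One va_end() per va_copy(), on every path, only after the last use. */
static int format_twice(char *buf, size_t size, const char *fmt, va_list ap)
{
	va_list ap_saved;
	int len;

	va_copy(ap_saved, ap);
	len = vsnprintf(buf, size, fmt, ap);	/* first attempt may consume ap */
	if (len < 0) {
		va_end(ap_saved);		/* error path: exactly one va_end */
		return len;
	}
	if ((size_t)len >= size)		/* truncated: retry from the copy
						 * (strbuf grows the buffer here) */
		len = vsnprintf(buf, size, fmt, ap_saved);
	va_end(ap_saved);			/* normal path: exactly one va_end */
	return len;
}

static int format_buf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = format_twice(buf, size, fmt, ap);
	va_end(ap);
	return len;
}

int main(void)
{
	char buf[32];

	format_buf(buf, sizeof(buf), "%s %d", "value", 42);
	printf("%s\n", buf);
	return 0;
}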
diff --git a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
index f794d6b..3e4ff4a 100644
--- a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
@@ -40,7 +40,6 @@ static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = {
{
.name = "PC9",
.desc = N_("Processor Package C9"),
- .desc = N_("Processor Package C2"),
.id = PC9,
.range = RANGE_PACKAGE,
.get_count_percent = hsw_ext_get_count_percent,
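
Note on the cpupower hunk (sketch, not part of the patch): the PC9 entry initialized .desc twice, and with designated initializers the last value assigned to a field wins, so the monitor silently reported PC9 with the "Processor Package C2" text. A minimal illustration of the pitfall, using a hypothetical struct (gcc flags it with -Woverride-init, part of -Wextra):

#include <stdio.h>

struct cstate_desc { const char *name; const char *desc; };

/* The later designated initializer overrides the earlier one, which is
 * silently discarded. */
static const struct cstate_desc pc9 = {
	.name = "PC9",
	.desc = "Processor Package C9",
	.desc = "Processor Package C2",	/* overrides the line above */
};

int main(void)
{
	printf("%s: %s\n", pc9.name, pc9.desc);	/* prints the C2 text */
	return 0;
}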
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 913539a..9babb3f 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -7281,7 +7281,7 @@ static struct bpf_test tests[] = {
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
- .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
+ .errstr = "dereference of modified ctx ptr",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -7944,6 +7944,62 @@ static struct bpf_test tests[] = {
.errstr = "BPF_XADD stores into R2 packet",
.prog_type = BPF_PROG_TYPE_XDP,
},
+ {
+ "pass unmodified ctx pointer to helper",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "pass modified ctx pointer to helper, 1",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ },
+ {
+ "pass modified ctx pointer to helper, 2",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_socket_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr_unpriv = "dereference of modified ctx ptr",
+ .errstr = "dereference of modified ctx ptr",
+ },
+ {
+ "pass modified ctx pointer to helper, 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "variable ctx access var_off=(0x0; 0x4)",
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)
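
Note on the test_verifier.c hunk (sketch, not part of the patch): the new cases pin down the rule behind the relaxed errstr above: a helper taking a ctx argument must receive the ctx pointer exactly as the program got it, and once R1 has been offset, whether by a constant or by a variable amount, the program is rejected. Roughly the same thing written as restricted C, assuming the usual selftest bpf_helpers.h declarations (illustrative only; the tests themselves use raw BPF instructions, and section names and include paths vary by tree):

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* assumed selftest helper declarations */

SEC("socket")
int modified_ctx(struct __sk_buff *skb)
{
	/* Equivalent of BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612): the pointer
	 * handed to the helper is no longer the verifier-tracked ctx. */
	void *ctx = (char *)skb - 612;

	/* Expected to be rejected: "dereference of modified ctx ptr". */
	return (int)bpf_get_socket_cookie(ctx);
}

char _license[] SEC("license") = "GPL";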
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 891130d..8a5066d 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -195,6 +195,26 @@
echo "PASS: route get"
}
+kci_test_addrlft()
+{
+ for i in $(seq 10 100) ;do
+ lft=$(((RANDOM%3) + 1))
+ ip addr add 10.23.11.$i/32 dev "$devdummy" preferred_lft $lft valid_lft $((lft+1))
+ check_err $?
+ done
+
+ sleep 5
+
+ ip addr show dev "$devdummy" | grep "10.23.11."
+ if [ $? -eq 0 ]; then
+ echo "FAIL: preferred_lft addresses remaining"
+ check_err 1
+ return
+ fi
+
+ echo "PASS: preferred_lft addresses have expired"
+}
+
kci_test_addrlabel()
{
ret=0
@@ -245,6 +265,7 @@
kci_test_polrouting
kci_test_route_get
+ kci_test_addrlft
kci_test_tc
kci_test_gre
kci_test_bridge