Merge branch 'android-msm-bullhead-3.10-nyc-mr2' into android-msm-bullhead-3.10-oc
November 2017.1
Bug: 65558923
Change-Id: Ib1024a7c80940ab5b4601852fe2775e503d021e2
Signed-off-by: Andrew Lehmer <alehmer@google.com>
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 352c03e..d6edcda 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1167,11 +1167,20 @@
Functional default: enabled if accept_ra is enabled.
disabled if accept_ra is disabled.
+accept_ra_rt_info_min_plen - INTEGER
+ Minimum prefix length of Route Information in RA.
+
+ Route Information w/ prefix smaller than this variable shall
+ be ignored.
+
+ Functional default: 0 if accept_ra_rtr_pref is enabled.
+ -1 if accept_ra_rtr_pref is disabled.
+
accept_ra_rt_info_max_plen - INTEGER
Maximum prefix length of Route Information in RA.
- Route Information w/ prefix larger than or equal to this
- variable shall be ignored.
+ Route Information w/ prefix larger than this variable shall
+ be ignored.
Functional default: 0 if accept_ra_rtr_pref is enabled.
-1 if accept_ra_rtr_pref is disabled.
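As a hedged illustration (not part of this patch), the new knob can be set per interface through procfs; the path below is assumed from the conf/<interface>/ layout used by the neighbouring accept_ra_* options, and the interface name and value are made up.

#include <stdio.h>

int main(void)
{
        /* Ignore RA Route Information options with a prefix shorter than /48
         * on wlan0 (illustrative path and value only).
         */
        const char *path =
                "/proc/sys/net/ipv6/conf/wlan0/accept_ra_rt_info_min_plen";
        FILE *f = fopen(path, "w");

        if (!f)
                return 1;
        fprintf(f, "48\n");
        return fclose(f) ? 1 : 0;
}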
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 3af5ae6..3f35e2d 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -45,6 +45,11 @@
users. The behaviour of %pK depends on the kptr_restrict sysctl - see
Documentation/sysctl/kernel.txt for more details.
+ %pP 0x01234567 or 0x0123456789abcdef
+
+ For printing kernel pointers which should always be shown, even to
+ unprivileged users.
+
Struct Resources:
%pr [mem 0x60000000-0x6fffffff flags 0x2200] or
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 6c46487..8e7209b 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -334,6 +334,15 @@
When kptr_restrict is set to (2), kernel pointers printed using
%pK will be replaced with 0's regardless of privileges.
+When kptr_restrict is set to (3), kernel pointers printed using
+%p and %pK will be replaced with 0's regardless of privileges;
+however, kernel pointers printed using %pP will continue to be printed.
+
+When kptr_restrict is set to (4), kernel pointers printed with
+%p, %pK, %pa, and %p[rR] will be replaced with 0's regardless of
+privileges. Kernel pointers printed using %pP will continue to be
+printed.
+
==============================================================
kstack_depth_to_print: (X86 only)
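A hedged, illustrative test module (not from this patch) showing the documented behaviour: with kptr_restrict set to 3 or 4, the %p and %pK values should print as zeroes while %pP, the extension documented above, still shows the address. The module and variable names are arbitrary.

#include <linux/module.h>
#include <linux/kernel.h>

static int pp_demo_target;

static int __init pp_demo_init(void)
{
        void *p = &pp_demo_target;

        /* Under kptr_restrict >= 3, only the %pP value is expected to be
         * shown unmasked.
         */
        pr_info("pp_demo: %%p=%p %%pK=%pK %%pP=%pP\n", p, p, p);
        return 0;
}

static void __exit pp_demo_exit(void)
{
}

module_init(pp_demo_init);
module_exit(pp_demo_exit);
MODULE_LICENSE("GPL");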
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8456f0c..0ec9bdd 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -687,6 +687,24 @@
endmenu
+menu "Expermimental Security Enhancements"
+
+config KUSER_HELPERS_SELECTIVE_DISABLE
+ bool "Disable KUSER_HELPERS selectively"
+ depends on ARM64
+ help
+	  Kuser_Helpers is a set of functions in the vector page, located at a
+	  fixed address, that support applications compiled for ARM architecture
+	  versions earlier than ARMv7. Because these functions sit at a fixed
+	  address, they provide an ASLR bypass.
+
+	  Do not enable this feature unless you know what you are doing: it is
+	  experimental and will break any application that uses the Kuser_Helpers
+	  or that registers a signal handler and relies on the kernel trampolines.
+
+ If unsure, say N.
+
+endmenu
+
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/arm64/configs/bullhead_defconfig b/arch/arm64/configs/bullhead_defconfig
index 0ae9ab0..3988805 100644
--- a/arch/arm64/configs/bullhead_defconfig
+++ b/arch/arm64/configs/bullhead_defconfig
@@ -202,6 +202,7 @@
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_CMA=y
CONFIG_ARM_CCI=y
+CONFIG_PROC_DEVICETREE=y
CONFIG_ZRAM=y
CONFIG_ZRAM_LZ4_COMPRESS=y
CONFIG_BLK_DEV_LOOP=y
@@ -271,6 +272,7 @@
CONFIG_CNSS_MAC_BUG=y
CONFIG_CLD_LL_CORE=y
CONFIG_BUS_AUTO_SUSPEND=y
+CONFIG_CNSS_GENL=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
@@ -518,6 +520,7 @@
CONFIG_STAGING=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder"
CONFIG_ASHMEM=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ANDROID_INTF_ALARM_DEV=y
@@ -613,6 +616,8 @@
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_FUSE_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 5487269..159743c 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -83,6 +83,8 @@
return read_cpuid(CTR_EL0);
}
+void cpuinfo_store_cpu(void);
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 52b484b..77667c3 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -65,7 +65,11 @@
/* Lengths */
#define ARM_BREAKPOINT_LEN_1 0x1
#define ARM_BREAKPOINT_LEN_2 0x3
+#define ARM_BREAKPOINT_LEN_3 0x7
#define ARM_BREAKPOINT_LEN_4 0xf
+#define ARM_BREAKPOINT_LEN_5 0x1f
+#define ARM_BREAKPOINT_LEN_6 0x3f
+#define ARM_BREAKPOINT_LEN_7 0x7f
#define ARM_BREAKPOINT_LEN_8 0xff
/* Kernel stepping */
@@ -107,7 +111,7 @@
struct pmu;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type);
+ int *gen_len, int *gen_type, int *offset);
extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index de7b854..14a2a51 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -311,9 +311,21 @@
case ARM_BREAKPOINT_LEN_2:
len_in_bytes = 2;
break;
+ case ARM_BREAKPOINT_LEN_3:
+ len_in_bytes = 3;
+ break;
case ARM_BREAKPOINT_LEN_4:
len_in_bytes = 4;
break;
+ case ARM_BREAKPOINT_LEN_5:
+ len_in_bytes = 5;
+ break;
+ case ARM_BREAKPOINT_LEN_6:
+ len_in_bytes = 6;
+ break;
+ case ARM_BREAKPOINT_LEN_7:
+ len_in_bytes = 7;
+ break;
case ARM_BREAKPOINT_LEN_8:
len_in_bytes = 8;
break;
@@ -343,7 +355,7 @@
* to generic breakpoint descriptions.
*/
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type)
+ int *gen_len, int *gen_type, int *offset)
{
/* Type */
switch (ctrl.type) {
@@ -363,17 +375,33 @@
return -EINVAL;
}
+ if (!ctrl.len)
+ return -EINVAL;
+ *offset = __ffs(ctrl.len);
+
/* Len */
- switch (ctrl.len) {
+ switch (ctrl.len >> *offset) {
case ARM_BREAKPOINT_LEN_1:
*gen_len = HW_BREAKPOINT_LEN_1;
break;
case ARM_BREAKPOINT_LEN_2:
*gen_len = HW_BREAKPOINT_LEN_2;
break;
+ case ARM_BREAKPOINT_LEN_3:
+ *gen_len = HW_BREAKPOINT_LEN_3;
+ break;
case ARM_BREAKPOINT_LEN_4:
*gen_len = HW_BREAKPOINT_LEN_4;
break;
+ case ARM_BREAKPOINT_LEN_5:
+ *gen_len = HW_BREAKPOINT_LEN_5;
+ break;
+ case ARM_BREAKPOINT_LEN_6:
+ *gen_len = HW_BREAKPOINT_LEN_6;
+ break;
+ case ARM_BREAKPOINT_LEN_7:
+ *gen_len = HW_BREAKPOINT_LEN_7;
+ break;
case ARM_BREAKPOINT_LEN_8:
*gen_len = HW_BREAKPOINT_LEN_8;
break;
@@ -417,9 +445,21 @@
case HW_BREAKPOINT_LEN_2:
info->ctrl.len = ARM_BREAKPOINT_LEN_2;
break;
+ case HW_BREAKPOINT_LEN_3:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_3;
+ break;
case HW_BREAKPOINT_LEN_4:
info->ctrl.len = ARM_BREAKPOINT_LEN_4;
break;
+ case HW_BREAKPOINT_LEN_5:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_5;
+ break;
+ case HW_BREAKPOINT_LEN_6:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_6;
+ break;
+ case HW_BREAKPOINT_LEN_7:
+ info->ctrl.len = ARM_BREAKPOINT_LEN_7;
+ break;
case HW_BREAKPOINT_LEN_8:
info->ctrl.len = ARM_BREAKPOINT_LEN_8;
break;
@@ -511,18 +551,17 @@
default:
return -EINVAL;
}
-
- info->address &= ~alignment_mask;
- info->ctrl.len <<= offset;
} else {
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
alignment_mask = 0x3;
else
alignment_mask = 0x7;
- if (info->address & alignment_mask)
- return -EINVAL;
+ offset = info->address & alignment_mask;
}
+ info->address &= ~alignment_mask;
+ info->ctrl.len <<= offset;
+
/*
* Disallow per-task kernel breakpoints since these would
* complicate the stepping code.
@@ -653,12 +692,47 @@
return 0;
}
+/*
+ * Arm64 hardware does not always report a watchpoint hit address that matches
+ * one of the watchpoints set. It can also report an address "near" the
+ * watchpoint if a single instruction accesses both watched and unwatched
+ * addresses. There is no straightforward way, short of disassembling the
+ * offending instruction, to map that address back to the watchpoint. This
+ * function computes the distance of the memory access from the watchpoint as a
+ * heuristic for the likelihood that a given access triggered the watchpoint.
+ *
+ * See Section D2.10.5 "Determining the memory location that caused a Watchpoint
+ * exception" of the ARMv8 Architecture Reference Manual for details.
+ *
+ * The function returns the distance of the address from the bytes watched by
+ * the watchpoint. In case of an exact match, it returns 0.
+ */
+static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
+ struct arch_hw_breakpoint_ctrl *ctrl)
+{
+ u64 wp_low, wp_high;
+ u32 lens, lene;
+
+ lens = __ffs(ctrl->len);
+ lene = __fls(ctrl->len);
+
+ wp_low = val + lens;
+ wp_high = val + lene;
+ if (addr < wp_low)
+ return wp_low - addr;
+ else if (addr > wp_high)
+ return addr - wp_high;
+ else
+ return 0;
+}
+
static int watchpoint_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- int i, step = 0, *kernel_step, access;
+ int i, step = 0, *kernel_step, access, closest_match = 0;
+ u64 min_dist = -1, dist;
u32 ctrl_reg;
- u64 val, alignment_mask;
+ u64 val;
struct perf_event *wp, **slots;
struct debug_info *debug_info;
struct arch_hw_breakpoint *info;
@@ -667,35 +741,15 @@
slots = this_cpu_ptr(wp_on_reg);
	debug_info = &current->thread.debug;
+ /*
+	 * Find all watchpoints that match the reported address. If no exact
+	 * match is found, attribute the hit to the closest watchpoint.
+ */
+ rcu_read_lock();
for (i = 0; i < core_num_wrps; ++i) {
- rcu_read_lock();
-
wp = slots[i];
-
if (wp == NULL)
- goto unlock;
-
- info = counter_arch_bp(wp);
- /* AArch32 watchpoints are either 4 or 8 bytes aligned. */
- if (is_compat_task()) {
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
- alignment_mask = 0x7;
- else
- alignment_mask = 0x3;
- } else {
- alignment_mask = 0x7;
- }
-
- /* Check if the watchpoint value matches. */
- val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
- if (val != (addr & ~alignment_mask))
- goto unlock;
-
- /* Possible match, check the byte address select to confirm. */
- ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
- decode_ctrl_reg(ctrl_reg, &ctrl);
- if (!((1 << (addr & alignment_mask)) & ctrl.len))
- goto unlock;
+ continue;
/*
* Check that the access type matches.
@@ -704,18 +758,41 @@
access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
HW_BREAKPOINT_R;
if (!(access & hw_breakpoint_type(wp)))
- goto unlock;
+ continue;
+ /* Check if the watchpoint value and byte select match. */
+ val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
+ ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
+ decode_ctrl_reg(ctrl_reg, &ctrl);
+ dist = get_distance_from_watchpoint(addr, val, &ctrl);
+ if (dist < min_dist) {
+ min_dist = dist;
+ closest_match = i;
+ }
+ /* Is this an exact match? */
+ if (dist != 0)
+ continue;
+
+ info = counter_arch_bp(wp);
info->trigger = addr;
perf_bp_event(wp, regs);
/* Do we need to handle the stepping? */
if (!wp->overflow_handler)
step = 1;
-
-unlock:
- rcu_read_unlock();
}
+ if (min_dist > 0 && min_dist != -1) {
+ /* No exact match found. */
+ wp = slots[closest_match];
+ info = counter_arch_bp(wp);
+ info->trigger = addr;
+ perf_bp_event(wp, regs);
+
+ /* Do we need to handle the stepping? */
+ if (!wp->overflow_handler)
+ step = 1;
+ }
+ rcu_read_unlock();
if (!step)
return 0;
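A quick userspace restatement of the distance heuristic added above may help: the byte-address-select mask in ctrl.len picks which bytes of the doubleword at the watchpoint value are watched, and the reported address is scored by how far it falls outside that span. This is illustration only; the mask and addresses below are invented, and __builtin_ctz/__builtin_clz stand in for the kernel's __ffs/__fls.

#include <stdio.h>
#include <stdint.h>

static uint64_t distance(uint64_t addr, uint64_t val, uint32_t len)
{
        uint32_t lens = __builtin_ctz(len);        /* first watched byte  */
        uint32_t lene = 31 - __builtin_clz(len);   /* last watched byte   */
        uint64_t wp_low = val + lens;
        uint64_t wp_high = val + lene;

        if (addr < wp_low)
                return wp_low - addr;
        if (addr > wp_high)
                return addr - wp_high;
        return 0;
}

int main(void)
{
        /* Watchpoint on bytes 4..7 of 0x1000 (len = 0xf0): a hit reported at
         * 0x1002 is 2 bytes away, a hit at 0x1005 is an exact match.
         */
        printf("%llu\n", (unsigned long long)distance(0x1002, 0x1000, 0xf0));
        printf("%llu\n", (unsigned long long)distance(0x1005, 0x1000, 0xf0));
        return 0;
}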
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 7b1f45c..da1b32d 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -220,13 +220,13 @@
struct arch_hw_breakpoint_ctrl ctrl,
struct perf_event_attr *attr)
{
- int err, len, type, disabled = !ctrl.enabled;
+ int err, len, type, offset, disabled = !ctrl.enabled;
attr->disabled = disabled;
if (disabled)
return 0;
- err = arch_bp_generic_fields(ctrl, &len, &type);
+ err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
if (err)
return err;
@@ -245,6 +245,7 @@
attr->bp_len = len;
attr->bp_type = type;
+ attr->bp_addr += offset;
return 0;
}
@@ -297,7 +298,7 @@
if (IS_ERR(bp))
return PTR_ERR(bp);
- *addr = bp ? bp->attr.bp_addr : 0;
+ *addr = bp ? counter_arch_bp(bp)->address : 0;
return 0;
}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 85c5235..f220e18 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -45,6 +45,7 @@
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/efi.h>
+#include <linux/personality.h>
#include <asm/fixmap.h>
#include <asm/cputype.h>
@@ -124,6 +125,19 @@
printk("%s", buf);
}
+struct cpuinfo_arm64 {
+ struct cpu cpu;
+ u32 reg_midr;
+};
+
+static DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+
+void cpuinfo_store_cpu(void)
+{
+ struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
+ info->reg_midr = read_cpuid_id();
+}
+
void __init smp_setup_processor_id(void)
{
/*
@@ -308,6 +322,8 @@
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
+ cpuinfo_store_cpu();
+
if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
early_print("\n"
"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
@@ -434,14 +450,12 @@
}
arch_initcall(arm64_device_init);
-static DEFINE_PER_CPU(struct cpu, cpu_data);
-
static int __init topology_init(void)
{
int i;
for_each_possible_cpu(i) {
- struct cpu *cpu = &per_cpu(cpu_data, i);
+ struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
cpu->hotpluggable = 1;
register_cpu(cpu, i);
}
@@ -462,14 +476,49 @@
NULL
};
+#ifdef CONFIG_COMPAT
+static const char *compat_hwcap_str[] = {
+ "swp",
+ "half",
+ "thumb",
+ "26bit",
+ "fastmult",
+ "fpa",
+ "vfp",
+ "edsp",
+ "java",
+ "iwmmxt",
+ "crunch",
+ "thumbee",
+ "neon",
+ "vfpv3",
+ "vfpv3d16",
+ "tls",
+ "vfpv4",
+ "idiva",
+ "idivt",
+ "vfpd32",
+ "lpae",
+ "evtstrm"
+};
+
+static const char *compat_hwcap2_str[] = {
+ "aes",
+ "pmull",
+ "sha1",
+ "sha2",
+ "crc32",
+ NULL
+};
+#endif /* CONFIG_COMPAT */
static int c_show(struct seq_file *m, void *v)
{
- int i;
-
- seq_printf(m, "Processor\t: %s rev %d (%s)\n",
- cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
+ int i, j;
for_each_present_cpu(i) {
+ struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
+ u32 midr = cpuinfo->reg_midr;
+
/*
* glibc reads /proc/cpuinfo to determine the number of
* online processors, looking for lines beginning with
@@ -478,30 +527,39 @@
#ifdef CONFIG_SMP
seq_printf(m, "processor\t: %d\n", i);
#endif
+
+ /*
+ * Dump out the common processor features in a single line.
+ * Userspace should read the hwcaps with getauxval(AT_HWCAP)
+ * rather than attempting to parse this, but there's a body of
+ * software which does already (at least for 32-bit).
+ */
+ seq_puts(m, "Features\t:");
+
+ if (personality(current->personality) == PER_LINUX32) {
+#ifdef CONFIG_COMPAT
+ for (j = 0; compat_hwcap_str[j]; j++)
+ if (COMPAT_ELF_HWCAP & (1 << j))
+ seq_printf(m, " %s", compat_hwcap_str[j]);
+
+ for (j = 0; compat_hwcap2_str[j]; j++)
+ if (compat_elf_hwcap2 & (1 << j))
+ seq_printf(m, " %s", compat_hwcap2_str[j]);
+#endif /* CONFIG_COMPAT */
+ } else {
+ for (j = 0; hwcap_str[j]; j++)
+ if (elf_hwcap & (1 << j))
+ seq_printf(m, " %s", hwcap_str[j]);
+ }
+ seq_puts(m, "\n");
+
+ seq_printf(m, "CPU implementer\t: 0x%02x\n", (midr >> 24));
+ seq_printf(m, "CPU architecture: 8\n");
+ seq_printf(m, "CPU variant\t: 0x%x\n", ((midr >> 20) & 0xf));
+ seq_printf(m, "CPU part\t: 0x%03x\n", ((midr >> 4) & 0xfff));
+ seq_printf(m, "CPU revision\t: %d\n\n", (midr & 0xf));
}
- /* dump out the processor features */
- seq_puts(m, "Features\t: ");
-
- for (i = 0; hwcap_str[i]; i++)
- if (elf_hwcap & (1 << i))
- seq_printf(m, "%s ", hwcap_str[i]);
-#ifdef CONFIG_ARMV7_COMPAT_CPUINFO
- if (is_compat_task()) {
- /* Print out the non-optional ARMv8 HW capabilities */
- seq_printf(m, "wp half thumb fastmult vfp edsp neon vfpv3 tlsi ");
- seq_printf(m, "vfpv4 idiva idivt ");
- }
-#endif
-
- seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
- seq_printf(m, "CPU architecture: 8\n");
- seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
- seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
- seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
-
- seq_puts(m, "\n");
-
if (!arch_read_hardware_id)
seq_printf(m, "Hardware\t: %s\n", machine_name);
else
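The comment added above recommends reading hwcaps with getauxval(AT_HWCAP) rather than parsing /proc/cpuinfo. A minimal arm64 userspace sketch (not part of this patch; the HWCAP_AES check simply illustrates testing one feature bit):

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>   /* arm64 uapi hwcap bit definitions */

int main(void)
{
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("AT_HWCAP = 0x%lx\n", hwcap);
#ifdef HWCAP_AES
        printf("aes: %s\n", (hwcap & HWCAP_AES) ? "yes" : "no");
#endif
        return 0;
}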
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index f7829153..797585e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -162,6 +162,11 @@
cpu_ops[cpu]->cpu_postboot();
/*
+ * Log the CPU info before it is marked online and might get read.
+ */
+ cpuinfo_store_cpu();
+
+ /*
* Enable GIC and timers.
*/
smp_store_cpu_info(cpu);
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 2198961..619344d 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -139,7 +139,7 @@
struct stackframe frame;
const register unsigned long current_sp asm ("sp");
- pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+ pr_debug("%s(regs = %pP tsk = %pP)\n", __func__, regs, tsk);
if (!tsk)
tsk = current;
@@ -207,7 +207,7 @@
print_modules();
__show_regs(regs);
- pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
+ pr_emerg("Process %.*s (pid: %d, stack limit = 0x%pP)\n",
TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
if (!user_mode(regs) || in_interrupt()) {
diff --git a/build.config b/build.config
index f0c3c7f..796f1b9 100644
--- a/build.config
+++ b/build.config
@@ -4,6 +4,7 @@
DEFCONFIG=bullhead_defconfig
EXTRA_CMDS=''
KERNEL_DIR=private/msm-lge
+POST_DEFCONFIG_CMDS="check_defconfig"
LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
FILES="
arch/arm64/boot/Image.gz-dtb
diff --git a/drivers/clk/qcom/clock-cpu-8994.c b/drivers/clk/qcom/clock-cpu-8994.c
index 6eb346b..7928f2e 100644
--- a/drivers/clk/qcom/clock-cpu-8994.c
+++ b/drivers/clk/qcom/clock-cpu-8994.c
@@ -987,6 +987,8 @@
case AUX_CLK_SEL:
rate = sys_apcsaux_clk.c.rate;
break;
+ default:
+ return;
};
break;
case PLL0_EARLY_SEL:
@@ -997,6 +999,8 @@
rate = readl_relaxed(base + C0_PLLA_L_VAL);
rate *= xo_ao.c.rate;
break;
+ default:
+ return;
};
/* One regulator */
@@ -1556,7 +1560,7 @@
struct platform_device *apc0_dev, *apc1_dev;
struct device_node *apc0_node, *apc1_node;
unsigned long apc0_fmax, apc1_fmax;
- int cpu, a53_cpu, a57_cpu;
+ int cpu, a53_cpu = 0, a57_cpu = 0;
apc0_node = of_parse_phandle(pdev->dev.of_node, "vdd-a53-supply", 0);
apc1_node = of_parse_phandle(pdev->dev.of_node, "vdd-a57-supply", 0);
@@ -1957,7 +1961,6 @@
u64 pte_efuse;
char a57speedbinstr[] = "qcom,a57-speedbinXX-vXX";
char a53speedbinstr[] = "qcom,a53-speedbinXX-vXX";
-
v2 = msm8994_v2 | msm8992;
a53_pll0_main.c.flags = CLKFLAG_NO_RATE_CACHE;
@@ -1984,7 +1987,7 @@
snprintf(a53speedbinstr, ARRAY_SIZE(a53speedbinstr),
"qcom,a53-speedbin%d-v%d", a53speedbin, pvs_ver);
- } else if (v2)
+ } else
pte_efuse = readl_relaxed(vbases[EFUSE_BASE]);
snprintf(a53speedbinstr, ARRAY_SIZE(a53speedbinstr),
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 8fc3ca2..0e94578 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -574,7 +574,7 @@
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
- int ret, count = 0, i;
+ int ret = 0, count = 0, i;
struct cpufreq_policy *policy = data;
struct cpufreq_frequency_table *table;
unsigned int cpu_num, cpu = policy->cpu;
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index f5fb59a..c728d9d 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1024,6 +1024,13 @@
if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy);
+ /*
+ * Set the fault tolerance policy to FT_REPLAY - As context wants
+ * to invalidate it after a replay attempt fails. This doesn't
+ * require to execute the default FT policy.
+ */
+ else if (drawctxt->base.flags & KGSL_CONTEXT_INVALIDATE_ON_FAULT)
+ set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy);
else
cmdbatch->fault_policy = adreno_dev->ft_policy;
@@ -1904,7 +1911,7 @@
*
* Process expired commands and send new ones.
*/
-static void adreno_dispatcher_work(struct work_struct *work)
+static void adreno_dispatcher_work(struct kthread_work *work)
{
struct adreno_dispatcher *dispatcher =
container_of(work, struct adreno_dispatcher, work);
@@ -1981,7 +1988,7 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- queue_work(device->work_queue, &dispatcher->work);
+ queue_kthread_work(&kgsl_driver.worker, &dispatcher->work);
}
/**
@@ -2272,7 +2279,7 @@
setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
(unsigned long) adreno_dev);
- INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+ init_kthread_work(&dispatcher->work, adreno_dispatcher_work);
init_completion(&dispatcher->idle_gate);
complete_all(&dispatcher->idle_gate);
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index c2721ad..d763714 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -72,7 +72,7 @@
atomic_t fault;
struct plist_head pending;
spinlock_t plist_lock;
- struct work_struct work;
+ struct kthread_work work;
struct kobject kobj;
struct completion idle_gate;
};
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index f1b7231..a9107c8 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -333,6 +333,7 @@
KGSL_CONTEXT_PER_CONTEXT_TS |
KGSL_CONTEXT_USER_GENERATED_TS |
KGSL_CONTEXT_NO_FAULT_TOLERANCE |
+ KGSL_CONTEXT_INVALIDATE_ON_FAULT |
KGSL_CONTEXT_CTX_SWITCH |
KGSL_CONTEXT_PRIORITY_MASK |
KGSL_CONTEXT_TYPE_MASK |
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 44f981c..87a6b9e 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -4866,6 +4866,8 @@
static int __init kgsl_core_init(void)
{
int result = 0;
+ struct sched_param param = { .sched_priority = 2 };
+
/* alloc major and minor device numbers */
result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
"kgsl");
@@ -4927,6 +4929,18 @@
kgsl_mmu_set_mmutype(ksgl_mmu_type);
+ init_kthread_worker(&kgsl_driver.worker);
+
+ kgsl_driver.worker_thread = kthread_run(kthread_worker_fn,
+ &kgsl_driver.worker, "kgsl_worker_thread");
+
+ if (IS_ERR(kgsl_driver.worker_thread)) {
+ pr_err("unable to start kgsl thread\n");
+ goto err;
+ }
+
+	sched_setscheduler(kgsl_driver.worker_thread, SCHED_FIFO, &param);
+
kgsl_events_init();
/* create the memobjs kmem cache */
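The kgsl changes above move event and dispatcher processing from workqueues to a dedicated SCHED_FIFO kthread_worker. A hedged, self-contained sketch of that pattern using the same 3.10-era API names the patch uses follows; the module and symbol names are illustrative only.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct kthread_worker demo_worker;
static struct task_struct *demo_thread;
static struct kthread_work demo_work;

static void demo_work_fn(struct kthread_work *work)
{
        pr_info("ran on the dedicated worker thread\n");
}

static int __init demo_init(void)
{
        struct sched_param param = { .sched_priority = 2 };

        init_kthread_worker(&demo_worker);
        demo_thread = kthread_run(kthread_worker_fn, &demo_worker,
                                  "demo_worker");
        if (IS_ERR(demo_thread))
                return PTR_ERR(demo_thread);

        /* Same priority bump the patch applies to kgsl_worker_thread. */
        sched_setscheduler(demo_thread, SCHED_FIFO, &param);

        init_kthread_work(&demo_work, demo_work_fn);
        queue_kthread_work(&demo_worker, &demo_work);
        return 0;
}

static void __exit demo_exit(void)
{
        flush_kthread_worker(&demo_worker);
        kthread_stop(demo_thread);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");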
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index cb8b388..e4e53e3 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -25,6 +25,7 @@
#include <linux/regulator/consumer.h>
#include <linux/mm.h>
#include <linux/dma-attrs.h>
+#include <linux/kthread.h>
/* The number of memstore arrays limits the number of contexts allowed.
* If more contexts are needed, update multiple for MEMSTORE_SIZE
@@ -95,6 +96,8 @@
unsigned int mapped_max;
} stats;
unsigned int full_cache_threshold;
+ struct kthread_worker worker;
+ struct task_struct *worker_thread;
};
extern struct kgsl_driver kgsl_driver;
@@ -223,7 +226,7 @@
void *priv;
struct list_head node;
unsigned int created;
- struct work_struct work;
+ struct kthread_work work;
int result;
struct kgsl_event_group *group;
};
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index ab03192..424998a 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -87,6 +87,7 @@
{ KGSL_CONTEXT_PER_CONTEXT_TS, "PER_CONTEXT_TS" }, \
{ KGSL_CONTEXT_USER_GENERATED_TS, "USER_TS" }, \
{ KGSL_CONTEXT_NO_FAULT_TOLERANCE, "NO_FT" }, \
+ { KGSL_CONTEXT_INVALIDATE_ON_FAULT, "INVALIDATE_ON_FAULT" }, \
{ KGSL_CONTEXT_PWR_CONSTRAINT, "PWR" }, \
{ KGSL_CONTEXT_SAVE_GMEM, "SAVE_GMEM" }
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index 81fcbe6..f4bca88 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -32,7 +32,7 @@
{
list_del(&event->node);
event->result = result;
- queue_work(device->events_wq, &event->work);
+ queue_kthread_work(&kgsl_driver.worker, &event->work);
}
/**
@@ -42,7 +42,7 @@
 * Each event callback has its own work struct and is run on an event-specific
 * workqueue. This is the worker that queues up the event callback function.
*/
-static void _kgsl_event_worker(struct work_struct *work)
+static void _kgsl_event_worker(struct kthread_work *work)
{
struct kgsl_event *event = container_of(work, struct kgsl_event, work);
int id = KGSL_CONTEXT_ID(event->context);
@@ -261,7 +261,7 @@
event->created = jiffies;
event->group = group;
- INIT_WORK(&event->work, _kgsl_event_worker);
+ init_kthread_work(&event->work, _kgsl_event_worker);
trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func);
@@ -276,7 +276,7 @@
if (timestamp_cmp(retired, timestamp) >= 0) {
event->result = KGSL_EVENT_RETIRED;
- queue_work(device->events_wq, &event->work);
+ queue_kthread_work(&kgsl_driver.worker, &event->work);
spin_unlock(&group->lock);
return 0;
}
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 0bb3bb8..9b26c23 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -28,6 +28,8 @@
#define UHID_NAME "uhid"
#define UHID_BUFSIZE 32
+static DEFINE_MUTEX(uhid_open_mutex);
+
struct uhid_device {
struct mutex devlock;
bool running;
@@ -105,15 +107,26 @@
static int uhid_hid_open(struct hid_device *hid)
{
struct uhid_device *uhid = hid->driver_data;
+ int retval = 0;
- return uhid_queue_event(uhid, UHID_OPEN);
+ mutex_lock(&uhid_open_mutex);
+ if (!hid->open++) {
+ retval = uhid_queue_event(uhid, UHID_OPEN);
+ if (retval)
+ hid->open--;
+ }
+ mutex_unlock(&uhid_open_mutex);
+ return retval;
}
static void uhid_hid_close(struct hid_device *hid)
{
struct uhid_device *uhid = hid->driver_data;
- uhid_queue_event(uhid, UHID_CLOSE);
+ mutex_lock(&uhid_open_mutex);
+ if (!--hid->open)
+ uhid_queue_event(uhid, UHID_CLOSE);
+ mutex_unlock(&uhid_open_mutex);
}
static int uhid_hid_input(struct input_dev *input, unsigned int type,
diff --git a/drivers/input/misc/vl6180/stmvl6180_module-cci.c b/drivers/input/misc/vl6180/stmvl6180_module-cci.c
index e33d168..e0855d0 100644
--- a/drivers/input/misc/vl6180/stmvl6180_module-cci.c
+++ b/drivers/input/misc/vl6180/stmvl6180_module-cci.c
@@ -119,11 +119,11 @@
vl6180_errmsg("failed %d\n", __LINE__);
return rc;
}
- }
- vl6180_dbgmsg("vreg-name: %s min_volt: %d max_volt: %d",
+ vl6180_dbgmsg("vreg-name: %s min_volt: %d max_volt: %d",
vreg_cfg->cam_vreg->reg_name,
vreg_cfg->cam_vreg->min_voltage,
vreg_cfg->cam_vreg->max_voltage);
+ }
data->en_gpio = of_get_named_gpio(of_node,
"stmvl6180,ldaf-en-gpio",0);
diff --git a/drivers/input/touchscreen/synaptics_fw_update.c b/drivers/input/touchscreen/synaptics_fw_update.c
index 360e455..ade1404 100644
--- a/drivers/input/touchscreen/synaptics_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_fw_update.c
@@ -2474,7 +2474,7 @@
fwu->ts_info = kzalloc(RMI4_INFO_MAX_LEN, GFP_KERNEL);
if (!fwu->ts_info) {
dev_err(&rmi4_data->i2c_client->dev, "Not enough memory\n");
- goto exit_free_ts_info;
+ goto exit_free_mem;
}
synaptics_rmi4_update_debug_info();
@@ -2519,7 +2519,6 @@
}
return 0;
-exit_free_ts_info:
debugfs_remove(temp);
exit_remove_attrs:
for (attr_count--; attr_count >= 0; attr_count--) {
diff --git a/drivers/leds/leds-qpnp-flash.c b/drivers/leds/leds-qpnp-flash.c
index 1094413..b4da6fb 100644
--- a/drivers/leds/leds-qpnp-flash.c
+++ b/drivers/leds/leds-qpnp-flash.c
@@ -390,15 +390,16 @@
flash_node = container_of(led_cdev, struct flash_node_data, cdev);
led = dev_get_drvdata(&flash_node->spmi_dev->dev);
- if (led->pdata->power_detect_en) {
- max_curr_avail_ma =
- qpnp_flash_led_get_max_avail_current(flash_node, led);
+ if (!led->pdata->power_detect_en)
+ return -EINVAL;
- if (max_curr_avail_ma < 0)
- return -EINVAL;
- else
- max_curr_avail_ma = (int)flash_node->max_current;
- }
+ max_curr_avail_ma =
+ qpnp_flash_led_get_max_avail_current(flash_node, led);
+
+ if (max_curr_avail_ma < 0)
+ return -EINVAL;
+ else
+ max_curr_avail_ma = (int)flash_node->max_current;
return snprintf(buf, PAGE_SIZE, "%u\n", max_curr_avail_ma);
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index 5417006..ac3c80b 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -1329,7 +1329,7 @@
uint32_t debug_start_addr = 0;
uint32_t debug_end_addr = 0;
uint32_t debug_frame_id = 0;
- enum msm_isp_buffer_state debug_state;
+ enum msm_isp_buffer_state debug_state = 0;
unsigned long flags;
struct msm_isp_bufq *bufq = NULL;
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 9469fb2..ff407ca 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -338,7 +338,7 @@
}
handle = ion_import_dma_buf(client->clnt, fd);
ret = handle == priv;
- handle ? ion_free(client->clnt, handle) : 0;
+ (!IS_ERR_OR_NULL(handle)) ? ion_free(client->clnt, handle) : 0;
return ret;
}
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 78c060a..bda711e 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -336,4 +336,5 @@
source "drivers/net/wireless/zd1211rw/Kconfig"
source "drivers/net/wireless/mwifiex/Kconfig"
source "drivers/net/wireless/cnss/Kconfig"
+source "drivers/net/wireless/cnss_genl/Kconfig"
endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 703f3aa..fb4e696 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -62,3 +62,4 @@
obj-$(CONFIG_WCNSS_CORE) += wcnss/
obj-$(CONFIG_CNSS) += cnss/
obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
+obj-$(CONFIG_CNSS_GENL) += cnss_genl/
diff --git a/drivers/net/wireless/cnss_genl/Kconfig b/drivers/net/wireless/cnss_genl/Kconfig
new file mode 100644
index 0000000..f1b8a58
--- /dev/null
+++ b/drivers/net/wireless/cnss_genl/Kconfig
@@ -0,0 +1,7 @@
+config CNSS_GENL
+ tristate "CNSS Generic Netlink Socket Driver"
+ ---help---
+	  This module creates the generic netlink family "CLD80211", which the
+	  cld driver and userspace utilities can use to communicate over
+	  netlink sockets. The module also creates several multicast groups to
+	  facilitate this communication.
diff --git a/drivers/net/wireless/cnss_genl/Makefile b/drivers/net/wireless/cnss_genl/Makefile
new file mode 100644
index 0000000..9431c9e
--- /dev/null
+++ b/drivers/net/wireless/cnss_genl/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CNSS_GENL) := cnss_nl.o
diff --git a/drivers/net/wireless/cnss_genl/cnss_nl.c b/drivers/net/wireless/cnss_genl/cnss_nl.c
new file mode 100644
index 0000000..78c885e
--- /dev/null
+++ b/drivers/net/wireless/cnss_genl/cnss_nl.c
@@ -0,0 +1,221 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/genetlink.h>
+#include <net/cnss_nl.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#define CLD80211_GENL_NAME "cld80211"
+
+#define CLD80211_MULTICAST_GROUP_SVC_MSGS "svc_msgs"
+#define CLD80211_MULTICAST_GROUP_HOST_LOGS "host_logs"
+#define CLD80211_MULTICAST_GROUP_FW_LOGS "fw_logs"
+#define CLD80211_MULTICAST_GROUP_PER_PKT_STATS "per_pkt_stats"
+#define CLD80211_MULTICAST_GROUP_DIAG_EVENTS "diag_events"
+#define CLD80211_MULTICAST_GROUP_FATAL_EVENTS "fatal_events"
+#define CLD80211_MULTICAST_GROUP_OEM_MSGS "oem_msgs"
+
+static struct genl_multicast_group nl_mcgrps[] = {
+ [CLD80211_MCGRP_SVC_MSGS] = { .name =
+ CLD80211_MULTICAST_GROUP_SVC_MSGS},
+ [CLD80211_MCGRP_HOST_LOGS] = { .name =
+ CLD80211_MULTICAST_GROUP_HOST_LOGS},
+ [CLD80211_MCGRP_FW_LOGS] = { .name =
+ CLD80211_MULTICAST_GROUP_FW_LOGS},
+ [CLD80211_MCGRP_PER_PKT_STATS] = { .name =
+ CLD80211_MULTICAST_GROUP_PER_PKT_STATS},
+ [CLD80211_MCGRP_DIAG_EVENTS] = { .name =
+ CLD80211_MULTICAST_GROUP_DIAG_EVENTS},
+ [CLD80211_MCGRP_FATAL_EVENTS] = { .name =
+ CLD80211_MULTICAST_GROUP_FATAL_EVENTS},
+ [CLD80211_MCGRP_OEM_MSGS] = { .name =
+ CLD80211_MULTICAST_GROUP_OEM_MSGS},
+};
+
+struct cld_ops {
+ cld80211_cb cb;
+ void *cb_ctx;
+};
+
+struct cld80211_nl_data {
+ struct cld_ops cld_ops[CLD80211_MAX_COMMANDS];
+};
+
+static struct cld80211_nl_data nl_data;
+
+static inline struct cld80211_nl_data *get_local_ctx(void)
+{
+ return &nl_data;
+}
+
+static struct genl_ops nl_ops[CLD80211_MAX_COMMANDS];
+
+/* policy for the attributes */
+static const struct nla_policy cld80211_policy[CLD80211_ATTR_MAX+1] = {
+ [CLD80211_ATTR_VENDOR_DATA] = { .type = NLA_NESTED },
+ [CLD80211_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = CLD80211_MAX_NL_DATA },
+};
+
+static int cld80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ u8 cmd_id = ops->cmd;
+ struct cld80211_nl_data *nl = get_local_ctx();
+
+ if (cmd_id < 1 || cmd_id > CLD80211_MAX_COMMANDS) {
+ pr_err("CLD80211: Command Not supported: %u\n", cmd_id);
+ return -EOPNOTSUPP;
+ }
+ info->user_ptr[0] = nl->cld_ops[cmd_id - 1].cb;
+ info->user_ptr[1] = nl->cld_ops[cmd_id - 1].cb_ctx;
+
+ return 0;
+}
+
+/* The netlink family */
+static struct genl_family cld80211_fam = {
+ .id = GENL_ID_GENERATE,
+ .name = CLD80211_GENL_NAME,
+ .hdrsize = 0, /* no private header */
+ .version = 1, /* no particular meaning now */
+ .maxattr = CLD80211_ATTR_MAX,
+ .netnsok = true,
+ .pre_doit = cld80211_pre_doit,
+ .post_doit = NULL,
+};
+
+int register_cld_cmd_cb(u8 cmd_id, cld80211_cb func, void *cb_ctx)
+{
+ struct cld80211_nl_data *nl = get_local_ctx();
+
+ pr_debug("CLD80211: Registering command: %d\n", cmd_id);
+ if (!cmd_id || cmd_id > CLD80211_MAX_COMMANDS) {
+ pr_debug("CLD80211: invalid command: %d\n", cmd_id);
+ return -EINVAL;
+ }
+
+ nl->cld_ops[cmd_id - 1].cb = func;
+ nl->cld_ops[cmd_id - 1].cb_ctx = cb_ctx;
+
+ return 0;
+}
+EXPORT_SYMBOL(register_cld_cmd_cb);
+
+int deregister_cld_cmd_cb(u8 cmd_id)
+{
+ struct cld80211_nl_data *nl = get_local_ctx();
+
+ pr_debug("CLD80211: De-registering command: %d\n", cmd_id);
+ if (!cmd_id || cmd_id > CLD80211_MAX_COMMANDS) {
+ pr_debug("CLD80211: invalid command: %d\n", cmd_id);
+ return -EINVAL;
+ }
+
+ nl->cld_ops[cmd_id - 1].cb = NULL;
+ nl->cld_ops[cmd_id - 1].cb_ctx = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(deregister_cld_cmd_cb);
+
+struct genl_family *cld80211_get_genl_family(void)
+{
+ return &cld80211_fam;
+}
+EXPORT_SYMBOL(cld80211_get_genl_family);
+
+int cld80211_get_mcgrp_id(enum cld80211_multicast_groups groupid)
+{
+ if (groupid > ARRAY_SIZE(nl_ops))
+ return -1;
+ return nl_mcgrps[groupid].id;
+}
+EXPORT_SYMBOL(cld80211_get_mcgrp_id);
+
+static int cld80211_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ cld80211_cb cld_cb;
+ void *cld_ctx;
+
+ cld_cb = info->user_ptr[0];
+
+ if (cld_cb == NULL) {
+ pr_err("CLD80211: Not supported\n");
+ return -EOPNOTSUPP;
+ }
+ cld_ctx = info->user_ptr[1];
+
+ if (info->attrs[CLD80211_ATTR_VENDOR_DATA]) {
+ cld_cb(nla_data(info->attrs[CLD80211_ATTR_VENDOR_DATA]),
+ nla_len(info->attrs[CLD80211_ATTR_VENDOR_DATA]),
+ cld_ctx, info->snd_portid);
+ } else {
+ pr_err("CLD80211: No CLD80211_ATTR_VENDOR_DATA\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __cld80211_init(void)
+{
+ int err, i;
+
+ memset(&nl_ops[0], 0, sizeof(nl_ops));
+
+ pr_info("CLD80211: Initializing\n");
+ for (i = 0; i < CLD80211_MAX_COMMANDS; i++) {
+ nl_ops[i].cmd = i + 1;
+ nl_ops[i].doit = cld80211_doit;
+ nl_ops[i].policy = cld80211_policy;
+ }
+
+ err = genl_register_family_with_ops(&cld80211_fam, nl_ops,
+ ARRAY_SIZE(nl_ops));
+ if (err) {
+ pr_err("CLD80211: Failed to register cld80211 family: %d\n",
+ err);
+ return err;
+ }
+ for (i = 0; i < ARRAY_SIZE(nl_mcgrps); i++) {
+ err = genl_register_mc_group(&cld80211_fam, &nl_mcgrps[i]);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+ err_out:
+ genl_unregister_family(&cld80211_fam);
+ return err;
+}
+
+static void __cld80211_exit(void)
+{
+ genl_unregister_family(&cld80211_fam);
+}
+
+static int __init cld80211_init(void)
+{
+ return __cld80211_init();
+}
+
+static void __exit cld80211_exit(void)
+{
+ __cld80211_exit();
+}
+
+module_init(cld80211_init);
+module_exit(cld80211_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS generic netlink module");
diff --git a/drivers/platform/msm/ipa/ipa.c b/drivers/platform/msm/ipa/ipa.c
index 1985b35..f7889e6 100644
--- a/drivers/platform/msm/ipa/ipa.c
+++ b/drivers/platform/msm/ipa/ipa.c
@@ -1431,7 +1431,7 @@
u32 *entry;
mem.base = dma_alloc_coherent(ipa_ctx->pdev, 4, &mem.phys_base,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!mem.base) {
IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
return -ENOMEM;
diff --git a/drivers/power/qcom/debug_core.c b/drivers/power/qcom/debug_core.c
index 70574ff..60cc819 100644
--- a/drivers/power/qcom/debug_core.c
+++ b/drivers/power/qcom/debug_core.c
@@ -290,7 +290,7 @@
msm_core_data = get_cpu_pwr_stats();
if (!msm_core_data)
- goto fail;
+ return PTR_ERR(NULL);
dir = debugfs_create_dir("msm_core", NULL);
if (IS_ERR_OR_NULL(dir))
diff --git a/drivers/power/qcom/msm-core.c b/drivers/power/qcom/msm-core.c
index 5e23c19..93248b9 100644
--- a/drivers/power/qcom/msm-core.c
+++ b/drivers/power/qcom/msm-core.c
@@ -347,11 +347,11 @@
{
int i;
int ret;
- int cpu;
struct cpu_activity_info *node;
struct cpu_static_info *sp, *clear_sp;
int mpidr = (argp->cluster << 8);
int cpumask = argp->cpumask;
+ int cpu = num_possible_cpus();
pr_debug("cpumask %d, cluster: %d\n", argp->cpumask, argp->cluster);
for (i = 0; i < MAX_CORES_PER_CLUSTER; i++, cpumask >>= 1) {
diff --git a/drivers/power/qpnp-fg.c b/drivers/power/qpnp-fg.c
index 4cac02a..e219e6a 100644
--- a/drivers/power/qpnp-fg.c
+++ b/drivers/power/qpnp-fg.c
@@ -1900,9 +1900,7 @@
}
battery_soc = get_battery_soc_raw(chip) * 100 / FULL_PERCENT_3B;
- if (rc) {
- goto error_done;
- } else if (battery_soc < 25 || battery_soc > 75) {
+ if (battery_soc < 25 || battery_soc > 75) {
if (fg_debug_mask & FG_AGING)
pr_info("Battery SoC (%d) out of range, aborting\n",
(int)battery_soc);
@@ -3487,6 +3485,7 @@
bool tried_again = false, vbat_in_range, profiles_same;
u8 reg = 0;
bool use_single_profile = 0;
+ old_batt_type = default_batt_type;
wait:
fg_stay_awake(&chip->profile_wakeup_source);
@@ -3520,7 +3519,6 @@
"bms", fg_batt_type);
if (!profile_node) {
pr_err("couldn't find profile handle\n");
- old_batt_type = default_batt_type;
rc = -ENODATA;
goto fail;
}
@@ -4951,7 +4949,6 @@
static int fg_setup_memif_offset(struct fg_chip *chip)
{
int rc;
- u8 dig_major;
rc = fg_read(chip, chip->revision, chip->mem_base + DIG_MINOR, 4);
if (rc) {
@@ -4965,7 +4962,7 @@
chip->offset = offset[0].address;
break;
default:
- pr_err("Digital Major rev=%d not supported\n", dig_major);
+ pr_err("Digital Major rev=%d not supported\n", chip->revision[DIG_MAJOR]);
return -EINVAL;
}
diff --git a/drivers/power/qpnp-smbcharger.c b/drivers/power/qpnp-smbcharger.c
index e5f6ac0..d517e17 100644
--- a/drivers/power/qpnp-smbcharger.c
+++ b/drivers/power/qpnp-smbcharger.c
@@ -3584,10 +3584,10 @@
atomic_set(&chip->discharge_while_plugged_event_count, 0);
}
discharge_while_plugged_check_next:
-discharge_while_plugged_check_error:
pr_smb(PR_MISC, "count:%d sf:%d ci:%d ct:%d vd:%d\n",
atomic_read(&chip->discharge_while_plugged_event_count),
status_full, chg_inhibit, chg_type, valid_chg_disabled);
+discharge_while_plugged_check_error:
schedule_delayed_work(&chip->discharge_while_plugged_work,
msecs_to_jiffies(DISCHARGE_WHILE_PLUGGED_LOOP_TIME_MS));
}
@@ -6161,6 +6161,8 @@
rc);
return rc;
}
+ } else {
+ vadc_dev = NULL;
}
chip = devm_kzalloc(&spmi->dev, sizeof(*chip), GFP_KERNEL);
diff --git a/drivers/power/smb1351-charger.c b/drivers/power/smb1351-charger.c
index 78a3843..d4a71d7 100644
--- a/drivers/power/smb1351-charger.c
+++ b/drivers/power/smb1351-charger.c
@@ -1575,7 +1575,7 @@
static void smb1351_chg_adc_notification(enum qpnp_tm_state state, void *ctx)
{
struct smb1351_charger *chip = ctx;
- struct battery_status *cur;
+ struct battery_status *cur = NULL;
int temp;
if (state >= ADC_TM_STATE_NUM) {
@@ -1675,6 +1675,11 @@
}
}
+ if (!cur) {
+ pr_err("invalid transaction: state %d, temp %d\n", state, temp);
+ return;
+ }
+
if (cur->batt_present)
chip->battery_missing = false;
else
diff --git a/drivers/soc/qcom/npa-dump.c b/drivers/soc/qcom/npa-dump.c
index 2b8cb3c..ef343f8 100644
--- a/drivers/soc/qcom/npa-dump.c
+++ b/drivers/soc/qcom/npa-dump.c
@@ -155,7 +155,7 @@
struct resource *res;
void __iomem *npa_base, *rpm_base;
struct dentry *dent;
- int ret;
+ int ret = 0;
/* Get the location of the NPA log's start address offset */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 62a6df9..e191f32 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -26,6 +26,18 @@
Enable to support an old 32-bit Android user-space. Breaks the new
Android user-space.
+config ANDROID_BINDER_DEVICES
+ string "Android Binder devices"
+ depends on ANDROID_BINDER_IPC
+ default "binder"
+ ---help---
+ Default value for the binder.devices parameter.
+
+ The binder.devices parameter is a comma-separated list of strings
+ that specifies the names of the binder device nodes that will be
+ created. Each binder device has its own context manager, and is
+ therefore logically separated from the other devices.
+
config ASHMEM
bool "Enable the Anonymous Shared Memory Subsystem"
default n
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 7340ef7..0c757b7 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -46,14 +46,13 @@
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);
+static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-static struct binder_node *binder_context_mgr_node;
-static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
@@ -112,6 +111,9 @@
static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(devices, binder_devices_param, charp, S_IRUGO);
+
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
@@ -141,6 +143,17 @@
binder_stop_on_user_error = 2; \
} while (0)
+#define to_flat_binder_object(hdr) \
+ container_of(hdr, struct flat_binder_object, hdr)
+
+#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
+
+#define to_binder_buffer_object(hdr) \
+ container_of(hdr, struct binder_buffer_object, hdr)
+
+#define to_binder_fd_array_object(hdr) \
+ container_of(hdr, struct binder_fd_array_object, hdr)
+
enum binder_stat_types {
BINDER_STAT_PROC,
BINDER_STAT_THREAD,
@@ -154,7 +167,7 @@
struct binder_stats {
int br[_IOC_NR(BR_FAILED_REPLY) + 1];
- int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+ int bc[_IOC_NR(BC_REPLY_SG) + 1];
int obj_created[BINDER_STAT_COUNT];
int obj_deleted[BINDER_STAT_COUNT];
};
@@ -182,6 +195,7 @@
int to_node;
int data_size;
int offsets_size;
+ const char *context_name;
};
struct binder_transaction_log {
int next;
@@ -205,6 +219,18 @@
return e;
}
+struct binder_context {
+ struct binder_node *binder_context_mgr_node;
+ kuid_t binder_context_mgr_uid;
+ const char *name;
+};
+
+struct binder_device {
+ struct hlist_node hlist;
+ struct miscdevice miscdev;
+ struct binder_context context;
+};
+
struct binder_work {
struct list_head entry;
enum {
@@ -277,6 +303,7 @@
struct binder_node *target_node;
size_t data_size;
size_t offsets_size;
+ size_t extra_buffers_size;
uint8_t data[0];
};
@@ -320,6 +347,7 @@
int ready_threads;
long default_priority;
struct dentry *debugfs_entry;
+ struct binder_context *context;
};
enum {
@@ -715,7 +743,9 @@
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
size_t data_size,
- size_t offsets_size, int is_async)
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
{
struct rb_node *n = proc->free_buffers.rb_node;
struct binder_buffer *buffer;
@@ -723,7 +753,7 @@
struct rb_node *best_fit = NULL;
void *has_page_addr;
void *end_page_addr;
- size_t size;
+ size_t size, data_offsets_size;
if (proc->vma == NULL) {
pr_err("%d: binder_alloc_buf, no vma\n",
@@ -731,15 +761,20 @@
return NULL;
}
- size = ALIGN(data_size, sizeof(void *)) +
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
- if (size < data_size || size < offsets_size) {
+ if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
proc->pid, data_size, offsets_size);
return NULL;
}
-
+ size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+ if (size < data_offsets_size || size < extra_buffers_size) {
+ binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
+ proc->pid, extra_buffers_size);
+ return NULL;
+ }
if (is_async &&
proc->free_async_space < size + sizeof(struct binder_buffer)) {
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -807,6 +842,7 @@
proc->pid, size, buffer);
buffer->data_size = data_size;
buffer->offsets_size = offsets_size;
+ buffer->extra_buffers_size = extra_buffers_size;
buffer->async_transaction = is_async;
if (is_async) {
proc->free_async_space -= size + sizeof(struct binder_buffer);
@@ -881,7 +917,8 @@
buffer_size = binder_buffer_size(proc, buffer);
size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *));
+ ALIGN(buffer->offsets_size, sizeof(void *)) +
+ ALIGN(buffer->extra_buffers_size, sizeof(void *));
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_free_buf %pK size %zd buffer_size %zd\n",
@@ -993,8 +1030,10 @@
if (internal) {
if (target_list == NULL &&
node->internal_strong_refs == 0 &&
- !(node == binder_context_mgr_node &&
- node->has_strong_ref)) {
+ !(node->proc &&
+ node == node->proc->context->
+ binder_context_mgr_node &&
+ node->has_strong_ref)) {
pr_err("invalid inc strong node for %d\n",
node->debug_id);
return -EINVAL;
@@ -1095,6 +1134,7 @@
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
struct binder_ref *ref, *new_ref;
+ struct binder_context *context = proc->context;
while (*p) {
parent = *p;
@@ -1117,7 +1157,7 @@
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+ new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (ref->desc > new_ref->desc)
@@ -1303,11 +1343,158 @@
}
}
+/**
+ * binder_validate_object() - checks for a valid metadata object in a buffer.
+ * @buffer: binder_buffer that we're parsing.
+ * @offset: offset in the buffer at which to validate an object.
+ *
+ * Return: If there's a valid metadata object at @offset in @buffer, the
+ * size of that object. Otherwise, it returns zero.
+ */
+static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
+{
+ /* Check if we can read a header first */
+ struct binder_object_header *hdr;
+ size_t object_size = 0;
+
+ if (offset > buffer->data_size - sizeof(*hdr) ||
+ buffer->data_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
+ return 0;
+
+ /* Ok, now see if we can read a complete object. */
+ hdr = (struct binder_object_header *)(buffer->data + offset);
+ switch (hdr->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER:
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE:
+ object_size = sizeof(struct flat_binder_object);
+ break;
+ case BINDER_TYPE_FD:
+ object_size = sizeof(struct binder_fd_object);
+ break;
+ case BINDER_TYPE_PTR:
+ object_size = sizeof(struct binder_buffer_object);
+ break;
+ case BINDER_TYPE_FDA:
+ object_size = sizeof(struct binder_fd_array_object);
+ break;
+ default:
+ return 0;
+ }
+ if (offset <= buffer->data_size - object_size &&
+ buffer->data_size >= object_size)
+ return object_size;
+ else
+ return 0;
+}
+
+/**
+ * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
+ * @b: binder_buffer containing the object
+ * @index: index in offset array at which the binder_buffer_object is
+ * located
+ * @start: points to the start of the offset array
+ * @num_valid: the number of valid offsets in the offset array
+ *
+ * Return: If @index is within the valid range of the offset array
+ * described by @start and @num_valid, and if there's a valid
+ * binder_buffer_object at the offset found in index @index
+ * of the offset array, that object is returned. Otherwise,
+ * %NULL is returned.
+ * Note that the offset found in index @index itself is not
+ * verified; this function assumes that @num_valid elements
+ * from @start were previously verified to have valid offsets.
+ */
+static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
+ binder_size_t index,
+ binder_size_t *start,
+ binder_size_t num_valid)
+{
+ struct binder_buffer_object *buffer_obj;
+ binder_size_t *offp;
+
+ if (index >= num_valid)
+ return NULL;
+
+ offp = start + index;
+ buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
+ if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
+ return NULL;
+
+ return buffer_obj;
+}
+
+/**
+ * binder_validate_fixup() - validates pointer/fd fixups happen in order.
+ * @b: transaction buffer
+ * @objects_start: start of objects buffer
+ * @buffer: binder_buffer_object in which to fix up
+ * @offset: start offset in @buffer to fix up
+ * @last_obj: last binder_buffer_object that we fixed up in
+ * @last_min_offset: minimum fixup offset in @last_obj
+ *
+ * Return: %true if a fixup in buffer @buffer at offset @offset is
+ * allowed.
+ *
+ * For safety reasons, we only allow fixups inside a buffer to happen
+ * at increasing offsets; additionally, we only allow fixup on the last
+ * buffer object that was verified, or one of its parents.
+ *
+ * Example of what is allowed:
+ *
+ * A
+ * B (parent = A, offset = 0)
+ * C (parent = A, offset = 16)
+ * D (parent = C, offset = 0)
+ * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ *
+ * Examples of what is not allowed:
+ *
+ * Decreasing offsets within the same parent:
+ * A
+ * C (parent = A, offset = 16)
+ * B (parent = A, offset = 0) // decreasing offset within A
+ *
+ * Referring to a parent that wasn't the last object or any of its parents:
+ * A
+ * B (parent = A, offset = 0)
+ * C (parent = A, offset = 0)
+ * C (parent = A, offset = 16)
+ * D (parent = B, offset = 0) // B is not A or any of A's parents
+ */
+static bool binder_validate_fixup(struct binder_buffer *b,
+ binder_size_t *objects_start,
+ struct binder_buffer_object *buffer,
+ binder_size_t fixup_offset,
+ struct binder_buffer_object *last_obj,
+ binder_size_t last_min_offset)
+{
+ if (!last_obj) {
+ /* Nothing to fix up in */
+ return false;
+ }
+
+ while (last_obj != buffer) {
+ /*
+ * Safe to retrieve the parent of last_obj, since it
+ * was already previously verified by the driver.
+ */
+ if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
+ return false;
+ last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
+ last_obj = (struct binder_buffer_object *)
+ (b->data + *(objects_start + last_obj->parent));
+ }
+ return (fixup_offset >= last_min_offset);
+}
+
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
binder_size_t *failed_at)
{
- binder_size_t *offp, *off_end;
+ binder_size_t *offp, *off_start, *off_end;
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -1318,26 +1505,30 @@
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
- offp = (binder_size_t *)(buffer->data +
- ALIGN(buffer->data_size, sizeof(void *)));
+ off_start = (binder_size_t *)(buffer->data +
+ ALIGN(buffer->data_size, sizeof(void *)));
if (failed_at)
off_end = failed_at;
else
- off_end = (void *)offp + buffer->offsets_size;
- for (; offp < off_end; offp++) {
- struct flat_binder_object *fp;
- if (*offp > buffer->data_size - sizeof(*fp) ||
- buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(u32))) {
- pr_err("transaction release %d bad offset %lld, size %zd\n",
+ off_end = (void *)off_start + buffer->offsets_size;
+ for (offp = off_start; offp < off_end; offp++) {
+ struct binder_object_header *hdr;
+ size_t object_size = binder_validate_object(buffer, *offp);
+
+ if (object_size == 0) {
+ pr_err("transaction release %d bad object at offset %lld, size %zd\n",
debug_id, (u64)*offp, buffer->data_size);
continue;
}
- fp = (struct flat_binder_object *)(buffer->data + *offp);
- switch (fp->type) {
+ hdr = (struct binder_object_header *)(buffer->data + *offp);
+ switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
- struct binder_node *node = binder_get_node(proc, fp->binder);
+ struct flat_binder_object *fp;
+ struct binder_node *node;
+
+ fp = to_flat_binder_object(hdr);
+ node = binder_get_node(proc, fp->binder);
if (node == NULL) {
pr_err("transaction release %d bad node %016llx\n",
debug_id, (u64)fp->binder);
@@ -1346,12 +1537,17 @@
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx\n",
node->debug_id, (u64)node->ptr);
- binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+ binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
+ 0);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle,
- fp->type == BINDER_TYPE_HANDLE);
+ struct flat_binder_object *fp;
+ struct binder_ref *ref;
+
+ fp = to_flat_binder_object(hdr);
+ ref = binder_get_ref(proc, fp->handle,
+ hdr->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
debug_id, fp->handle);
@@ -1360,32 +1556,348 @@
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, ref->node->debug_id);
- binder_dec_ref(&ref, fp->type == BINDER_TYPE_HANDLE);
+ binder_dec_ref(&ref, hdr->type == BINDER_TYPE_HANDLE);
} break;
- case BINDER_TYPE_FD:
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %d\n", fp->handle);
- if (failed_at)
- task_close_fd(proc, fp->handle);
- break;
+ case BINDER_TYPE_FD: {
+ struct binder_fd_object *fp = to_binder_fd_object(hdr);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " fd %d\n", fp->fd);
+ if (failed_at)
+ task_close_fd(proc, fp->fd);
+ } break;
+ case BINDER_TYPE_PTR:
+ /*
+ * Nothing to do here, this will get cleaned up when the
+ * transaction buffer gets freed
+ */
+ break;
+ case BINDER_TYPE_FDA: {
+ struct binder_fd_array_object *fda;
+ struct binder_buffer_object *parent;
+ uintptr_t parent_buffer;
+ u32 *fd_array;
+ size_t fd_index;
+ binder_size_t fd_buf_size;
+
+ fda = to_binder_fd_array_object(hdr);
+ parent = binder_validate_ptr(buffer, fda->parent,
+ off_start,
+ offp - off_start);
+ if (!parent) {
+ pr_err("transaction release %d bad parent offset",
+ debug_id);
+ continue;
+ }
+ /*
+ * Since the parent was already fixed up, convert it
+ * back to kernel address space to access it
+ */
+ parent_buffer = parent->buffer -
+ proc->user_buffer_offset;
+
+ fd_buf_size = sizeof(u32) * fda->num_fds;
+ if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+ pr_err("transaction release %d invalid number of fds (%lld)\n",
+ debug_id, (u64)fda->num_fds);
+ continue;
+ }
+ if (fd_buf_size > parent->length ||
+ fda->parent_offset > parent->length - fd_buf_size) {
+ /* No space for all file descriptors here. */
+ pr_err("transaction release %d not enough space for %lld fds in buffer\n",
+ debug_id, (u64)fda->num_fds);
+ continue;
+ }
+ fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
+ task_close_fd(proc, fd_array[fd_index]);
+ } break;
default:
pr_err("transaction release %d bad object type %x\n",
- debug_id, fp->type);
+ debug_id, hdr->type);
break;
}
}
}
+static int binder_translate_binder(struct flat_binder_object *fp,
+ struct binder_transaction *t,
+ struct binder_thread *thread)
+{
+ struct binder_node *node;
+ struct binder_ref *ref;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ node = binder_get_node(proc, fp->binder);
+ if (!node) {
+ node = binder_new_node(proc, fp->binder, fp->cookie);
+ if (!node)
+ return -ENOMEM;
+
+ node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ }
+ if (fp->cookie != node->cookie) {
+ binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
+ proc->pid, thread->pid, (u64)fp->binder,
+ node->debug_id, (u64)fp->cookie,
+ (u64)node->cookie);
+ return -EINVAL;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
+ return -EPERM;
+
+ ref = binder_get_ref_for_node(target_proc, node);
+ if (!ref)
+ return -EINVAL;
+
+ if (fp->hdr.type == BINDER_TYPE_BINDER)
+ fp->hdr.type = BINDER_TYPE_HANDLE;
+ else
+ fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
+ fp->handle = ref->desc;
+ fp->cookie = 0;
+ binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
+
+ trace_binder_transaction_node_to_ref(t, node, ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%016llx -> ref %d desc %d\n",
+ node->debug_id, (u64)node->ptr,
+ ref->debug_id, ref->desc);
+
+ return 0;
+}
+
+static int binder_translate_handle(struct flat_binder_object *fp,
+ struct binder_transaction *t,
+ struct binder_thread *thread)
+{
+ struct binder_ref *ref;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->hdr.type == BINDER_TYPE_HANDLE);
+ if (!ref) {
+ binder_user_error("%d:%d got transaction with invalid handle, %d\n",
+ proc->pid, thread->pid, fp->handle);
+ return -EINVAL;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
+ return -EPERM;
+
+ if (ref->node->proc == target_proc) {
+ if (fp->hdr.type == BINDER_TYPE_HANDLE)
+ fp->hdr.type = BINDER_TYPE_BINDER;
+ else
+ fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
+ fp->binder = ref->node->ptr;
+ fp->cookie = ref->node->cookie;
+ binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ trace_binder_transaction_ref_to_node(t, ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> node %d u%016llx\n",
+ ref->debug_id, ref->desc, ref->node->debug_id,
+ (u64)ref->node->ptr);
+ } else {
+ struct binder_ref *new_ref;
+
+ new_ref = binder_get_ref_for_node(target_proc, ref->node);
+ if (!new_ref)
+ return -EINVAL;
+
+ fp->binder = 0;
+ fp->handle = new_ref->desc;
+ fp->cookie = 0;
+ binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
+ NULL);
+ trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, new_ref->debug_id,
+ new_ref->desc, ref->node->debug_id);
+ }
+ return 0;
+}
+
+static int binder_translate_fd(int fd,
+ struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_transaction *in_reply_to)
+{
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+ int target_fd;
+ struct file *file;
+ int ret;
+ bool target_allows_fd;
+
+ if (in_reply_to)
+ target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
+ else
+ target_allows_fd = t->buffer->target_node->accept_fds;
+ if (!target_allows_fd) {
+ binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
+ proc->pid, thread->pid,
+ in_reply_to ? "reply" : "transaction",
+ fd);
+ ret = -EPERM;
+ goto err_fd_not_accepted;
+ }
+
+ file = fget(fd);
+ if (!file) {
+ binder_user_error("%d:%d got transaction with invalid fd, %d\n",
+ proc->pid, thread->pid, fd);
+ ret = -EBADF;
+ goto err_fget;
+ }
+ ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
+ if (ret < 0) {
+ ret = -EPERM;
+ goto err_security;
+ }
+
+ target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+ if (target_fd < 0) {
+ ret = -ENOMEM;
+ goto err_get_unused_fd;
+ }
+ task_fd_install(target_proc, target_fd, file);
+ trace_binder_transaction_fd(t, fd, target_fd);
+ binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
+ fd, target_fd);
+
+ return target_fd;
+
+err_get_unused_fd:
+err_security:
+ fput(file);
+err_fget:
+err_fd_not_accepted:
+ return ret;
+}
+
+static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+ struct binder_buffer_object *parent,
+ struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_transaction *in_reply_to)
+{
+ binder_size_t fdi, fd_buf_size, num_installed_fds;
+ int target_fd;
+ uintptr_t parent_buffer;
+ u32 *fd_array;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ fd_buf_size = sizeof(u32) * fda->num_fds;
+ if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+ binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
+ proc->pid, thread->pid, (u64)fda->num_fds);
+ return -EINVAL;
+ }
+ if (fd_buf_size > parent->length ||
+ fda->parent_offset > parent->length - fd_buf_size) {
+ /* No space for all file descriptors here. */
+ binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
+ proc->pid, thread->pid, (u64)fda->num_fds);
+ return -EINVAL;
+ }
+ /*
+ * Since the parent was already fixed up, convert it
+ * back to the kernel address space to access it
+ */
+ parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
+ binder_user_error("%d:%d parent offset not aligned correctly.\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+ for (fdi = 0; fdi < fda->num_fds; fdi++) {
+ target_fd = binder_translate_fd(fd_array[fdi], t, thread,
+ in_reply_to);
+ if (target_fd < 0)
+ goto err_translate_fd_failed;
+ fd_array[fdi] = target_fd;
+ }
+ return 0;
+
+err_translate_fd_failed:
+ /*
+ * Failed to allocate fd or security error, free fds
+ * installed so far.
+ */
+ num_installed_fds = fdi;
+ for (fdi = 0; fdi < num_installed_fds; fdi++)
+ task_close_fd(target_proc, fd_array[fdi]);
+ return target_fd;
+}
+
+static int binder_fixup_parent(struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_buffer_object *bp,
+ binder_size_t *off_start,
+ binder_size_t num_valid,
+ struct binder_buffer_object *last_fixup_obj,
+ binder_size_t last_fixup_min_off)
+{
+ struct binder_buffer_object *parent;
+ u8 *parent_buffer;
+ struct binder_buffer *b = t->buffer;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
+ return 0;
+
+ parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
+ if (!parent) {
+ binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+
+ if (!binder_validate_fixup(b, off_start,
+ parent, bp->parent_offset,
+ last_fixup_obj,
+ last_fixup_min_off)) {
+ binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+
+ if (parent->length < sizeof(binder_uintptr_t) ||
+ bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
+ /* No space for a pointer here! */
+ binder_user_error("%d:%d got transaction with invalid parent offset\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+ parent_buffer = (u8 *)(parent->buffer -
+ target_proc->user_buffer_offset);
+ *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
+
+ return 0;
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
- struct binder_transaction_data *tr, int reply)
+ struct binder_transaction_data *tr, int reply,
+ binder_size_t extra_buffers_size)
{
+ int ret;
struct binder_transaction *t;
struct binder_work *tcomplete;
- binder_size_t *offp, *off_end;
+ binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
+ u8 *sg_bufp, *sg_buf_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -1394,6 +1906,9 @@
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
+ struct binder_buffer_object *last_fixup_obj = NULL;
+ binder_size_t last_fixup_min_off = 0;
+ struct binder_context *context = proc->context;
e = binder_transaction_log_add(&binder_transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
@@ -1402,6 +1917,7 @@
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
+ e->context_name = proc->context->name;
if (reply) {
in_reply_to = thread->transaction_stack;
@@ -1453,7 +1969,7 @@
}
target_node = ref->node;
} else {
- target_node = binder_context_mgr_node;
+ target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
@@ -1518,20 +2034,22 @@
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ (u64)extra_buffers_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ (u64)extra_buffers_size);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
@@ -1547,7 +2065,8 @@
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
- tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+ tr->offsets_size, extra_buffers_size,
+ !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
@@ -1560,8 +2079,9 @@
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
- offp = (binder_size_t *)(t->buffer->data +
- ALIGN(tr->data_size, sizeof(void *)));
+ off_start = (binder_size_t *)(t->buffer->data +
+ ALIGN(tr->data_size, sizeof(void *)));
+ offp = off_start;
if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
@@ -1583,166 +2103,139 @@
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
- off_end = (void *)offp + tr->offsets_size;
+ if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
+ binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
+ proc->pid, thread->pid,
+ extra_buffers_size);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ off_end = (void *)off_start + tr->offsets_size;
+ sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
+ sg_buf_end = sg_bufp + extra_buffers_size;
off_min = 0;
for (; offp < off_end; offp++) {
- struct flat_binder_object *fp;
- if (*offp > t->buffer->data_size - sizeof(*fp) ||
- *offp < off_min ||
- t->buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(u32))) {
- binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+ struct binder_object_header *hdr;
+ size_t object_size = binder_validate_object(t->buffer, *offp);
+
+ if (object_size == 0 || *offp < off_min) {
+ binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
proc->pid, thread->pid, (u64)*offp,
(u64)off_min,
- (u64)(t->buffer->data_size -
- sizeof(*fp)));
+ (u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
- fp = (struct flat_binder_object *)(t->buffer->data + *offp);
- off_min = *offp + sizeof(struct flat_binder_object);
- switch (fp->type) {
+
+ hdr = (struct binder_object_header *)(t->buffer->data + *offp);
+ off_min = *offp + object_size;
+ switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
- struct binder_ref *ref;
- struct binder_node *node = binder_get_node(proc, fp->binder);
- if (node == NULL) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
- if (node == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_new_node_failed;
- }
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
- }
- if (fp->cookie != node->cookie) {
- binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
- proc->pid, thread->pid,
- (u64)fp->binder, node->debug_id,
- (u64)fp->cookie, (u64)node->cookie);
- goto err_binder_get_ref_for_node_failed;
- }
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- ref = binder_get_ref_for_node(target_proc, node);
- if (ref == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- if (fp->type == BINDER_TYPE_BINDER)
- fp->type = BINDER_TYPE_HANDLE;
- else
- fp->type = BINDER_TYPE_WEAK_HANDLE;
- fp->binder = 0;
- fp->handle = ref->desc;
- fp->cookie = 0;
- binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
- &thread->todo);
+ struct flat_binder_object *fp;
- trace_binder_transaction_node_to_ref(t, node, ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%016llx -> ref %d desc %d\n",
- node->debug_id, (u64)node->ptr,
- ref->debug_id, ref->desc);
+ fp = to_flat_binder_object(hdr);
+ ret = binder_translate_binder(fp, t, thread);
+ if (ret < 0) {
+ return_error = BR_FAILED_REPLY;
+ goto err_translate_failed;
+ }
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle,
- fp->type == BINDER_TYPE_HANDLE);
- if (ref == NULL) {
- binder_user_error("%d:%d got transaction with invalid handle, %d\n",
- proc->pid,
- thread->pid, fp->handle);
+ struct flat_binder_object *fp;
+
+ fp = to_flat_binder_object(hdr);
+ ret = binder_translate_handle(fp, t, thread);
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_failed;
- }
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_failed;
- }
- if (ref->node->proc == target_proc) {
- if (fp->type == BINDER_TYPE_HANDLE)
- fp->type = BINDER_TYPE_BINDER;
- else
- fp->type = BINDER_TYPE_WEAK_BINDER;
- fp->binder = ref->node->ptr;
- fp->cookie = ref->node->cookie;
- binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
- trace_binder_transaction_ref_to_node(t, ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> node %d u%016llx\n",
- ref->debug_id, ref->desc, ref->node->debug_id,
- (u64)ref->node->ptr);
- } else {
- struct binder_ref *new_ref;
- new_ref = binder_get_ref_for_node(target_proc, ref->node);
- if (new_ref == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- fp->binder = 0;
- fp->handle = new_ref->desc;
- fp->cookie = 0;
- binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
- trace_binder_transaction_ref_to_ref(t, ref,
- new_ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, new_ref->debug_id,
- new_ref->desc, ref->node->debug_id);
+ goto err_translate_failed;
}
} break;
case BINDER_TYPE_FD: {
- int target_fd;
- struct file *file;
+ struct binder_fd_object *fp = to_binder_fd_object(hdr);
+ int target_fd = binder_translate_fd(fp->fd, t, thread,
+ in_reply_to);
- if (reply) {
- if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
- binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
- proc->pid, thread->pid, fp->handle);
- return_error = BR_FAILED_REPLY;
- goto err_fd_not_allowed;
- }
- } else if (!target_node->accept_fds) {
- binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
- proc->pid, thread->pid, fp->handle);
- return_error = BR_FAILED_REPLY;
- goto err_fd_not_allowed;
- }
-
- file = fget(fp->handle);
- if (file == NULL) {
- binder_user_error("%d:%d got transaction with invalid fd, %d\n",
- proc->pid, thread->pid, fp->handle);
- return_error = BR_FAILED_REPLY;
- goto err_fget_failed;
- }
- if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
- fput(file);
- return_error = BR_FAILED_REPLY;
- goto err_get_unused_fd_failed;
- }
- target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
if (target_fd < 0) {
- fput(file);
return_error = BR_FAILED_REPLY;
- goto err_get_unused_fd_failed;
+ goto err_translate_failed;
}
- task_fd_install(target_proc, target_fd, file);
- trace_binder_transaction_fd(t, fp->handle, target_fd);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %d -> %d\n", fp->handle, target_fd);
- /* TODO: fput? */
- fp->binder = 0;
- fp->handle = target_fd;
+ fp->pad_binder = 0;
+ fp->fd = target_fd;
} break;
+ case BINDER_TYPE_FDA: {
+ struct binder_fd_array_object *fda =
+ to_binder_fd_array_object(hdr);
+ struct binder_buffer_object *parent =
+ binder_validate_ptr(t->buffer, fda->parent,
+ off_start,
+ offp - off_start);
+ if (!parent) {
+ binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_parent;
+ }
+ if (!binder_validate_fixup(t->buffer, off_start,
+ parent, fda->parent_offset,
+ last_fixup_obj,
+ last_fixup_min_off)) {
+ binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_parent;
+ }
+ ret = binder_translate_fd_array(fda, parent, t, thread,
+ in_reply_to);
+ if (ret < 0) {
+ return_error = BR_FAILED_REPLY;
+ goto err_translate_failed;
+ }
+ last_fixup_obj = parent;
+ last_fixup_min_off =
+ fda->parent_offset + sizeof(u32) * fda->num_fds;
+ } break;
+ case BINDER_TYPE_PTR: {
+ struct binder_buffer_object *bp =
+ to_binder_buffer_object(hdr);
+ size_t buf_left = sg_buf_end - sg_bufp;
+ if (bp->length > buf_left) {
+ binder_user_error("%d:%d got transaction with too large buffer\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ if (copy_from_user_preempt_disabled(
+ sg_bufp,
+ (const void __user *)(uintptr_t)
+ bp->buffer, bp->length)) {
+ binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ /* Fixup buffer pointer to target proc address space */
+ bp->buffer = (uintptr_t)sg_bufp +
+ target_proc->user_buffer_offset;
+ sg_bufp += ALIGN(bp->length, sizeof(u64));
+
+ ret = binder_fixup_parent(t, thread, bp, off_start,
+ offp - off_start,
+ last_fixup_obj,
+ last_fixup_min_off);
+ if (ret < 0) {
+ return_error = BR_FAILED_REPLY;
+ goto err_translate_failed;
+ }
+ last_fixup_obj = bp;
+ last_fixup_min_off = 0;
+ } break;
default:
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
- proc->pid, thread->pid, fp->type);
+ proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
}
@@ -1778,14 +2271,10 @@
}
return;
-err_get_unused_fd_failed:
-err_fget_failed:
-err_fd_not_allowed:
-err_binder_get_ref_for_node_failed:
-err_binder_get_ref_failed:
-err_binder_new_node_failed:
+err_translate_failed:
err_bad_object_type:
err_bad_offset:
+err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
@@ -1828,6 +2317,7 @@
binder_size_t *consumed)
{
uint32_t cmd;
+ struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -1854,10 +2344,10 @@
if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (target == 0 && binder_context_mgr_node &&
+ if (target == 0 && context->binder_context_mgr_node &&
(cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
ref = binder_get_ref_for_node(proc,
- binder_context_mgr_node);
+ context->binder_context_mgr_node);
if (ref->desc != target) {
binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
proc->pid, thread->pid,
@@ -2008,6 +2498,18 @@
break;
}
+ case BC_TRANSACTION_SG:
+ case BC_REPLY_SG: {
+ struct binder_transaction_data_sg tr;
+
+ if (copy_from_user_preempt_disabled(&tr, ptr,
+ sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+ binder_transaction(proc, thread, &tr.transaction_data,
+ cmd == BC_REPLY_SG, tr.buffers_size);
+ break;
+ }
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
@@ -2015,7 +2517,8 @@
if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
- binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+ binder_transaction(proc, thread, &tr,
+ cmd == BC_REPLY, 0);
break;
}
@@ -2689,6 +3192,7 @@
{
int ret;
struct binder_proc *proc = filp->private_data;
+ struct binder_context *context = proc->context;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
@@ -2764,7 +3268,7 @@
}
break;
case BINDER_SET_CONTEXT_MGR:
- if (binder_context_mgr_node != NULL) {
+ if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto err;
@@ -2772,25 +3276,27 @@
ret = security_binder_set_context_mgr(proc->tsk);
if (ret < 0)
goto err;
- if (uid_valid(binder_context_mgr_uid)) {
- if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
+ if (uid_valid(context->binder_context_mgr_uid)) {
+ if (!uid_eq(context->binder_context_mgr_uid,
+ current->cred->euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, current->cred->euid),
- from_kuid(&init_user_ns, binder_context_mgr_uid));
+ from_kuid(&init_user_ns,
+ context->binder_context_mgr_uid));
ret = -EPERM;
goto err;
}
} else
- binder_context_mgr_uid = current->cred->euid;
- binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (binder_context_mgr_node == NULL) {
+ context->binder_context_mgr_uid = current->cred->euid;
+ context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
+ if (!context->binder_context_mgr_node) {
ret = -ENOMEM;
goto err;
}
- binder_context_mgr_node->local_weak_refs++;
- binder_context_mgr_node->local_strong_refs++;
- binder_context_mgr_node->has_strong_ref = 1;
- binder_context_mgr_node->has_weak_ref = 1;
+ context->binder_context_mgr_node->local_weak_refs++;
+ context->binder_context_mgr_node->local_strong_refs++;
+ context->binder_context_mgr_node->has_strong_ref = 1;
+ context->binder_context_mgr_node->has_weak_ref = 1;
break;
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
@@ -2868,7 +3374,7 @@
const char *failure_string;
struct binder_buffer *buffer;
- if (proc->tsk != current)
+ if (proc->tsk != current->group_leader)
return -EINVAL;
if ((vma->vm_end - vma->vm_start) > SZ_4M)
@@ -2966,6 +3472,7 @@
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
+ struct binder_device *binder_dev;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
current->group_leader->pid, current->pid);
@@ -2973,11 +3480,14 @@
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
- get_task_struct(current);
- proc->tsk = current;
+ get_task_struct(current->group_leader);
+ proc->tsk = current->group_leader;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
+ binder_dev = container_of(filp->private_data, struct binder_device,
+ miscdev);
+ proc->context = &binder_dev->context;
binder_lock(__func__);
@@ -2992,8 +3502,17 @@
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ /*
+ * proc debug entries are shared between contexts, so
+ * this will fail if the process tries to open the driver
+ * again with a different context. The printing code will
+ * print all contexts for a given PID anyway, so this
+ * is not a problem.
+ */
proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
- binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+ binder_debugfs_dir_entry_proc,
+ (void *)(unsigned long)proc->pid,
+ &binder_proc_fops);
}
return 0;
@@ -3083,6 +3602,7 @@
static void binder_deferred_release(struct binder_proc *proc)
{
struct binder_transaction *t;
+ struct binder_context *context = proc->context;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, buffers,
active_transactions, page_count;
@@ -3092,11 +3612,12 @@
hlist_del(&proc->proc_node);
- if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+ if (context->binder_context_mgr_node &&
+ context->binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%s: %d context_mgr_node gone\n",
__func__, proc->pid);
- binder_context_mgr_node = NULL;
+ context->binder_context_mgr_node = NULL;
}
threads = 0;
@@ -3388,6 +3909,7 @@
size_t header_pos;
seq_printf(m, "proc %d\n", proc->pid);
+ seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
@@ -3457,7 +3979,9 @@
"BC_EXIT_LOOPER",
"BC_REQUEST_DEATH_NOTIFICATION",
"BC_CLEAR_DEATH_NOTIFICATION",
- "BC_DEAD_BINDER_DONE"
+ "BC_DEAD_BINDER_DONE",
+ "BC_TRANSACTION_SG",
+ "BC_REPLY_SG",
};
static const char * const binder_objstat_strings[] = {
@@ -3512,6 +4036,7 @@
int count, strong, weak;
seq_printf(m, "proc %d\n", proc->pid);
+ seq_printf(m, "context %s\n", proc->context->name);
count = 0;
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
@@ -3619,23 +4144,18 @@
static int binder_proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *itr;
- struct binder_proc *proc = m->private;
+ int pid = (unsigned long)m->private;
int do_lock = !binder_debug_no_lock;
- bool valid_proc = false;
if (do_lock)
binder_lock(__func__);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
- if (itr == proc) {
- valid_proc = true;
- break;
+ if (itr->pid == pid) {
+ seq_puts(m, "binder proc state:\n");
+ print_binder_proc(m, itr, 1);
}
}
- if (valid_proc) {
- seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, proc, 1);
- }
if (do_lock)
binder_unlock(__func__);
return 0;
@@ -3645,11 +4165,11 @@
struct binder_transaction_log_entry *e)
{
seq_printf(m,
- "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+ "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
- e->from_thread, e->to_proc, e->to_thread, e->to_node,
- e->target_handle, e->data_size, e->offsets_size);
+ e->from_thread, e->to_proc, e->to_thread, e->context_name,
+ e->to_node, e->target_handle, e->data_size, e->offsets_size);
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
@@ -3677,20 +4197,44 @@
.release = binder_release,
};
-static struct miscdevice binder_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "binder",
- .fops = &binder_fops
-};
-
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
+static int __init init_binder_device(const char *name)
+{
+ int ret;
+ struct binder_device *binder_device;
+
+ binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
+ if (!binder_device)
+ return -ENOMEM;
+
+ binder_device->miscdev.fops = &binder_fops;
+ binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
+ binder_device->miscdev.name = name;
+
+ binder_device->context.binder_context_mgr_uid = INVALID_UID;
+ binder_device->context.name = name;
+
+ ret = misc_register(&binder_device->miscdev);
+ if (ret < 0) {
+ kfree(binder_device);
+ return ret;
+ }
+
+ hlist_add_head(&binder_device->hlist, &binder_devices);
+
+ return ret;
+}
+
static int __init binder_init(void)
{
int ret;
+ char *device_name, *device_names;
+ struct binder_device *device;
+ struct hlist_node *tmp;
binder_deferred_workqueue = create_singlethread_workqueue("binder");
if (!binder_deferred_workqueue)
@@ -3700,7 +4244,7 @@
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
binder_debugfs_dir_entry_root);
- ret = misc_register(&binder_miscdev);
+
if (binder_debugfs_dir_entry_root) {
debugfs_create_file("state",
S_IRUGO,
@@ -3728,6 +4272,37 @@
&binder_transaction_log_failed,
&binder_transaction_log_fops);
}
+
+ /*
+ * Copy the module_parameter string, because we don't want to
+ * tokenize it in-place.
+ */
+ device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
+ if (!device_names) {
+ ret = -ENOMEM;
+ goto err_alloc_device_names_failed;
+ }
+ strcpy(device_names, binder_devices_param);
+
+ while ((device_name = strsep(&device_names, ","))) {
+ ret = init_binder_device(device_name);
+ if (ret)
+ goto err_init_binder_device_failed;
+ }
+
+ return ret;
+
+err_init_binder_device_failed:
+ hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
+ misc_deregister(&device->miscdev);
+ hlist_del(&device->hlist);
+ kfree(device);
+ }
+err_alloc_device_names_failed:
+ debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+
+ destroy_workqueue(binder_deferred_workqueue);
+
return ret;
}
diff --git a/drivers/staging/android/uapi/binder.h b/drivers/staging/android/uapi/binder.h
index 04c735d..52cd3f4 100644
--- a/drivers/staging/android/uapi/binder.h
+++ b/drivers/staging/android/uapi/binder.h
@@ -32,6 +32,8 @@
BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
+ BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};
enum {
@@ -47,6 +49,14 @@
typedef __u64 binder_uintptr_t;
#endif
+/**
+ * struct binder_object_header - header shared by all binder metadata objects.
+ * @type: type of the object
+ */
+struct binder_object_header {
+ __u32 type;
+};
+
/*
* This is the flattened representation of a Binder object for transfer
* between processes. The 'offsets' supplied as part of a binder transaction
@@ -55,9 +65,8 @@
* between processes.
*/
struct flat_binder_object {
- /* 8 bytes for large_flat_header. */
- __u32 type;
- __u32 flags;
+ struct binder_object_header hdr;
+ __u32 flags;
/* 8 bytes of data. */
union {
@@ -69,6 +78,86 @@
binder_uintptr_t cookie;
};
+/**
+ * struct binder_fd_object - describes a file descriptor to be fixed up.
+ * @hdr: common header structure
+ * @pad_flags: padding to remain compatible with old userspace code
+ * @pad_binder: padding to remain compatible with old userspace code
+ * @fd: file descriptor
+ * @cookie: opaque data, used by user-space
+ */
+struct binder_fd_object {
+ struct binder_object_header hdr;
+ __u32 pad_flags;
+ union {
+ binder_uintptr_t pad_binder;
+ __u32 fd;
+ };
+
+ binder_uintptr_t cookie;
+};
+
+/* struct binder_buffer_object - object describing a userspace buffer
+ * @hdr: common header structure
+ * @flags: one or more BINDER_BUFFER_* flags
+ * @buffer: address of the buffer
+ * @length: length of the buffer
+ * @parent: index in offset array pointing to parent buffer
+ * @parent_offset: offset in @parent pointing to this buffer
+ *
+ * A binder_buffer object represents an object that the
+ * binder kernel driver can copy verbatim to the target
+ * address space. A buffer itself may be pointed to from
+ * within another buffer, meaning that the pointer inside
+ * that other buffer needs to be fixed up as well. This
+ * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
+ * flag in @flags, by setting @parent buffer to the index
+ * in the offset array pointing to the parent binder_buffer_object,
+ * and by setting @parent_offset to the offset in the parent buffer
+ * at which the pointer to this buffer is located.
+ */
+struct binder_buffer_object {
+ struct binder_object_header hdr;
+ __u32 flags;
+ binder_uintptr_t buffer;
+ binder_size_t length;
+ binder_size_t parent;
+ binder_size_t parent_offset;
+};
+
+enum {
+ BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
+};
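As a minimal sketch of the parent/fixup scheme described above (the binder_* names come from this header; the outer/inner structures, offsets, and variable names are hypothetical, and <stddef.h> plus this UAPI header are assumed to be included):

/*
 * Hypothetical example: an outer struct embeds a pointer to a separate blob.
 * The driver rewrites that embedded pointer in the copy it delivers to the
 * target process, because obj_inner names obj_outer as its parent and sets
 * BINDER_BUFFER_FLAG_HAS_PARENT.
 */
struct outer { binder_uintptr_t inner_ptr; };   /* hypothetical userspace type */
struct outer o;
char inner[64];

struct binder_buffer_object obj_outer = {
	.hdr.type = BINDER_TYPE_PTR,
	.buffer   = (binder_uintptr_t)&o,
	.length   = sizeof(o),
};

struct binder_buffer_object obj_inner = {
	.hdr.type      = BINDER_TYPE_PTR,
	.flags         = BINDER_BUFFER_FLAG_HAS_PARENT,
	.buffer        = (binder_uintptr_t)inner,
	.length        = sizeof(inner),
	.parent        = 0,   /* index of obj_outer in the transaction's offset array */
	.parent_offset = offsetof(struct outer, inner_ptr),
};

/*
 * Both objects go into the transaction data and are referenced from the
 * offset array, obj_outer first; on delivery the driver patches o.inner_ptr
 * to the target-process address of the copied inner buffer.
 */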
+
+/* struct binder_fd_array_object - object describing an array of fds in a buffer
+ * @hdr: common header structure
+ * @pad: padding to ensure correct alignment
+ * @num_fds: number of file descriptors in the buffer
+ * @parent: index in offset array to buffer holding the fd array
+ * @parent_offset: start offset of fd array in the buffer
+ *
+ * A binder_fd_array object represents an array of file
+ * descriptors embedded in a binder_buffer_object. It is
+ * different from a regular binder_buffer_object because it
+ * describes a list of file descriptors to fix up, not an opaque
+ * blob of memory, and hence the kernel needs to treat it differently.
+ *
+ * An example of how this would be used is with Android's
+ * native_handle_t object, which is a struct with a list of integers
+ * and a list of file descriptors. The native_handle_t struct itself
+ * will be represented by a struct binder_buffer_object, whereas the
+ * embedded list of file descriptors is represented by a
+ * struct binder_fd_array_object with that binder_buffer_object as
+ * a parent.
+ */
+struct binder_fd_array_object {
+ struct binder_object_header hdr;
+ __u32 pad;
+ binder_size_t num_fds;
+ binder_size_t parent;
+ binder_size_t parent_offset;
+};
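A similarly hedged sketch of the native_handle_t-style layout mentioned above (the handle struct, its field names, and the offset-array index are illustrative, not part of this header):

/* Hypothetical parent struct carrying two file descriptors at a fixed offset. */
struct my_handle { __u32 num_ints; __u32 num_fds; __u32 fds[2]; };
struct my_handle h = { .num_ints = 0, .num_fds = 2 };

struct binder_buffer_object parent_obj = {
	.hdr.type = BINDER_TYPE_PTR,
	.buffer   = (binder_uintptr_t)&h,
	.length   = sizeof(h),
};

struct binder_fd_array_object fda = {
	.hdr.type      = BINDER_TYPE_FDA,
	.num_fds       = 2,
	.parent        = 0,   /* index of parent_obj in the transaction's offset array */
	.parent_offset = offsetof(struct my_handle, fds),
};

/*
 * The driver walks h.fds[] in the copied parent buffer and replaces each
 * source fd with an fd it installs in the target process.
 */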
+
/*
* On 64-bit platforms where user code may run in 32-bits the driver must
* translate the buffer (and local binder) addresses appropriately.
@@ -159,6 +248,11 @@
} data;
};
+struct binder_transaction_data_sg {
+ struct binder_transaction_data transaction_data;
+ binder_size_t buffers_size;
+};
+
struct binder_ptr_cookie {
binder_uintptr_t ptr;
binder_uintptr_t cookie;
@@ -341,6 +435,12 @@
/*
* void *: cookie
*/
+
+ BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
+ BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
+ /*
+ * binder_transaction_data_sg: the sent command.
+ */
};
#endif /* _UAPI_LINUX_BINDER_H */
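To show how the new commands tie together with binder_transaction_data_sg, here is a rough userspace sketch (binder_fd, total_ptr_payload, and the fill-in of transaction_data are hypothetical; <string.h> and <sys/ioctl.h> are assumed to be included along with this header):

/*
 * Hypothetical sketch: send a transaction whose BINDER_TYPE_PTR payloads live
 * in an extra scatter-gather area of buffers_size bytes (8-byte aligned).
 */
struct binder_transaction_data_sg tr_sg;
memset(&tr_sg, 0, sizeof(tr_sg));
/* ... fill tr_sg.transaction_data exactly as for BC_TRANSACTION ... */
tr_sg.buffers_size = (total_ptr_payload + 7) & ~(__u64)7;

struct {
	__u32 cmd;
	struct binder_transaction_data_sg tr;
} __attribute__((packed)) writebuf = { BC_TRANSACTION_SG, tr_sg };

struct binder_write_read bwr = {
	.write_size   = sizeof(writebuf),
	.write_buffer = (binder_uintptr_t)&writebuf,
};
ioctl(binder_fd, BINDER_WRITE_READ, &bwr);   /* binder_fd: an open binder device */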
diff --git a/drivers/staging/nanohub/spi.c b/drivers/staging/nanohub/spi.c
index 30c0bf8..c25e02b 100644
--- a/drivers/staging/nanohub/spi.c
+++ b/drivers/staging/nanohub/spi.c
@@ -321,7 +321,11 @@
if (ret == 0) {
if (offset > 0) {
packet = (struct nanohub_packet *)rx;
- memcpy(&rx[offset], comms->rx_buffer, xfer.len);
+ if (offset + xfer.len > max_length)
+ memcpy(&rx[offset], comms->rx_buffer,
+ max_length - offset);
+ else
+ memcpy(&rx[offset], comms->rx_buffer, xfer.len);
spi_data->rx_offset = spi_data->rx_length = 0;
} else {
for (i = 0; i < xfer.len; i++) {
diff --git a/drivers/staging/qcacld-2.0/CORE/CLD_TXRX/HTT/htt.h b/drivers/staging/qcacld-2.0/CORE/CLD_TXRX/HTT/htt.h
index aa96850..c608cf5 100644
--- a/drivers/staging/qcacld-2.0/CORE/CLD_TXRX/HTT/htt.h
+++ b/drivers/staging/qcacld-2.0/CORE/CLD_TXRX/HTT/htt.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -135,9 +135,13 @@
* 3.27 Add a new interface for flow-control. The following t2h messages have
* been included: HTT_T2H_MSG_TYPE_FLOW_POOL_MAP and
* HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
+ * 3.28 Add a new interface for ring interface change. The following two h2t
+ * and one t2h messages have been included:
+ * HTT_H2T_MSG_TYPE_SRING_SETUP, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
+ * and HTT_T2H_MSG_TYPE_SRING_SETUP_DONE
*/
#define HTT_CURRENT_VERSION_MAJOR 3
-#define HTT_CURRENT_VERSION_MINOR 27
+#define HTT_CURRENT_VERSION_MINOR 28
#define HTT_NUM_TX_FRAG_DESC 1024
@@ -458,17 +462,19 @@
/*=== host -> target messages ===============================================*/
enum htt_h2t_msg_type {
- HTT_H2T_MSG_TYPE_VERSION_REQ = 0x0,
- HTT_H2T_MSG_TYPE_TX_FRM = 0x1,
- HTT_H2T_MSG_TYPE_RX_RING_CFG = 0x2,
- HTT_H2T_MSG_TYPE_STATS_REQ = 0x3,
- HTT_H2T_MSG_TYPE_SYNC = 0x4,
- HTT_H2T_MSG_TYPE_AGGR_CFG = 0x5,
- HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 0x6,
- DEPRECATED_HTT_H2T_MSG_TYPE_MGMT_TX = 0x7, /* no longer used */
- HTT_H2T_MSG_TYPE_WDI_IPA_CFG = 0x8,
- HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ = 0x9,
- HTT_H2T_MSG_TYPE_AGGR_CFG_EX = 0xa, /* per vdev amsdu subfrm limit */
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0x0,
+ HTT_H2T_MSG_TYPE_TX_FRM = 0x1,
+ HTT_H2T_MSG_TYPE_RX_RING_CFG = 0x2,
+ HTT_H2T_MSG_TYPE_STATS_REQ = 0x3,
+ HTT_H2T_MSG_TYPE_SYNC = 0x4,
+ HTT_H2T_MSG_TYPE_AGGR_CFG = 0x5,
+ HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 0x6,
+ DEPRECATED_HTT_H2T_MSG_TYPE_MGMT_TX = 0x7, /* no longer used */
+ HTT_H2T_MSG_TYPE_WDI_IPA_CFG = 0x8,
+ HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ = 0x9,
+ HTT_H2T_MSG_TYPE_AGGR_CFG_EX = 0xa, /* per vdev amsdu subfrm limit */
+ HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
/* keep this last */
HTT_H2T_NUM_MSGS
};
@@ -2908,6 +2914,1125 @@
((_var) |= ((_val) << HTT_WDI_IPA_OP_REQUEST_OP_CODE_S)); \
} while (0)
+/*
+ * @brief host -> target HTT_SRING_SETUP message
+ *
+ * @details
+ * After the target is booted up, the Host can send an SRING setup message for
+ * each host-facing LMAC SRING. The Target sets up HW registers based on the
+ * setup message and confirms back to the Host if response_required is set.
+ * The Host should wait for the confirmation message before sending a new
+ * SRING setup message.
+ *
+ * The message would appear as follows:
+ * |31 24|23 20|19|18 16|15|14 8|7 0|
+ * |--------------- +-----------------+----------------+------------------|
+ * | ring_type | ring_id | pdev_id | msg_type |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_hi |
+ * |----------------------------------------------------------------------|
+ * |ring_misc_cfg_flag|ring_entry_size| ring_size |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_data |
+ * |----------------------------------------------------------------------|
+ * | intr_timer_th |IM| intr_batch_counter_th |
+ * |----------------------------------------------------------------------|
+ * | reserved |RR|PTCF| intr_low_threshold |
+ * |----------------------------------------------------------------------|
+ * Where
+ * IM = sw_intr_mode
+ * RR = response_required
+ * PTCF = prefetch_timer_cfg
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_SRING_SETUP
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identifies which ring is to be set up;
+ * more details can be found in enum htt_srng_ring_id
+ * b'24:31 - ring_type: identifies the type of host ring;
+ * more details can be found in enum htt_srng_ring_type
+ * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address
+ * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of ring base address
+ * dword3 - b'0:15 - ring_size: size of the ring in units of 4-byte words
+ * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units
+ * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and
+ * SW_TO_HW_RING.
+ * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs.
+ * dword4 - b'0:31 - ring_head_offset32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type A_UINT32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword5 - b'0:31 - ring_head_offset32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type A_UINT32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword6 - b'0:31 - ring_tail_offset32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type A_UINT32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword7 - b'0:31 - ring_tail_offset32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type A_UINT32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword10 - b'0:31 - ring_msi_data: MSI data
+ * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword11 - b'0:14 - intr_batch_counter_th:
+ * batch counter threshold is in units of 4-byte words.
+ * HW internally maintains and increments batch count.
+ * (see SRING spec for detail description).
+ * When batch count reaches threshold value, an interrupt
+ * is generated by HW.
+ * b'15 - sw_intr_mode:
+ * This configuration shall be static.
+ * Only programmed at power up.
+ * 0: generate pulse style sw interrupts
+ * 1: generate level style sw interrupts
+ * b'16:31 - intr_timer_th:
+ * The timer init value when timer is idle or is
+ * initialized to start downcounting.
+ * In 8us units (to cover a range of 0 to 524 ms)
+ * dword12 - b'0:15 - intr_low_threshold:
+ * Used only by Consumer ring to generate ring_sw_int_p.
+ * Ring entries low-threshold water mark, which is used
+ * in combination with the interrupt timer as well as
+ * the clearing of the level interrupt.
+ * b'16:18 - prefetch_timer_cfg:
+ * Used only by Consumer ring to set timer mode to
+ * support Application prefetch handling.
+ * The external tail offset/pointer will be updated
+ * at following intervals:
+ * 3'b000: (Prefetch feature disabled; used only for debug)
+ * 3'b001: 1 usec
+ * 3'b010: 4 usec
+ * 3'b011: 8 usec (default)
+ * 3'b100: 16 usec
+ * Others: Reserved
+ * b'19 - response_required:
+ * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
+ * b'20:31 - reserved: reserved for future use
+ */
+PREPACK struct htt_sring_setup_t {
+ A_UINT32 msg_type: 8,
+ pdev_id: 8,
+ ring_id: 8,
+ ring_type: 8;
+ A_UINT32 ring_base_addr_lo;
+ A_UINT32 ring_base_addr_hi;
+ A_UINT32 ring_size: 16,
+ ring_entry_size: 8,
+ ring_misc_cfg_flag: 8;
+ A_UINT32 ring_head_offset32_remote_addr_lo;
+ A_UINT32 ring_head_offset32_remote_addr_hi;
+ A_UINT32 ring_tail_offset32_remote_addr_lo;
+ A_UINT32 ring_tail_offset32_remote_addr_hi;
+ A_UINT32 ring_msi_addr_lo;
+ A_UINT32 ring_msi_addr_hi;
+ A_UINT32 ring_msi_data;
+ A_UINT32 intr_batch_counter_th: 15,
+ sw_intr_mode: 1,
+ intr_timer_th: 16;
+ A_UINT32 intr_low_threshold: 16,
+ prefetch_timer_cfg: 3,
+ response_required: 1,
+ reserved1: 12;
+} POSTPACK;
+
+enum htt_srng_ring_type {
+ HTT_HW_TO_SW_RING = 0,
+ HTT_SW_TO_HW_RING,
+ HTT_SW_TO_SW_RING,
+ /* Insert new ring types above this line */
+};
+
+enum htt_srng_ring_id {
+ HTT_RXDMA_HOST_BUF_RING = 0, /* Used by FW to feed remote buffers and update remote packets */
+ HTT_RXDMA_MONITOR_STATUS_RING, /* For getting all PPDU/MPDU/MSDU status descriptors on host for monitor VAP or packet log purposes */
+ HTT_RXDMA_MONITOR_BUF_RING, /* For feeding free host buffers to RxDMA for monitor traffic upload */
+ HTT_RXDMA_MONITOR_DESC_RING, /* For providing free LINK_DESC to RXDMA for monitor traffic upload */
+ HTT_RXDMA_MONITOR_DEST_RING, /* Per MPDU indication to host for monitor traffic upload */
+ HTT_HOST1_TO_FW_RXBUF_RING, /* (mobile only) used by host to provide remote RX buffers */
+ HTT_HOST2_TO_FW_RXBUF_RING, /* (mobile only) second ring used by host to provide remote RX buffers */
+ /* Add Other SRING which can't be directly configured by host software above this line */
+};
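As a rough illustration of how a host driver might populate the first words of this message using the accessor macros defined below (msg_buf and ring_paddr are hypothetical, and only dword0 and dword1 are shown):

/* Hypothetical sketch: build the start of an HTT_SRING_SETUP message. */
A_UINT32 *msg_word = (A_UINT32 *)msg_buf;          /* msg_buf: caller-provided */

*msg_word = HTT_H2T_MSG_TYPE_SRING_SETUP;          /* msg_type occupies b'0:7 */
HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, 0);         /* 0 = SOC/UMAC-level ring */
HTT_SRING_SETUP_RING_ID_SET(*msg_word, HTT_RXDMA_HOST_BUF_RING);
HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, HTT_SW_TO_HW_RING);

msg_word++;
*msg_word = 0;
HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word, ring_paddr & 0xffffffff);
/* ... the remaining words follow the dword2..dword12 layout documented above. */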
+
+#define HTT_SRING_SETUP_SZ (sizeof(struct htt_sring_setup_t))
+
+#define HTT_SRING_SETUP_PDEV_ID_M 0x0000ff00
+#define HTT_SRING_SETUP_PDEV_ID_S 8
+#define HTT_SRING_SETUP_PDEV_ID_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_PDEV_ID_M) >> \
+ HTT_SRING_SETUP_PDEV_ID_S)
+#define HTT_SRING_SETUP_PDEV_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_PDEV_ID, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_PDEV_ID_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_RING_ID_M 0x00ff0000
+#define HTT_SRING_SETUP_RING_ID_S 16
+#define HTT_SRING_SETUP_RING_ID_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_ID_M) >> \
+ HTT_SRING_SETUP_RING_ID_S)
+#define HTT_SRING_SETUP_RING_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_ID, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_ID_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_RING_TYPE_M 0xff000000
+#define HTT_SRING_SETUP_RING_TYPE_S 24
+#define HTT_SRING_SETUP_RING_TYPE_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_TYPE_M) >> \
+ HTT_SRING_SETUP_RING_TYPE_S)
+#define HTT_SRING_SETUP_RING_TYPE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_TYPE, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_TYPE_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_RING_BASE_ADDR_LO_M 0xffffffff
+#define HTT_SRING_SETUP_RING_BASE_ADDR_LO_S 0
+#define HTT_SRING_SETUP_RING_BASE_ADDR_LO_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_BASE_ADDR_LO_M) >> \
+ HTT_SRING_SETUP_RING_BASE_ADDR_LO_S)
+#define HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_BASE_ADDR_LO, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_BASE_ADDR_LO_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_RING_BASE_ADDR_HI_M 0xffffffff
+#define HTT_SRING_SETUP_RING_BASE_ADDR_HI_S 0
+#define HTT_SRING_SETUP_RING_BASE_ADDR_HI_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_BASE_ADDR_HI_M) >> \
+ HTT_SRING_SETUP_RING_BASE_ADDR_HI_S)
+#define HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_BASE_ADDR_HI, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_BASE_ADDR_HI_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_RING_SIZE_M 0x0000ffff
+#define HTT_SRING_SETUP_RING_SIZE_S 0
+#define HTT_SRING_SETUP_RING_SIZE_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_SIZE_M) >> \
+ HTT_SRING_SETUP_RING_SIZE_S)
+#define HTT_SRING_SETUP_RING_SIZE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_SIZE, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_SIZE_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_ENTRY_SIZE_M 0x00ff0000
+#define HTT_SRING_SETUP_ENTRY_SIZE_S 16
+#define HTT_SRING_SETUP_ENTRY_SIZE_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_ENTRY_SIZE_M) >> \
+ HTT_SRING_SETUP_ENTRY_SIZE_S)
+#define HTT_SRING_SETUP_ENTRY_SIZE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_ENTRY_SIZE, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_ENTRY_SIZE_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_MISC_CFG_FLAG_M 0xff000000
+#define HTT_SRING_SETUP_MISC_CFG_FLAG_S 24
+#define HTT_SRING_SETUP_MISC_CFG_FLAG_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_MISC_CFG_FLAG_M) >> \
+ HTT_SRING_SETUP_MISC_CFG_FLAG_S)
+#define HTT_SRING_SETUP_MISC_CFG_FLAG_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_MISC_CFG_FLAG, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_MISC_CFG_FLAG_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_M 0xffffffff
+#define HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_S 0
+#define HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_M) >> \
+ HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_S)
+#define HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_M 0xffffffff
+#define HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_S 0
+#define HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_M) >> \
+ HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_S)
+#define HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_M 0xffffffff
+#define HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_S 0
+#define HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_M) >> \
+ HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_S)
+#define HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_M 0xffffffff
+#define HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_S 0
+#define HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_M) >> \
+ HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_S)
+#define HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_RING_MISC_ADDR_LO_M 0xffffffff
+#define HTT_SRING_SETUP_RING_MISC_ADDR_LO_S 0
+#define HTT_SRING_SETUP_RING_MISC_ADDR_LO_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_MISC_ADDR_LO_M) >> \
+ HTT_SRING_SETUP_RING_MISC_ADDR_LO_S)
+#define HTT_SRING_SETUP_RING_MISC_ADDR_LO_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_MISC_ADDR_LO, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_MISC_ADDR_LO_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_RING_MISC_ADDR_HI_M 0xffffffff
+#define HTT_SRING_SETUP_RING_MISC_ADDR_HI_S 0
+#define HTT_SRING_SETUP_RING_MISC_ADDR_HI_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_MISC_ADDR_HI_M) >> \
+ HTT_SRING_SETUP_RING_MISC_ADDR_HI_S)
+#define HTT_SRING_SETUP_RING_MISC_ADDR_HI_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_MISC_ADDR_HI, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_MISC_ADDR_HI_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_RING_MISC_DATA_M 0xffffffff
+#define HTT_SRING_SETUP_RING_MISC_DATA_S 0
+#define HTT_SRING_SETUP_RING_MISC_DATA_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_MISC_DATA_M) >> \
+ HTT_SRING_SETUP_RING_MISC_DATA_S)
+#define HTT_SRING_SETUP_RING_MISC_DATA_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_MISC_DATA, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_MISC_DATA_S)); \
+ } while (0)
+
+/* This control bit is applicable to only Producer, which updates Ring ID field
+ * of each descriptor before pushing into the ring.
+ * 0: updates ring_id(default)
+ * 1: ring_id updating disabled */
+#define HTT_SRING_SETUP_RING_MISC_DATA_RING_ID_DISABLE_M 0x01
+#define HTT_SRING_SETUP_RING_MISC_DATA_RING_ID_DISABLE_S 0
+#define HTT_SRING_SETUP_RING_MISC_DATA_RING_ID_DISABLE_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_MISC_DATA_RING_ID_DISABLE_M) >> \
+ HTT_SRING_SETUP_RING_MISC_DATA_RING_ID_DISABLE_S)
+#define HTT_SRING_SETUP_RING_MISC_DATA_RING_ID_DISABLE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_MISC_DATA_RING_ID_DISABLE, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_MISC_DATA_RING_ID_DISABLE_S)); \
+ } while (0)
+
+/* This control bit is applicable to only Producer, which updates Loopcnt field
+ * of each descriptor before pushing into the ring.
+ * 0: updates Loopcnt(default)
+ * 1: Loopcnt updating disabled */
+#define HTT_SRING_SETUP_RING_MISC_DATA_LOOPCOUNT_DISABLE_M 0x02
+#define HTT_SRING_SETUP_RING_MISC_DATA_LOOPCOUNT_DISABLE_S 1
+#define HTT_SRING_SETUP_RING_MISC_DATA_LOOPCOUNT_DISABLE_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_MISC_DATA_LOOPCOUNT_DISABLE_M) >> \
+ HTT_SRING_SETUP_RING_MISC_DATA_LOOPCOUNT_DISABLE_S)
+#define HTT_SRING_SETUP_RING_MISC_DATA_LOOPCOUNT_DISABLE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_MISC_DATA_LOOPCOUNT_DISABLE, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_MISC_DATA_LOOPCOUNT_DISABLE_S)); \
+ } while (0)
+
+/* Secured access enable/disable bit. SRNG drives value of this register bit
+ * into security_id port of GXI/AXI. */
+#define HTT_SRING_SETUP_RING_MISC_DATA_SECURITY_M 0x04
+#define HTT_SRING_SETUP_RING_MISC_DATA_SECURITY_S 2
+#define HTT_SRING_SETUP_RING_MISC_DATA_SECURITY_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_MISC_DATA_SECURITY_M) >> \
+ HTT_SRING_SETUP_RING_MISC_DATA_SECURITY_S)
+#define HTT_SRING_SETUP_RING_MISC_DATA_SECURITY_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_MISC_DATA_SECURITY, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_MISC_DATA_SECURITY_S)); \
+ } while (0)
+
+/* During MSI write operation, SRNG drives value of this register bit into
+ * swap bit of GXI/AXI. */
+#define HTT_SRING_SETUP_RING_MISC_DATA_MSI_SWAP_M 0x08
+#define HTT_SRING_SETUP_RING_MISC_DATA_MSI_SWAP_S 3
+#define HTT_SRING_SETUP_RING_MISC_DATA_MSI_SWAP_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_MISC_DATA_MSI_SWAP_M) >> \
+ HTT_SRING_SETUP_RING_MISC_DATA_MSI_SWAP_S)
+#define HTT_SRING_SETUP_RING_MISC_DATA_MSI_SWAP_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_MISC_DATA_MSI_SWAP, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_MISC_DATA_MSI_SWAP_S)); \
+ } while (0)
+
+/* During Pointer write operation, SRNG drives value of this register bit into
+ * swap bit of GXI/AXI. */
+#define HTT_SRING_SETUP_RING_MISC_DATA_HOST_FW_SWAP_M 0x10
+#define HTT_SRING_SETUP_RING_MISC_DATA_HOST_FW_SWAP_S 4
+#define HTT_SRING_SETUP_RING_MISC_DATA_HOST_FW_SWAP_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_MISC_DATA_HOST_FW_SWAP_M) >> \
+ HTT_SRING_SETUP_RING_MISC_DATA_HOST_FW_SWAP_S)
+#define HTT_SRING_SETUP_RING_MISC_DATA_HOST_FW_SWAP_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_MISC_DATA_HOST_FW_SWAP, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_MISC_DATA_HOST_FW_SWAP_S)); \
+ } while (0)
+
+/* During any data or TLV write operation, SRNG drives value of this register
+ * bit into swap bit of GXI/AXI. */
+#define HTT_SRING_SETUP_RING_MISC_DATA_TLV_SWAP_M 0x20
+#define HTT_SRING_SETUP_RING_MISC_DATA_TLV_SWAP_S 5
+#define HTT_SRING_SETUP_RING_MISC_DATA_TLV_SWAP_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RING_MISC_DATA_TLV_SWAP_M) >> \
+ HTT_SRING_SETUP_RING_MISC_DATA_TLV_SWAP_S)
+#define HTT_SRING_SETUP_RING_MISC_DATA_TLV_SWAP_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RING_MISC_DATA_TLV_SWAP, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RING_MISC_DATA_TLV_SWAP_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_RING_MISC_CFG_RESERVED1 0x40
+#define HTT_SRING_SETUP_RING_MISC_CFG_RESERVED2 0x80
+
+#define HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_M 0x00007fff
+#define HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_S 0
+#define HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_M) >> \
+ HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_S)
+#define HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_SW_INTR_MODE_M 0x00008000
+#define HTT_SRING_SETUP_SW_INTR_MODE_S 15
+#define HTT_SRING_SETUP_SW_INTR_MODE_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_SW_INTR_MODE_M) >> \
+ HTT_SRING_SETUP_SW_INTR_MODE_S)
+#define HTT_SRING_SETUP_SW_INTR_MODE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_SW_INTR_MODE, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_SW_INTR_MODE_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_INTR_TIMER_TH_M 0xffff0000
+#define HTT_SRING_SETUP_INTR_TIMER_TH_S 16
+#define HTT_SRING_SETUP_INTR_TIMER_TH_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_INTR_TIMER_TH_M) >> \
+ HTT_SRING_SETUP_INTR_TIMER_TH_S)
+#define HTT_SRING_SETUP_INTR_TIMER_TH_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_INTR_TIMER_TH, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_INTR_TIMER_TH_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_INTR_LOW_TH_M 0x0000ffff
+#define HTT_SRING_SETUP_INTR_LOW_TH_S 0
+#define HTT_SRING_SETUP_INTR_LOW_TH_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_INTR_LOW_TH_M) >> \
+ HTT_SRING_SETUP_INTR_LOW_TH_S)
+#define HTT_SRING_SETUP_INTR_LOW_TH_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_INTR_LOW_TH, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_INTR_LOW_TH_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_PREFETCH_TIMER_CFG_M 0x00070000
+#define HTT_SRING_SETUP_PREFETCH_TIMER_CFG_S 16
+#define HTT_SRING_SETUP_PREFETCH_TIMER_CFG_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_PREFETCH_TIMER_CFG_M) >> \
+ HTT_SRING_SETUP_PREFETCH_TIMER_CFG_S)
+#define HTT_SRING_SETUP_PREFETCH_TIMER_CFG_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_PREFETCH_TIMER_CFG, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_PREFETCH_TIMER_CFG_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_RESPONSE_REQUIRED_M 0x00080000
+#define HTT_SRING_SETUP_RESPONSE_REQUIRED_S 19
+#define HTT_SRING_SETUP_RESPONSE_REQUIRED_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_RESPONSE_REQUIRED_M) >> \
+ HTT_SRING_SETUP_RESPONSE_REQUIRED_S)
+#define HTT_SRING_SETUP_RESPONSE_REQUIRED_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_RESPONSE_REQUIRED, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_RESPONSE_REQUIRED_S)); \
+ } while (0)
+
+
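+/*
+ * Illustrative usage sketch (not part of the HTT interface definition):
+ * the fragment below shows how a host driver might fill the ring misc
+ * configuration and interrupt threshold words of an
+ * HTT_H2T_MSG_TYPE_SRING_SETUP message with the SET macros above.  The
+ * local names misc_word, intr_word0 and intr_word1, and the threshold
+ * values, are placeholders chosen only for this sketch.
+ *
+ *   A_UINT32 misc_word = 0, intr_word0 = 0, intr_word1 = 0;
+ *
+ *   // keep ring_id and Loopcnt updates enabled, request TLV byte swapping
+ *   HTT_SRING_SETUP_RING_MISC_DATA_RING_ID_DISABLE_SET(misc_word, 0);
+ *   HTT_SRING_SETUP_RING_MISC_DATA_LOOPCOUNT_DISABLE_SET(misc_word, 0);
+ *   HTT_SRING_SETUP_RING_MISC_DATA_TLV_SWAP_SET(misc_word, 1);
+ *
+ *   // interrupt after 64 ring entries or when the interrupt timer expires
+ *   HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(intr_word0, 64);
+ *   HTT_SRING_SETUP_INTR_TIMER_TH_SET(intr_word0, 8);
+ *
+ *   // low-threshold interrupt plus a setup-done response from the target
+ *   HTT_SRING_SETUP_INTR_LOW_TH_SET(intr_word1, 16);
+ *   HTT_SRING_SETUP_RESPONSE_REQUIRED_SET(intr_word1, 1);
+ */
+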
+/**
+ * @brief HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message
+ *
+ * @details
+ * The HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by the host to
+ * configure RXDMA rings.
+ * The configuration is per ring and includes both packet subtypes
+ * and PPDU/MPDU TLVs.
+ *
+ * The message would appear as follows:
+ *
+ * |31 26|25|24|23 16|15 8|7 0|
+ * |-----------------+----------------+----------------+---------------|
+ * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type |
+ * |-------------------------------------------------------------------|
+ * | rsvd2 | ring_buffer_size |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_0 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_1 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_2 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_3 |
+ * |-------------------------------------------------------------------|
+ * | tlv_filter_in_flags |
+ * |-------------------------------------------------------------------|
+ * Where:
+ * PS = pkt_swap
+ * SS = status_swap
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ *          b'16:23 - ring_id: Identifies the ring to configure.
+ *                    More details can be obtained from enum htt_srng_ring_id
+ * b'24 - status_swap: 1 is to swap status TLV
+ * b'25 - pkt_swap: 1 is to swap packet TLV
+ * b'26:31 - rsvd1: reserved for future use
+ * dword1 - b'0:15 - ring_buffer_size: size of buffers referenced by rx ring,
+ *          in byte units.
+ *          Valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ *          - b'16:31 - rsvd2: Reserved for future use
+ * dword2 - b'0:31 - packet_type_enable_flags_0:
+ * Enable MGMT packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * FP: Filter_Pass
+ * MD: Monitor_Direct
+ * MO: Monitor_Other
+ * 10 mgmt subtypes * 3 bits -> 30 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
+ * dword3 - b'0:31 - packet_type_enable_flags_1:
+ * Enable MGMT packet from 0b1010 to 0b1111
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
+ * dword4 - b'0:31 - packet_type_enable_flags_2:
+ * Enable CTRL packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
+ * dword5 - b'0:31 - packet_type_enable_flags_3:
+ * Enable CTRL packet from 0b1010 to 0b1111,
+ * MCAST_DATA, UCAST_DATA, NULL_DATA
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
+ * dword6 - b'0:31 - tlv_filter_in_flags:
+ * Filter in Attention/MPDU/PPDU/Header/User tlvs
+ * Refer to CFG_TLV_FILTER_IN_FLAG defs
+ */
+PREPACK struct htt_rx_ring_selection_cfg_t {
+ A_UINT32 msg_type: 8,
+ pdev_id: 8,
+ ring_id: 8,
+ status_swap: 1,
+ pkt_swap: 1,
+ rsvd1: 6;
+ A_UINT32 ring_buffer_size: 16,
+ rsvd2: 16;
+ A_UINT32 packet_type_enable_flags_0;
+ A_UINT32 packet_type_enable_flags_1;
+ A_UINT32 packet_type_enable_flags_2;
+ A_UINT32 packet_type_enable_flags_3;
+ A_UINT32 tlv_filter_in_flags;
+} POSTPACK;
+
+#define HTT_RX_RING_SELECTION_CFG_SZ (sizeof(struct htt_rx_ring_selection_cfg_t))
+
+#define HTT_RX_RING_SELECTION_CFG_PDEV_ID_M 0x0000ff00
+#define HTT_RX_RING_SELECTION_CFG_PDEV_ID_S 8
+#define HTT_RX_RING_SELECTION_CFG_PDEV_ID_GET(_var) \
+ (((_var) & HTT_RX_RING_SELECTION_CFG_PDEV_ID_M) >> \
+ HTT_RX_RING_SELECTION_CFG_PDEV_ID_S)
+#define HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_RX_RING_SELECTION_CFG_PDEV_ID, _val); \
+ ((_var) |= ((_val) << HTT_RX_RING_SELECTION_CFG_PDEV_ID_S)); \
+ } while (0)
+
+#define HTT_RX_RING_SELECTION_CFG_RING_ID_M 0x00ff0000
+#define HTT_RX_RING_SELECTION_CFG_RING_ID_S 16
+#define HTT_RX_RING_SELECTION_CFG_RING_ID_GET(_var) \
+ (((_var) & HTT_RX_RING_SELECTION_CFG_RING_ID_M) >> \
+ HTT_RX_RING_SELECTION_CFG_RING_ID_S)
+#define HTT_RX_RING_SELECTION_CFG_RING_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_RX_RING_SELECTION_CFG_RING_ID, _val); \
+ ((_var) |= ((_val) << HTT_RX_RING_SELECTION_CFG_RING_ID_S)); \
+ } while (0)
+
+#define HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SWAP_M 0x01000000
+#define HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SWAP_S 24
+#define HTT_RX_RING_SELECTION_CFG_STATUS_TLV_GET(_var) \
+ (((_var) & HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SWAP_M) >> \
+ HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SWAP_S)
+#define HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SWAP, _val); \
+ ((_var) |= ((_val) << HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SWAP_S)); \
+ } while (0)
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TLV_SWAP_M 0x02000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TLV_SWAP_S 25
+#define HTT_RX_RING_SELECTION_CFG_PKT_TLV_GET(_var) \
+ (((_var) & HTT_RX_RING_SELECTION_CFG_PKT_TLV_SWAP_M) >> \
+ HTT_RX_RING_SELECTION_CFG_PKT_TLV_SWAP_S)
+#define HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_RX_RING_SELECTION_CFG_PKT_TLV_SWAP, _val); \
+ ((_var) |= ((_val) << HTT_RX_RING_SELECTION_CFG_PKT_TLV_SWAP_S)); \
+ } while (0)
+
+#define HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_M 0x0000ffff
+#define HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_S 0
+#define HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_GET(_var) \
+ (((_var) & HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_M) >> \
+ HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_S)
+#define HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE, _val); \
+ ((_var) |= ((_val) << HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_S)); \
+ } while (0)
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_0_M 0xffffffff
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_0_S 0
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_0_GET(_var) \
+ (((_var) & HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_0_M) >> \
+ HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_0_S)
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_0_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_0, _val); \
+ ((_var) |= ((_val) << HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_0_S)); \
+ } while (0)
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_1_M 0xffffffff
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_1_S 0
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_1_GET(_var) \
+ (((_var) & HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_1_M) >> \
+ HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_1_S)
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_1_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_1, _val); \
+ ((_var) |= ((_val) << HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_1_S)); \
+ } while (0)
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_2_M 0xffffffff
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_2_S 0
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_2_GET(_var) \
+ (((_var) & HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_2_M) >> \
+ HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_2_S)
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_2_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_2, _val); \
+ ((_var) |= ((_val) << HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_2_S)); \
+ } while (0)
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_3_M 0xffffffff
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_3_S 0
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_3_GET(_var) \
+ (((_var) & HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_3_M) >> \
+ HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_3_S)
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_3_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_3, _val); \
+ ((_var) |= ((_val) << HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG_3_S)); \
+ } while (0)
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_M 0xffffffff
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_S 0
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_GET(_var) \
+ (((_var) & HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_M) >> \
+ HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_S)
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG, _val); \
+ ((_var) |= ((_val) << HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_S)); \
+ } while (0)
+
+/*
+ * Subtype based MGMT frames enable bits.
+ * FP: Filter_Pass, MD: Monitor_Direct MO: Monitor_Other
+ */
+/* association request */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0000_M 0x00000001
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0000_S 0
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0000_M 0x00000002
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0000_S 1
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0000_M 0x00000004
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0000_S 2
+
+/* association response */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0001_M 0x00000008
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0001_S 3
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0001_M 0x00000010
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0001_S 4
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0001_M 0x00000020
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0001_S 5
+
+/* Reassociation request */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0010_M 0x00000040
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0010_S 6
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0010_M 0x00000080
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0010_S 7
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0010_M 0x00000100
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0010_S 8
+
+/* Reassociation response */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0011_M 0x00000200
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0011_S 9
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0011_M 0x00000400
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0011_S 10
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0011_M 0x00000800
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0011_S 11
+
+/* Probe request */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0100_M 0x00001000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0100_S 12
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0100_M 0x00002000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0100_S 13
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0100_M 0x00004000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0100_S 14
+
+/* Probe response */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0101_M 0x00008000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0101_S 15
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0101_M 0x00010000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0101_S 16
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0101_M 0x00020000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0101_S 17
+
+/* Timing Advertisement */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0110_M 0x00040000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0110_S 18
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0110_M 0x00080000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0110_S 19
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0110_M 0x00100000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0110_S 20
+
+/* Reserved */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0111_M 0x00200000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_0111_S 21
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0111_M 0x00400000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_0111_S 22
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0111_M 0x00800000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_0111_S 23
+
+/* Beacon */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_1000_M 0x01000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_1000_S 24
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_1000_M 0x02000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_1000_S 25
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_1000_M 0x04000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_1000_S 26
+
+/* ATIM */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_1001_M 0x08000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_FP_MGMT_1001_S 27
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_1001_M 0x10000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MD_MGMT_1001_S 28
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_1001_M 0x20000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG0_MO_MGMT_1001_S 29
+
+/* Disassociation */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1010_M 0x00000001
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1010_S 0
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1010_M 0x00000002
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1010_S 1
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1010_M 0x00000004
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1010_S 2
+
+/* Authentication */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1011_M 0x00000008
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1011_S 3
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1011_M 0x00000010
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1011_S 4
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1011_M 0x00000020
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1011_S 5
+
+/* Deauthentication */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1100_M 0x00000040
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1100_S 6
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1100_M 0x00000080
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1100_S 7
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1100_M 0x00000100
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1100_S 8
+
+/* Action */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1101_M 0x00000200
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1101_S 9
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1101_M 0x00000400
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1101_S 10
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1101_M 0x00000800
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1101_S 11
+
+/* Action No Ack */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1110_M 0x00001000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1110_S 12
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1110_M 0x00002000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1110_S 13
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1110_M 0x00004000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1110_S 14
+
+/* Reserved */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1111_M 0x00008000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_FP_MGMT_1111_S 15
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1111_M 0x00010000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MD_MGMT_1111_S 16
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1111_M 0x00020000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG1_MO_MGMT_1111_S 17
+
+/*
+ * Subtype based CTRL frames enable bits.
+ * FP: Filter_Pass, MD: Monitor_Direct, MO: Monitor_Other
+ */
+/* Reserved */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0000_M 0x00000001
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0000_S 0
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0000_M 0x00000002
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0000_S 1
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0000_M 0x00000004
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0000_S 2
+
+/* Reserved */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0001_M 0x00000008
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0001_S 3
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0001_M 0x00000010
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0001_S 4
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0001_M 0x00000020
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0001_S 5
+
+/* Reserved */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0010_M 0x00000040
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0010_S 6
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0010_M 0x00000080
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0010_S 7
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0010_M 0x00000100
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0010_S 8
+
+/* Reserved */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0011_M 0x00000200
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0011_S 9
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0011_M 0x00000400
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0011_S 10
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0011_M 0x00000800
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0011_S 11
+
+/* Reserved */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0100_M 0x00001000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0100_S 12
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0100_M 0x00002000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0100_S 13
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0100_M 0x00004000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0100_S 14
+
+/* Reserved */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0101_M 0x00008000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0101_S 15
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0101_M 0x00010000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0101_S 16
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0101_M 0x00020000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0101_S 17
+
+/* Reserved */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0110_M 0x00040000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0110_S 18
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0110_M 0x00080000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0110_S 19
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0110_M 0x00100000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0110_S 20
+
+/* Control Wrapper */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0111_M 0x00200000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_0111_S 21
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0111_M 0x00400000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_0111_S 22
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0111_M 0x00800000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0111_S 23
+
+/* Block Ack Request */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1000_M 0x01000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1000_S 24
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1000_M 0x02000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1000_S 25
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1000_M 0x04000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1000_S 26
+
+/* Block Ack */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1001_M 0x08000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1001_S 27
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1001_M 0x10000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1001_S 28
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1001_M 0x20000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1001_S 29
+
+/* PS-POLL */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1010_M 0x00000001
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1010_S 0
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1010_M 0x00000002
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1010_S 1
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1010_M 0x00000004
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1010_S 2
+
+/* RTS */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1011_M 0x00000008
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1011_S 3
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1011_M 0x00000010
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1011_S 4
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1011_M 0x00000020
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1011_S 5
+
+/* CTS */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1100_M 0x00000040
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1100_S 6
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1100_M 0x00000080
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1100_S 7
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1100_M 0x00000100
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1100_S 8
+
+/* ACK */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1101_M 0x00000200
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1101_S 9
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1101_M 0x00000400
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1101_S 10
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1101_M 0x00000800
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1101_S 11
+
+/* CF-END */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1110_M 0x00001000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1110_S 12
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1110_M 0x00002000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1110_S 13
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1110_M 0x00004000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1110_S 14
+
+/* CF-END + CF-ACK */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1111_M 0x00008000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_CTRL_1111_S 15
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1111_M 0x00010000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_CTRL_1111_S 16
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1111_M 0x00020000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_CTRL_1111_S 17
+
+/* Multicast data */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_DATA_MCAST_M 0x00040000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_DATA_MCAST_S 18
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_DATA_MCAST_M 0x00080000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_DATA_MCAST_S 19
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_DATA_MCAST_M 0x00100000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_DATA_MCAST_S 20
+
+/* Unicast data */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_DATA_UCAST_M 0x00200000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_DATA_UCAST_S 21
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_DATA_UCAST_M 0x00400000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_DATA_UCAST_S 22
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_DATA_UCAST_M 0x00800000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_DATA_UCAST_S 23
+
+/* NULL data */
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_DATA_NULL_M 0x01000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_FP_DATA_NULL_S 24
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_DATA_NULL_M 0x02000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MD_DATA_NULL_S 25
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_DATA_NULL_M 0x04000000
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG3_MO_DATA_NULL_S 26
+
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_SET(word, httsym, value) \
+ do { \
+ HTT_CHECK_SET_VAL(httsym, value); \
+ (word) |= (value) << httsym##_S; \
+ } while (0)
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_GET(word, httsym) \
+ (((word) & httsym##_M) >> httsym##_S)
+
+#define htt_rx_ring_pkt_enable_subtype_set( \
+ word, flag, mode, type, subtype, val) \
+ HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_SET( \
+ word, HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_##flag##_##mode##_##type##_##subtype, val)
+
+#define htt_rx_ring_pkt_enable_subtype_get( \
+ word, flag, mode, type, subtype) \
+ HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_GET( \
+ word, HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_##flag##_##mode##_##type##_##subtype)
+
+/* Definition to filter in TLVs */
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_MPDU_START_M 0x00000001
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_MPDU_START_S 0
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_MSDU_START_M 0x00000002
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_MSDU_START_S 1
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PACKET_M 0x00000004
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PACKET_S 2
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_MSDU_END_M 0x00000008
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_MSDU_END_S 3
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_MPDU_END_M 0x00000010
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_MPDU_END_S 4
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PACKET_HEADER_M 0x00000020
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PACKET_HEADER_S 5
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_RESERVED_M 0x00000040
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_RESERVED_S 6
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_ATTENTION_M 0x00000080
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_ATTENTION_S 7
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PPDU_START_M 0x00000100
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PPDU_START_S 8
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PPDU_END_M 0x00000200
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PPDU_END_S 9
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PPDU_END_USER_STATS_M 0x00000400
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PPDU_END_USER_STATS_S 10
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PPDU_END_USER_STATS_EXT_M 0x00000800
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PPDU_END_USER_STATS_EXT_S 11
+
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PPDU_END_STATUS_DONE_M 0x00001000
+#define HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_PPDU_END_STATUS_DONE_S 12
+
+#define HTT_RX_RING_TLV_ENABLE_SET(word, httsym, enable) \
+ do { \
+ HTT_CHECK_SET_VAL(httsym, enable); \
+ (word) |= (enable) << httsym##_S; \
+ } while (0)
+#define HTT_RX_RING_TLV_ENABLE_GET(word, httsym) \
+ (((word) & httsym##_M) >> httsym##_S)
+
+#define htt_rx_ring_tlv_filter_in_enable_set(word, tlv, enable) \
+ HTT_RX_RING_TLV_ENABLE_SET( \
+ word, HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_##tlv, enable)
+
+#define htt_rx_ring_tlv_filter_in_enable_get(word, tlv) \
+ HTT_RX_RING_TLV_ENABLE_GET( \
+ word, HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_RX_##tlv)
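+
+/*
+ * Illustrative usage sketch (not part of the HTT interface definition):
+ * a host driver could populate this message roughly as shown below before
+ * sending it to the target.  The local names cfg and ring_id, and the
+ * field values, are placeholders chosen only for this sketch.
+ *
+ *   struct htt_rx_ring_selection_cfg_t cfg = {0};
+ *
+ *   cfg.msg_type = HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG;
+ *   cfg.pdev_id = 0;             // ring at SOC/UMAC level
+ *   cfg.ring_id = ring_id;       // from enum htt_srng_ring_id
+ *   cfg.ring_buffer_size = 2048; // bytes
+ *
+ *   // filter-pass probe responses (MGMT subtype 0101) into this ring
+ *   htt_rx_ring_pkt_enable_subtype_set(
+ *       cfg.packet_type_enable_flags_0, FLAG0, FP, MGMT, 0101, 1);
+ *
+ *   // request the MPDU start and attention TLVs
+ *   htt_rx_ring_tlv_filter_in_enable_set(cfg.tlv_filter_in_flags,
+ *                                        MPDU_START, 1);
+ *   htt_rx_ring_tlv_filter_in_enable_set(cfg.tlv_filter_in_flags,
+ *                                        ATTENTION, 1);
+ */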
@@ -2942,6 +4067,7 @@
HTT_T2H_MSG_TYPE_RATE_REPORT = 0x17,
HTT_T2H_MSG_TYPE_FLOW_POOL_MAP = 0x18,
HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP = 0x19,
+ HTT_T2H_MSG_TYPE_SRING_SETUP_DONE = 0x1a,
HTT_T2H_MSG_TYPE_TEST,
/* keep this last */
@@ -6921,4 +8047,80 @@
((_var) |= ((_val) << HTT_FLOW_POOL_UNMAP_FLOW_POOL_ID_S)); \
} while (0)
+/**
+ * @brief HTT_T2H_MSG_TYPE_SRING_SETUP_DONE Message
+ *
+ * @details
+ * HTT_T2H_MSG_TYPE_SRING_SETUP_DONE message is sent by the target when
+ * SRNG ring setup is done
+ *
+ * This message indicates whether the last setup operation was successful.
+ * It is sent to the host when the host has set the response_required bit in
+ * HTT_H2T_MSG_TYPE_SRING_SETUP.
+ * The message would appear as follows:
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |----------------+----------------+----------------+----------------|
+ * | setup_status | ring_id | pdev_id | msg_type |
+ * |-------------------------------------------------------------------|
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_T2H_MSG_TYPE_SRING_SETUP_DONE
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ *          b'16:23 - ring_id: Identifies the ring that was set up.
+ *                    More details can be obtained from enum htt_srng_ring_id
+ *          b'24:31 - setup_status: Indicates the status of the setup operation.
+ *                    Refer to htt_ring_setup_status
+ */
+
+PREPACK struct htt_sring_setup_done_t {
+ A_UINT32 msg_type: 8,
+ pdev_id: 8,
+ ring_id: 8,
+ setup_status: 8;
+} POSTPACK;
+
+enum htt_ring_setup_status {
+ htt_ring_setup_status_ok = 0,
+ htt_ring_setup_status_error,
+};
+
+#define HTT_SRING_SETUP_DONE_SZ (sizeof(struct htt_sring_setup_done_t))
+
+#define HTT_SRING_SETUP_DONE_PDEV_ID_M 0x0000ff00
+#define HTT_SRING_SETUP_DONE_PDEV_ID_S 8
+#define HTT_SRING_SETUP_DONE_PDEV_ID_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_DONE_PDEV_ID_M) >> \
+ HTT_SRING_SETUP_DONE_PDEV_ID_S)
+#define HTT_SRING_SETUP_DONE_PDEV_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_DONE_PDEV_ID, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_DONE_PDEV_ID_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_DONE_RING_ID_M 0x00ff0000
+#define HTT_SRING_SETUP_DONE_RING_ID_S 16
+#define HTT_SRING_SETUP_DONE_RING_ID_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_DONE_RING_ID_M) >> \
+ HTT_SRING_SETUP_DONE_RING_ID_S)
+#define HTT_SRING_SETUP_DONE_RING_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_DONE_RING_ID, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_DONE_RING_ID_S)); \
+ } while (0)
+
+#define HTT_SRING_SETUP_DONE_STATUS_M 0xff000000
+#define HTT_SRING_SETUP_DONE_STATUS_S 24
+#define HTT_SRING_SETUP_DONE_STATUS_GET(_var) \
+ (((_var) & HTT_SRING_SETUP_DONE_STATUS_M) >> \
+ HTT_SRING_SETUP_DONE_STATUS_S)
+#define HTT_SRING_SETUP_DONE_STATUS_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_SRING_SETUP_DONE_STATUS, _val); \
+ ((_var) |= ((_val) << HTT_SRING_SETUP_DONE_STATUS_S)); \
+ } while (0)
+
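+/*
+ * Illustrative usage sketch (not part of the HTT interface definition):
+ * on receiving this indication, the host can decode the single message
+ * dword with the GET macros above.  The name msg_word stands for the first
+ * dword of the received message buffer in this sketch.
+ *
+ *   A_UINT32 pdev_id = HTT_SRING_SETUP_DONE_PDEV_ID_GET(msg_word);
+ *   A_UINT32 ring_id = HTT_SRING_SETUP_DONE_RING_ID_GET(msg_word);
+ *
+ *   if (HTT_SRING_SETUP_DONE_STATUS_GET(msg_word) !=
+ *       htt_ring_setup_status_ok) {
+ *       // handle ring setup failure for (pdev_id, ring_id)
+ *   }
+ */
+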
#endif
diff --git a/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_helper.c b/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_helper.c
index 59ab798..52e14f2 100644
--- a/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_helper.c
+++ b/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_helper.c
@@ -46,7 +46,6 @@
#include <wlan_hdd_tx_rx.h>
#include <wniApi.h>
#include <wlan_nlink_srv.h>
-#include <wlan_btc_svc.h>
#include <wlan_hdd_cfg.h>
#include <wlan_ptt_sock_svc.h>
#include <wlan_hdd_wowl.h>
diff --git a/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_main.c b/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_main.c
index 26d05f7..f7b649d 100644
--- a/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_main.c
+++ b/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_main.c
@@ -45,7 +45,6 @@
#include <wlan_hdd_tx_rx.h>
#include <wniApi.h>
#include <wlan_nlink_srv.h>
-#include <wlan_btc_svc.h>
#include <wlan_hdd_cfg.h>
#include <wlan_ptt_sock_svc.h>
#include <wlan_hdd_wowl.h>
diff --git a/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_rx.c b/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_rx.c
index 679cf25..c6f2d1e 100644
--- a/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_rx.c
+++ b/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_rx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -45,7 +45,6 @@
#include <wlan_hdd_tx_rx.h>
#include <wniApi.h>
#include <wlan_nlink_srv.h>
-#include <wlan_btc_svc.h>
#include <wlan_hdd_cfg.h>
#include <wlan_ptt_sock_svc.h>
#include <wlan_hdd_wowl.h>
diff --git a/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_tx.c b/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_tx.c
index 7427714..ab842fe 100644
--- a/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_tx.c
+++ b/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_tx.c
@@ -45,7 +45,6 @@
#include <wlan_hdd_tx_rx.h>
#include <wniApi.h>
#include <wlan_nlink_srv.h>
-#include <wlan_btc_svc.h>
#include <wlan_hdd_cfg.h>
#include <wlan_ptt_sock_svc.h>
#include <wlan_hdd_wowl.h>
diff --git a/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_txrx.c b/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_txrx.c
index bceba3d..b2f9039 100644
--- a/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_txrx.c
+++ b/drivers/staging/qcacld-2.0/CORE/EPPING/src/epping_txrx.c
@@ -45,7 +45,6 @@
#include <wlan_hdd_tx_rx.h>
#include <wniApi.h>
#include <wlan_nlink_srv.h>
-#include <wlan_btc_svc.h>
#include <wlan_hdd_cfg.h>
#include <wlan_ptt_sock_svc.h>
#include <wlan_hdd_wowl.h>
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_cfg.h b/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_cfg.h
index c99c344..8ab268d8 100644
--- a/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_cfg.h
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_cfg.h
@@ -56,6 +56,8 @@
//Number of items that can be configured
#define MAX_CFG_INI_ITEMS 1024
+#define MAX_PRB_REQ_VENDOR_OUI_INI_LEN 160
+#define VENDOR_SPECIFIC_IE_BITMAP 0x20000000
#ifdef SAP_AUTH_OFFLOAD
/* 802.11 pre-share key length */
@@ -540,16 +542,6 @@
#define CFG_ACTIVE_MIN_CHANNEL_TIME_MAX ( 10000 )
#define CFG_ACTIVE_MIN_CHANNEL_TIME_DEFAULT ( 20 )
-#define CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_NAME "gActiveMaxChannelTimeBtc"
-#define CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_MIN ( 0 )
-#define CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_MAX ( 10000 )
-#define CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_DEFAULT ( 120 )
-
-#define CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_NAME "gActiveMinChannelTimeBtc"
-#define CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_MIN ( 0 )
-#define CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_MAX ( 10000 )
-#define CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_DEFAULT ( 60 )
-
#define CFG_RETRY_LIMIT_ZERO_NAME "gRetryLimitZero"
#define CFG_RETRY_LIMIT_ZERO_MIN ( 0 )
#define CFG_RETRY_LIMIT_ZERO_MAX ( 15 )
@@ -3838,6 +3830,66 @@
#define CFG_ACTIVE_MODE_OFFLOAD_MAX (1)
#define CFG_ACTIVE_MODE_OFFLOAD_DEFAULT (0)
+/* enable/disable the probe request IE whitelist feature */
+#define CFG_PRB_REQ_IE_WHITELIST_NAME "g_enable_probereq_whitelist_ies"
+#define CFG_PRB_REQ_IE_WHITELIST_MIN (0)
+#define CFG_PRB_REQ_IE_WHITELIST_MAX (1)
+#define CFG_PRB_REQ_IE_WHITELIST_DEFAULT (0)
+/*
+ * For IE whitelisting in Probe Requests, the following ini parameters from
+ * g_probe_req_ie_bitmap_0 to g_probe_req_ie_bitmap_7 are used. The user needs
+ * to input these values in hexadecimal format; when a bit is set, the
+ * corresponding IE is included in the probe request.
+ */
+#define CFG_PRB_REQ_IE_BIT_MAP0_NAME "g_probe_req_ie_bitmap_0"
+#define CFG_PRB_REQ_IE_BIT_MAP0_MIN (0x00000000)
+#define CFG_PRB_REQ_IE_BIT_MAP0_MAX (0xFFFFFFFF)
+#define CFG_PRB_REQ_IE_BIT_MAP0_DEFAULT (0x00000000)
+
+#define CFG_PRB_REQ_IE_BIT_MAP1_NAME "g_probe_req_ie_bitmap_1"
+#define CFG_PRB_REQ_IE_BIT_MAP1_MIN (0x00000000)
+#define CFG_PRB_REQ_IE_BIT_MAP1_MAX (0xFFFFFFFF)
+#define CFG_PRB_REQ_IE_BIT_MAP1_DEFAULT (0x00000000)
+
+#define CFG_PRB_REQ_IE_BIT_MAP2_NAME "g_probe_req_ie_bitmap_2"
+#define CFG_PRB_REQ_IE_BIT_MAP2_MIN (0x00000000)
+#define CFG_PRB_REQ_IE_BIT_MAP2_MAX (0xFFFFFFFF)
+#define CFG_PRB_REQ_IE_BIT_MAP2_DEFAULT (0x00000000)
+
+#define CFG_PRB_REQ_IE_BIT_MAP3_NAME "g_probe_req_ie_bitmap_3"
+#define CFG_PRB_REQ_IE_BIT_MAP3_MIN (0x00000000)
+#define CFG_PRB_REQ_IE_BIT_MAP3_MAX (0xFFFFFFFF)
+#define CFG_PRB_REQ_IE_BIT_MAP3_DEFAULT (0x00000000)
+
+#define CFG_PRB_REQ_IE_BIT_MAP4_NAME "g_probe_req_ie_bitmap_4"
+#define CFG_PRB_REQ_IE_BIT_MAP4_MIN (0x00000000)
+#define CFG_PRB_REQ_IE_BIT_MAP4_MAX (0xFFFFFFFF)
+#define CFG_PRB_REQ_IE_BIT_MAP4_DEFAULT (0x00000000)
+
+#define CFG_PRB_REQ_IE_BIT_MAP5_NAME "g_probe_req_ie_bitmap_5"
+#define CFG_PRB_REQ_IE_BIT_MAP5_MIN (0x00000000)
+#define CFG_PRB_REQ_IE_BIT_MAP5_MAX (0xFFFFFFFF)
+#define CFG_PRB_REQ_IE_BIT_MAP5_DEFAULT (0x00000000)
+
+#define CFG_PRB_REQ_IE_BIT_MAP6_NAME "g_probe_req_ie_bitmap_6"
+#define CFG_PRB_REQ_IE_BIT_MAP6_MIN (0x00000000)
+#define CFG_PRB_REQ_IE_BIT_MAP6_MAX (0xFFFFFFFF)
+#define CFG_PRB_REQ_IE_BIT_MAP6_DEFAULT (0x00000000)
+
+#define CFG_PRB_REQ_IE_BIT_MAP7_NAME "g_probe_req_ie_bitmap_7"
+#define CFG_PRB_REQ_IE_BIT_MAP7_MIN (0x00000000)
+#define CFG_PRB_REQ_IE_BIT_MAP7_MAX (0xFFFFFFFF)
+#define CFG_PRB_REQ_IE_BIT_MAP7_DEFAULT (0x00000000)
+
+/*
+ * For the vendor specific IE, the Probe Req OUI types and subtypes to be
+ * whitelisted are specified in gProbeReqOUIs in the following
+ * example format - gProbeReqOUIs=AABBCCDD EEFF1122
+ */
+#define CFG_PROBE_REQ_OUI_NAME "gProbeReqOUIs"
+#define CFG_PROBE_REQ_OUI_DEFAULT ""
+
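+/*
+ * Example ini configuration (illustrative values only).  Assuming the bit
+ * positions map to IE element IDs (as the vendor specific bit 0x20000000
+ * in bitmap 6, i.e. element ID 221, suggests), the settings below whitelist
+ * the SSID and Supported Rates IEs, the vendor specific IE, and two vendor
+ * OUI type/subtype pairs:
+ *
+ *   g_enable_probereq_whitelist_ies=1
+ *   g_probe_req_ie_bitmap_0=0x00000003
+ *   g_probe_req_ie_bitmap_6=0x20000000
+ *   gProbeReqOUIs=AABBCCDD EEFF1122
+ */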
+
/*---------------------------------------------------------------------------
Type declarations
-------------------------------------------------------------------------*/
@@ -3963,8 +4015,6 @@
v_U32_t nInitialDwellTime; //in units of milliseconds
bool initial_scan_no_dfs_chnl;
- v_U32_t nActiveMinChnTimeBtc; //in units of milliseconds
- v_U32_t nActiveMaxChnTimeBtc; //in units of milliseconds
#ifdef WLAN_AP_STA_CONCURRENCY
v_U32_t nPassiveMinChnTimeConc; //in units of milliseconds
v_U32_t nPassiveMaxChnTimeConc; //in units of milliseconds
@@ -4593,6 +4643,20 @@
uint32_t edca_be_aifs;
uint32_t rx_wakelock_timeout;
bool active_mode_offload;
+
+ bool probe_req_ie_whitelist;
+ /* probe request bit map ies */
+ uint32_t probe_req_ie_bitmap_0;
+ uint32_t probe_req_ie_bitmap_1;
+ uint32_t probe_req_ie_bitmap_2;
+ uint32_t probe_req_ie_bitmap_3;
+ uint32_t probe_req_ie_bitmap_4;
+ uint32_t probe_req_ie_bitmap_5;
+ uint32_t probe_req_ie_bitmap_6;
+ uint32_t probe_req_ie_bitmap_7;
+
+ /* Probe Request multiple vendor OUIs */
+ uint8_t probe_req_ouis[MAX_PRB_REQ_VENDOR_OUI_INI_LEN];
};
typedef struct hdd_config hdd_config_t;
@@ -4710,6 +4774,9 @@
Function declarations and documentation
-------------------------------------------------------------------------*/
VOS_STATUS hdd_parse_config_ini(hdd_context_t *pHddCtx);
+uint32_t hdd_validate_prb_req_ie_bitmap(hdd_context_t* pHddCtx);
+VOS_STATUS hdd_parse_probe_req_ouis(hdd_context_t* pHddCtx);
+void hdd_free_probe_req_ouis(hdd_context_t* pHddCtx);
VOS_STATUS hdd_update_mac_config(hdd_context_t *pHddCtx);
VOS_STATUS hdd_set_sme_config( hdd_context_t *pHddCtx );
VOS_STATUS hdd_set_sme_chan_list(hdd_context_t *hdd_ctx);
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_cfg80211.h b/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_cfg80211.h
index bf3b4fe..b4963f7 100644
--- a/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_cfg80211.h
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_cfg80211.h
@@ -1882,6 +1882,20 @@
int wlan_hdd_cfg80211_del_station(struct wiphy *wiphy,
struct net_device *dev, u8 *mac);
#endif
+
+/**
+ * enum wlan_hdd_scan_type_for_randomization - type of scan
+ * @WLAN_HDD_HOST_SCAN: refers to a scan request from the cfg80211_ops "scan"
+ * @WLAN_HDD_PNO_SCAN: refers to a scan request from "sched_scan_start"
+ *
+ * The driver uses this enum to identify the source of a scan request.
+ */
+enum wlan_hdd_scan_type_for_randomization {
+ WLAN_HDD_HOST_SCAN,
+ WLAN_HDD_PNO_SCAN,
+};
+
#endif
#if defined(QCA_WIFI_FTM) && defined(CONFIG_NL80211_TESTMODE)
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_main.h b/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_main.h
index 1c3ad77..d527b37 100644
--- a/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_main.h
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_main.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -247,6 +247,8 @@
typedef v_U8_t tWlanHddMacAddr[HDD_MAC_ADDR_LEN];
+#define MAX_PROBE_REQ_OUIS 16
+
/*
* Generic asynchronous request/response support
*
@@ -1751,6 +1753,9 @@
unsigned int last_scan_bug_report_timestamp;
bool driver_being_stopped; /* Track if DRIVER STOP cmd is sent */
uint8_t max_mc_addr_list;
+
+ uint32_t no_of_probe_req_ouis;
+ struct vendor_oui *probe_req_voui;
};
/*---------------------------------------------------------------------------
@@ -1930,10 +1935,6 @@
void wlan_hdd_cfg80211_stats_ext_init(hdd_context_t *pHddCtx);
#endif
-#ifdef WLAN_FEATURE_LINK_LAYER_STATS
-void wlan_hdd_cfg80211_link_layer_stats_init(hdd_context_t *pHddCtx);
-#endif
-
void hdd_update_macaddr(hdd_config_t *cfg_ini, v_MACADDR_t hw_macaddr);
#if defined(FEATURE_WLAN_LFR) && defined(WLAN_FEATURE_ROAM_SCAN_OFFLOAD)
void wlan_hdd_disable_roaming(hdd_adapter_t *pAdapter);
@@ -1980,15 +1981,30 @@
return;
}
+
+/**
+ * wlan_hdd_cfg80211_link_layer_stats_init() - Initialize llstats callbacks
+ * @pHddCtx: HDD context
+ *
+ * Return: none
+ */
+void wlan_hdd_cfg80211_link_layer_stats_init(hdd_context_t *pHddCtx);
+
#else
static inline bool hdd_link_layer_stats_supported(void)
{
return false;
}
+
static inline void hdd_init_ll_stats_ctx(hdd_context_t *hdd_ctx)
{
return;
}
+
+static inline void wlan_hdd_cfg80211_link_layer_stats_init(hdd_context_t *pHddCtx)
+{
+ return;
+}
#endif /* WLAN_FEATURE_LINK_LAYER_STATS */
void hdd_get_fw_version(hdd_context_t *hdd_ctx,
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_oemdata.h b/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_oemdata.h
index c348be5..6e6d284 100644
--- a/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_oemdata.h
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/inc/wlan_hdd_oemdata.h
@@ -42,7 +42,7 @@
#define __WLAN_HDD_OEM_DATA_H__
#ifndef OEM_DATA_REQ_SIZE
-#define OEM_DATA_REQ_SIZE 280
+#define OEM_DATA_REQ_SIZE 500
#endif
#ifndef OEM_DATA_RSP_SIZE
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_assoc.c b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_assoc.c
index c2248ed..2991836 100644
--- a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_assoc.c
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_assoc.c
@@ -51,7 +51,6 @@
#include <aniGlobal.h>
#include "dot11f.h"
#include "wlan_nlink_common.h"
-#include "wlan_btc_svc.h"
#include "wlan_hdd_power.h"
#include "wlan_hdd_trace.h"
#include <linux/ieee80211.h>
@@ -874,7 +873,6 @@
}
#endif
}
- send_btc_nlink_msg(type, 0);
}
static void hdd_connRemoveConnectInfo(hdd_station_ctx_t *pHddStaCtx)
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_cfg.c b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_cfg.c
index e31e70d..49bae87 100644
--- a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_cfg.c
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_cfg.c
@@ -873,20 +873,6 @@
CFG_ACTIVE_MIN_CHANNEL_TIME_MIN,
CFG_ACTIVE_MIN_CHANNEL_TIME_MAX ),
- REG_VARIABLE( CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_NAME, WLAN_PARAM_Integer,
- hdd_config_t, nActiveMaxChnTimeBtc,
- VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
- CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_DEFAULT,
- CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_MIN,
- CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_MAX ),
-
- REG_VARIABLE( CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_NAME, WLAN_PARAM_Integer,
- hdd_config_t, nActiveMinChnTimeBtc,
- VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
- CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_DEFAULT,
- CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_MIN,
- CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_MAX ),
-
REG_VARIABLE( CFG_RETRY_LIMIT_ZERO_NAME, WLAN_PARAM_Integer,
hdd_config_t, retryLimitZero,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
@@ -4568,6 +4554,74 @@
CFG_ACTIVE_MODE_OFFLOAD_DEFAULT,
CFG_ACTIVE_MODE_OFFLOAD_MIN,
CFG_ACTIVE_MODE_OFFLOAD_MAX),
+
+ REG_VARIABLE(CFG_PRB_REQ_IE_WHITELIST_NAME, WLAN_PARAM_Integer,
+ hdd_config_t, probe_req_ie_whitelist,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_PRB_REQ_IE_WHITELIST_DEFAULT,
+ CFG_PRB_REQ_IE_WHITELIST_MIN,
+ CFG_PRB_REQ_IE_WHITELIST_MAX),
+
+ REG_VARIABLE(CFG_PRB_REQ_IE_BIT_MAP0_NAME, WLAN_PARAM_HexInteger,
+ hdd_config_t, probe_req_ie_bitmap_0,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP0_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP0_MIN,
+ CFG_PRB_REQ_IE_BIT_MAP0_MAX),
+
+ REG_VARIABLE(CFG_PRB_REQ_IE_BIT_MAP1_NAME, WLAN_PARAM_HexInteger,
+ hdd_config_t, probe_req_ie_bitmap_1,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP1_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP1_MIN,
+ CFG_PRB_REQ_IE_BIT_MAP1_MAX),
+
+ REG_VARIABLE(CFG_PRB_REQ_IE_BIT_MAP2_NAME, WLAN_PARAM_HexInteger,
+ hdd_config_t, probe_req_ie_bitmap_2,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP2_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP2_MIN,
+ CFG_PRB_REQ_IE_BIT_MAP2_MAX),
+
+ REG_VARIABLE(CFG_PRB_REQ_IE_BIT_MAP3_NAME, WLAN_PARAM_HexInteger,
+ hdd_config_t, probe_req_ie_bitmap_3,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP3_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP3_MIN,
+ CFG_PRB_REQ_IE_BIT_MAP3_MAX),
+
+ REG_VARIABLE(CFG_PRB_REQ_IE_BIT_MAP4_NAME, WLAN_PARAM_HexInteger,
+ hdd_config_t, probe_req_ie_bitmap_4,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP4_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP4_MIN,
+ CFG_PRB_REQ_IE_BIT_MAP4_MAX),
+
+ REG_VARIABLE(CFG_PRB_REQ_IE_BIT_MAP5_NAME, WLAN_PARAM_HexInteger,
+ hdd_config_t, probe_req_ie_bitmap_5,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP5_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP5_MIN,
+ CFG_PRB_REQ_IE_BIT_MAP5_MAX),
+
+ REG_VARIABLE(CFG_PRB_REQ_IE_BIT_MAP6_NAME, WLAN_PARAM_HexInteger,
+ hdd_config_t, probe_req_ie_bitmap_6,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP6_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP6_MIN,
+ CFG_PRB_REQ_IE_BIT_MAP6_MAX),
+
+ REG_VARIABLE(CFG_PRB_REQ_IE_BIT_MAP7_NAME, WLAN_PARAM_HexInteger,
+ hdd_config_t, probe_req_ie_bitmap_7,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP7_DEFAULT,
+ CFG_PRB_REQ_IE_BIT_MAP7_MIN,
+ CFG_PRB_REQ_IE_BIT_MAP7_MAX),
+
+ REG_VARIABLE_STRING(CFG_PROBE_REQ_OUI_NAME, WLAN_PARAM_String,
+ hdd_config_t, probe_req_ouis,
+ VAR_FLAGS_OPTIONAL,
+ (void *)CFG_PROBE_REQ_OUI_DEFAULT),
};
@@ -5374,6 +5428,46 @@
CFG_ACTIVE_MODE_OFFLOAD,
pHddCtx->cfg_ini->active_mode_offload);
hdd_ndp_print_ini_config(pHddCtx);
+
+ hddLog(LOG2, "Name = [%s] Value = [%x] ",
+ CFG_PRB_REQ_IE_WHITELIST_NAME,
+ pHddCtx->cfg_ini->probe_req_ie_whitelist);
+
+ hddLog(LOG2, "Name = [%s] Value = [%x] ",
+ CFG_PRB_REQ_IE_BIT_MAP0_NAME,
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_0);
+
+ hddLog(LOG2, "Name = [%s] Value = [%x] ",
+ CFG_PRB_REQ_IE_BIT_MAP1_NAME,
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_1);
+
+ hddLog(LOG2, "Name = [%s] Value = [%x] ",
+ CFG_PRB_REQ_IE_BIT_MAP2_NAME,
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_2);
+
+ hddLog(LOG2, "Name = [%s] Value = [%x] ",
+ CFG_PRB_REQ_IE_BIT_MAP3_NAME,
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_3);
+
+ hddLog(LOG2, "Name = [%s] Value = [%x] ",
+ CFG_PRB_REQ_IE_BIT_MAP4_NAME,
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_4);
+
+ hddLog(LOG2, "Name = [%s] Value = [%x] ",
+ CFG_PRB_REQ_IE_BIT_MAP5_NAME,
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_5);
+
+ hddLog(LOG2, "Name = [%s] Value = [%x] ",
+ CFG_PRB_REQ_IE_BIT_MAP6_NAME,
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_6);
+
+ hddLog(LOG2, "Name = [%s] Value = [%x] ",
+ CFG_PRB_REQ_IE_BIT_MAP7_NAME,
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_7);
+
+ hddLog(LOG2, "Name = [%s] Value =[%s]",
+ CFG_PROBE_REQ_OUI_NAME,
+ pHddCtx->cfg_ini->probe_req_ouis);
}
#define CFG_VALUE_MAX_LEN 256
@@ -5938,23 +6032,6 @@
}
-static void hdd_set_btc_config(hdd_context_t *pHddCtx)
-{
- hdd_config_t *pConfig = pHddCtx->cfg_ini;
- tSmeBtcConfig btcParams;
- int i;
-
- sme_BtcGetConfig(pHddCtx->hHal, &btcParams);
-
- btcParams.btcExecutionMode = pConfig->btcExecutionMode;
-
- for (i = 0; i < 6; i++) {
- btcParams.mwsCoexConfig[i] = pConfig->mwsCoexConfig[i];
- }
-
- sme_BtcSetConfig(pHddCtx->hHal, &btcParams);
-}
-
static void hdd_set_power_save_config(hdd_context_t *pHddCtx, tSmeConfigParams *smeConfig)
{
hdd_config_t *pConfig = pHddCtx->cfg_ini;
@@ -7014,8 +7091,6 @@
smeConfig->csrConfig.nActiveMinChnTime = pConfig->nActiveMinChnTime;
smeConfig->csrConfig.nPassiveMaxChnTime = pConfig->nPassiveMaxChnTime;
smeConfig->csrConfig.nPassiveMinChnTime = pConfig->nPassiveMinChnTime;
- smeConfig->csrConfig.nActiveMaxChnTimeBtc = pConfig->nActiveMaxChnTimeBtc;
- smeConfig->csrConfig.nActiveMinChnTimeBtc = pConfig->nActiveMinChnTimeBtc;
smeConfig->csrConfig.disableAggWithBtc = pConfig->disableAggWithBtc;
#ifdef WLAN_AP_STA_CONCURRENCY
smeConfig->csrConfig.nActiveMaxChnTimeConc = pConfig->nActiveMaxChnTimeConc;
@@ -7117,8 +7192,6 @@
hdd_set_power_save_offload_config(pHddCtx);
}
- hdd_set_btc_config(pHddCtx);
-
#ifdef WLAN_FEATURE_VOWIFI_11R
smeConfig->csrConfig.csr11rConfig.IsFTResourceReqSupported = pConfig->fFTResourceReqSupported;
#endif
@@ -7805,3 +7878,214 @@
hddLog(LOGE, "Fail to set coex page sap bt interval parameters");
}
}
+
+/**
+ * hdd_validate_prb_req_ie_bitmap() - validates user input for the IE bitmap
+ * @pHddCtx: pointer to the hdd context
+ *
+ * This function checks whether the user has entered a valid probe request
+ * IE bitmap, and also verifies the vendor OUIs if the vendor specific IE is set
+ *
+ * Return: status of verification
+ * 1 - valid input
+ * 0 - invalid input
+ */
+uint32_t hdd_validate_prb_req_ie_bitmap(hdd_context_t* pHddCtx)
+{
+ if (!(pHddCtx->cfg_ini->probe_req_ie_bitmap_0 ||
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_1 ||
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_2 ||
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_3 ||
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_4 ||
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_5 ||
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_6 ||
+ pHddCtx->cfg_ini->probe_req_ie_bitmap_7))
+ return 0;
+
+ /*
+ * Check whether the vendor OUI IE bit is set and OUIs are present. Each
+ * OUI is entered as a string of 8 characters in the ini, so for at least
+ * one OUI the minimum string length is 8; the string length is therefore
+ * checked against a minimum of 8.
+ */
+ if ((pHddCtx->cfg_ini->probe_req_ie_bitmap_6 &
+ VENDOR_SPECIFIC_IE_BITMAP) &&
+ (strlen(pHddCtx->cfg_ini->probe_req_ouis) < 8))
+ return 0;
+
+ /* check whether vendor oui IE is not set but OUIs are present */
+ if (!(pHddCtx->cfg_ini->probe_req_ie_bitmap_6 &
+ VENDOR_SPECIFIC_IE_BITMAP) &&
+ (strlen(pHddCtx->cfg_ini->probe_req_ouis) > 0))
+ return 0;
+
+ return 1;
+}
+
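
Reviewer note: the checks above boil down to two consistency rules between the IE bitmaps and the OUI string. Below is a minimal, self-contained sketch of the same rules in plain C; the names are illustrative only, not the driver's API.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical condensed form of the two consistency checks above;
     * the names are illustrative, not the driver's. */
    static bool whitelist_cfg_is_valid(uint32_t any_bitmap_bit_set,
                                       bool vendor_ie_bit_set,
                                       size_t oui_str_len)
    {
        if (!any_bitmap_bit_set)                    /* nothing whitelisted */
            return false;
        if (vendor_ie_bit_set && oui_str_len < 8)   /* bit set, but no OUI */
            return false;
        if (!vendor_ie_bit_set && oui_str_len > 0)  /* OUIs without the bit */
            return false;
        return true;
    }

    int main(void)
    {
        /* vendor IE whitelisted and one 8-char OUI configured: valid */
        printf("%d\n", whitelist_cfg_is_valid(1, true, 8));
        /* OUIs configured but the vendor IE bit not set: invalid */
        printf("%d\n", whitelist_cfg_is_valid(1, false, 8));
        return 0;
    }
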
+/**
+ * hdd_probe_req_voui_convert_to_hex - converts an 8-char string into two hex values
+ * @temp: string to be converted
+ * @voui: filled with the resulting type and subtype values
+ *
+ * This function converts an 8-character string into two hexadecimal
+ * values, oui_type and oui_subtype, where oui_type is the value converted
+ * from the first 6 characters and oui_subtype is the value converted from
+ * the last 2 characters. Strings that do not match the expected pattern
+ * are rejected.
+ *
+ * Return: status of conversion
+ * 1 - conversion succeeded
+ * 0 - conversion failed
+ */
+static uint32_t hdd_probe_req_voui_convert_to_hex(uint8_t *temp,
+ struct vendor_oui *voui)
+{
+ uint32_t hex_value[4];
+ uint32_t i = 0;
+ uint32_t indx = 0;
+
+ memset(hex_value, 0x00, sizeof(hex_value));
+ memset(voui, 0x00, sizeof(*voui));
+
+ /* convert string to hex */
+ for (i = 0; i < 8; i++) {
+ if (temp[i] >= '0' && temp[i] <= '9') {
+ hex_value[indx] = (temp[i] - '0') << 4;
+ } else if (temp[i] >= 'A' && temp[i] <= 'F') {
+ hex_value[indx] = (temp[i] - 'A') + 0xA;
+ hex_value[indx] = hex_value[indx] << 4;
+ } else {
+ /* invalid character in oui */
+ return 0;
+ }
+
+ if (temp[i + 1] >= '0' && temp[i + 1] <= '9') {
+ hex_value[indx] |= (temp[i + 1] - '0');
+ i = i + 1;
+ indx = indx + 1;
+ } else if (temp[i + 1] >= 'A' && temp[i + 1] <= 'F') {
+ hex_value[indx] |= ((temp[i + 1] - 'A') + 0xA);
+ i = i + 1;
+ indx = indx + 1;
+ } else {
+ /* invalid character in oui */
+ return 0;
+ }
+ }
+
+ voui->oui_type = (hex_value[0] | (hex_value[1] << 8) |
+ (hex_value[2] << 16));
+ voui->oui_subtype = hex_value[3];
+
+ hddLog(LOG1, FL("OUI_type = %x and OUI_subtype = %x"), voui->oui_type,
+ voui->oui_subtype);
+ return 1;
+}
+
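
Reviewer note: a stand-alone sketch of the conversion performed above, using sscanf for brevity (so, unlike the driver, it also accepts lowercase hex). The helper name and the sample OUI string are illustrative only.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-alone mirror of the conversion: the first six hex
     * characters become the packed OUI type (first byte in the low bits),
     * the last two become the subtype. */
    static int oui_str_to_hex(const char *s, uint32_t *type, uint32_t *subtype)
    {
        unsigned int b[4];
        unsigned int i;

        if (strlen(s) != 8)
            return 0;
        for (i = 0; i < 4; i++) {
            if (sscanf(&s[2 * i], "%2x", &b[i]) != 1)
                return 0;
        }
        *type = b[0] | (b[1] << 8) | (b[2] << 16);
        *subtype = b[3];
        return 1;
    }

    int main(void)
    {
        uint32_t t, st;

        /* "AABBCC01" -> oui_type 0xccbbaa, oui_subtype 0x01 */
        if (oui_str_to_hex("AABBCC01", &t, &st))
            printf("type=0x%06x subtype=0x%02x\n", (unsigned)t, (unsigned)st);
        return 0;
    }
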
+/**
+ * hdd_parse_probe_req_ouis - form OUIs from the ini item gProbeReqOUIs
+ * @pHddCtx: pointer to the hdd context
+ *
+ * This function parses the ini string gProbeReqOUIs, which needs to be in
+ * the following format:
+ * "<8 characters of [0-9] or [A-F]> <8 characters of [0-9] or [A-F]> ..."
+ * example: "AABBCCDD 1122EEFF"
+ * The logic counts the number of OUIs, allocates memory for every valid
+ * OUI and stores them in hdd_context_t.
+ *
+ * Return: status of parsing
+ */
+VOS_STATUS hdd_parse_probe_req_ouis(hdd_context_t* pHddCtx)
+{
+ struct vendor_oui voui[MAX_PROBE_REQ_OUIS];
+ uint8_t *str;
+ uint8_t temp[9];
+ uint32_t start = 0, end = 0;
+ uint32_t oui_indx = 0;
+ uint32_t i = 0;
+
+ pHddCtx->cfg_ini->probe_req_ouis[MAX_PRB_REQ_VENDOR_OUI_INI_LEN - 1] =
+ '\0';
+ if (!strlen(pHddCtx->cfg_ini->probe_req_ouis)) {
+ pHddCtx->no_of_probe_req_ouis = 0;
+ pHddCtx->probe_req_voui = NULL;
+ hddLog(LOG1, FL("NO OUIS to parse"));
+ return VOS_STATUS_SUCCESS;
+ }
+
+ str = (uint8_t *)(pHddCtx->cfg_ini->probe_req_ouis);
+
+ while(str[i] != '\0') {
+ if (str[i] == ' ') {
+ if ((end - start) != 8)
+ {
+ end = start = 0;
+ i++;
+ continue;
+ } else {
+ memcpy(temp, &str[i - 8], 8);
+ i++;
+ temp[8] = '\0';
+ if (hdd_probe_req_voui_convert_to_hex(temp,
+ &voui[oui_indx]) == 0) {
+ continue;
+ }
+ oui_indx++;
+ if (oui_indx >= MAX_PROBE_REQ_OUIS) {
+ hddLog(LOGE, "Max no. of OUIs supported "
+ "is 16, ignoring the rest");
+ return VOS_STATUS_SUCCESS;
+ }
+ }
+ start = end = 0;
+ } else {
+ i++;
+ end++;
+ }
+ }
+
+ if ((end - start) == 8) {
+ memcpy(temp, &str[i - 8], 8);
+ temp[8] = '\0';
+ if (hdd_probe_req_voui_convert_to_hex(temp,
+ &voui[oui_indx]) == 1)
+ oui_indx++;
+ }
+
+ if (!oui_indx)
+ return VOS_STATUS_SUCCESS;
+
+ pHddCtx->probe_req_voui = (struct vendor_oui *)vos_mem_malloc(oui_indx *
+ sizeof(struct vendor_oui));
+ if (pHddCtx->probe_req_voui == NULL) {
+ hddLog(LOGE,"Not Enough memory for OUI");
+ pHddCtx->no_of_probe_req_ouis = 0;
+ return VOS_STATUS_E_FAILURE;
+ }
+ vos_mem_zero(pHddCtx->probe_req_voui,
+ oui_indx * sizeof(struct vendor_oui));
+ pHddCtx->no_of_probe_req_ouis = oui_indx;
+ vos_mem_copy(pHddCtx->probe_req_voui, voui,
+ oui_indx * sizeof(struct vendor_oui));
+
+ return VOS_STATUS_SUCCESS;
+}
+
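
Reviewer note: the parser above walks the ini string character by character; the sketch below expresses the same tokenisation with standard C string functions. Names and the sample string are illustrative; tokens whose length is not exactly 8 are skipped, as in the driver.

    #include <stdio.h>
    #include <string.h>

    #define MAX_OUIS 16   /* assumption: mirrors MAX_PROBE_REQ_OUIS */

    /* Split "AABBCCDD 1122EEFF" into 8-character tokens; tokens of any other
     * length are skipped. */
    static unsigned int split_ouis(const char *ini, char out[][9], unsigned int max)
    {
        unsigned int n = 0;
        const char *p = ini;

        while (*p && n < max) {
            size_t len = strcspn(p, " ");

            if (len == 8) {
                memcpy(out[n], p, 8);
                out[n][8] = '\0';
                n++;
            }
            p += len;
            while (*p == ' ')
                p++;
        }
        return n;
    }

    int main(void)
    {
        char tokens[MAX_OUIS][9];
        unsigned int i, n = split_ouis("AABBCCDD 1122EEFF", tokens, MAX_OUIS);

        for (i = 0; i < n; i++)
            printf("OUI[%u] = %s\n", i, tokens[i]);
        return 0;
    }
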
+/**
+ * hdd_free_probe_req_ouis - de-allocates the probe req OUIs
+ * @pHddCtx: pointer to the hdd context
+ *
+ * This function de-allocates the probe req OUIs which were
+ * allocated while parsing the ini string gProbeReqOUIs
+ *
+ * Return: None
+ */
+void hdd_free_probe_req_ouis(hdd_context_t* pHddCtx)
+{
+ if (pHddCtx->probe_req_voui) {
+ vos_mem_free(pHddCtx->probe_req_voui);
+ pHddCtx->probe_req_voui = NULL;
+ }
+
+ pHddCtx->no_of_probe_req_ouis = 0;
+}
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_cfg80211.c b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_cfg80211.c
index 178cf32..14b6bd7 100644
--- a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_cfg80211.c
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_cfg80211.c
@@ -1596,6 +1596,49 @@
}
/**
+ * wlan_hdd_fill_whitelist_ie_attrs - fill the whitelist members
+ * @ie_whitelist: set to true to enable the whitelist
+ * @probe_req_ie_bitmap: bitmap to be filled
+ * @num_vendor_oui: pointer to the number of OUIs
+ * @voui: pointer to the OUIs to be filled
+ * @pHddCtx: pointer to the hdd context
+ *
+ * This function fills the IE bitmap and vendor OUI fields with the
+ * corresponding values present in cfg_ini and pHddCtx.
+ *
+ * Return: None
+ */
+static void wlan_hdd_fill_whitelist_ie_attrs(bool *ie_whitelist,
+ uint32_t *probe_req_ie_bitmap,
+ uint32_t *num_vendor_oui,
+ struct vendor_oui *voui,
+ hdd_context_t *pHddCtx)
+{
+ uint32_t i = 0;
+
+ *ie_whitelist = true;
+ probe_req_ie_bitmap[0] = pHddCtx->cfg_ini->probe_req_ie_bitmap_0;
+ probe_req_ie_bitmap[1] = pHddCtx->cfg_ini->probe_req_ie_bitmap_1;
+ probe_req_ie_bitmap[2] = pHddCtx->cfg_ini->probe_req_ie_bitmap_2;
+ probe_req_ie_bitmap[3] = pHddCtx->cfg_ini->probe_req_ie_bitmap_3;
+ probe_req_ie_bitmap[4] = pHddCtx->cfg_ini->probe_req_ie_bitmap_4;
+ probe_req_ie_bitmap[5] = pHddCtx->cfg_ini->probe_req_ie_bitmap_5;
+ probe_req_ie_bitmap[6] = pHddCtx->cfg_ini->probe_req_ie_bitmap_6;
+ probe_req_ie_bitmap[7] = pHddCtx->cfg_ini->probe_req_ie_bitmap_7;
+
+ *num_vendor_oui = 0;
+
+ if ((pHddCtx->no_of_probe_req_ouis != 0) && (voui != NULL)) {
+ *num_vendor_oui = pHddCtx->no_of_probe_req_ouis;
+ for (i = 0; i < pHddCtx->no_of_probe_req_ouis; i++) {
+ voui[i].oui_type = pHddCtx->probe_req_voui[i].oui_type;
+ voui[i].oui_subtype =
+ pHddCtx->probe_req_voui[i].oui_subtype;
+ }
+ }
+}
+
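
Reviewer note: the eight 32-bit words copied above are assumed to be indexed by IE element ID (bit n of word k selects element ID 32*k + n), which is consistent with the vendor-specific IE (ID 221) being tested against bitmap_6 elsewhere in this patch. A small sketch under that assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define PROBE_REQ_BITMAP_LEN 8

    /* Assumed layout: IE element ID n maps to bit (n % 32) of word (n / 32),
     * so the Vendor Specific IE (ID 221) lands in word 6, bit 29. */
    static void whitelist_ie(uint32_t bitmap[PROBE_REQ_BITMAP_LEN], unsigned int eid)
    {
        if (eid < 32u * PROBE_REQ_BITMAP_LEN)
            bitmap[eid / 32] |= 1u << (eid % 32);
    }

    int main(void)
    {
        uint32_t bitmap[PROBE_REQ_BITMAP_LEN] = { 0 };

        whitelist_ie(bitmap, 0);    /* SSID */
        whitelist_ie(bitmap, 221);  /* Vendor Specific */
        printf("bitmap_0=0x%08x bitmap_6=0x%08x\n",
               (unsigned)bitmap[0], (unsigned)bitmap[6]);
        return 0;
    }
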
+/**
* __wlan_hdd_cfg80211_set_scanning_mac_oui() - set scan MAC
* @wiphy: pointer to wireless wiphy structure.
* @wdev: pointer to wireless_dev structure.
@@ -1617,6 +1660,8 @@
struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI_MAX + 1];
eHalStatus status;
int ret;
+ struct net_device *ndev = wdev->netdev;
+ hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(ndev);
ENTER();
@@ -1641,11 +1686,16 @@
return -EINVAL;
}
- pReqMsg = vos_mem_malloc(sizeof(*pReqMsg));
+ pReqMsg = vos_mem_malloc(sizeof(*pReqMsg) +
+ (pHddCtx->no_of_probe_req_ouis) *
+ (sizeof(struct vendor_oui)));
if (!pReqMsg) {
hddLog(LOGE, FL("vos_mem_malloc failed"));
return -ENOMEM;
}
+ vos_mem_zero(pReqMsg, sizeof(*pReqMsg) +
+ (pHddCtx->no_of_probe_req_ouis) *
+ (sizeof(struct vendor_oui)));
/* Parse and fetch oui */
if (!tb[QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI]) {
@@ -1657,8 +1707,20 @@
tb[QCA_WLAN_VENDOR_ATTR_SET_SCANNING_MAC_OUI],
sizeof(pReqMsg->oui));
- hddLog(LOG1, FL("Oui (%02x:%02x:%02x)"), pReqMsg->oui[0], pReqMsg->oui[1],
- pReqMsg->oui[2]);
+ /* populate pReqMsg for mac addr randomization */
+ pReqMsg->vdev_id = pAdapter->sessionId;
+ pReqMsg->enb_probe_req_sno_randomization = 1;
+
+ hddLog(LOG1, FL("Oui (%02x:%02x:%02x), vdev_id = %d"), pReqMsg->oui[0],
+ pReqMsg->oui[1], pReqMsg->oui[2], pReqMsg->vdev_id);
+
+ if (pHddCtx->cfg_ini->probe_req_ie_whitelist)
+ wlan_hdd_fill_whitelist_ie_attrs(&pReqMsg->ie_whitelist,
+ pReqMsg->probe_req_ie_bitmap,
+ &pReqMsg->num_vendor_oui,
+ (struct vendor_oui *)((uint8_t *)pReqMsg +
+ sizeof(*pReqMsg)),
+ pHddCtx);
status = sme_SetScanningMacOui(pHddCtx->hHal, pReqMsg);
if (!HAL_STATUS_SUCCESS(status)) {
@@ -5897,8 +5959,7 @@
* after receiving Link Layer indications from FW.This callback converts the
* firmware data to the NL data and send the same to the kernel/upper layers.
*/
-static void wlan_hdd_cfg80211_link_layer_stats_callback(void *ctx,
- int indType,
+static void wlan_hdd_cfg80211_link_layer_stats_callback(void *ctx, int indType,
void *pRsp)
{
hdd_adapter_t *pAdapter = NULL;
@@ -10409,8 +10470,7 @@
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_EXTSCAN_GET_CAPABILITIES,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
- WIPHY_VENDOR_CMD_NEED_NETDEV |
- WIPHY_VENDOR_CMD_NEED_RUNNING,
+ WIPHY_VENDOR_CMD_NEED_NETDEV,
.doit = wlan_hdd_cfg80211_extscan_get_capabilities
},
{
@@ -10784,8 +10844,7 @@
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_GET_BUS_SIZE,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
- WIPHY_VENDOR_CMD_NEED_NETDEV |
- WIPHY_VENDOR_CMD_NEED_RUNNING,
+ WIPHY_VENDOR_CMD_NEED_NETDEV,
.doit = wlan_hdd_cfg80211_get_bus_size
},
{
@@ -10835,6 +10894,19 @@
return wiphy;
}
+#ifdef CFG80211_SCAN_RANDOM_MAC_ADDR
+static void wlan_hdd_cfg80211_scan_randomization_init(struct wiphy *wiphy)
+{
+ wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR;
+}
+#else
+static void wlan_hdd_cfg80211_scan_randomization_init(struct wiphy *wiphy)
+{
+ return;
+}
+#endif
+
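
Reviewer note: with these feature flags advertised, user space can request scans with a randomized source address, for example with a reasonably recent iw: "iw dev wlan0 scan randomise=<addr>/<mask>" (the exact option spelling depends on the iw version; the addr/mask values are supplied by the caller).
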
/*
* FUNCTION: wlan_hdd_cfg80211_init
* This function is called by hdd_wlan_startup()
@@ -11075,6 +11147,7 @@
#ifdef CHANNEL_SWITCH_SUPPORTED
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
#endif
+ wlan_hdd_cfg80211_scan_randomization_init(wiphy);
EXIT();
return 0;
@@ -16217,6 +16290,89 @@
}
}
+#ifdef CFG80211_SCAN_RANDOM_MAC_ADDR
+/**
+ * wlan_hdd_update_scan_rand_attrs - fill the host/pno scan randomization attrs
+ * @scan_req: pointer to the destination for the mac addr and mac mask
+ * @cfg_scan_req: pointer to the source of the mac addr and mac mask
+ * @scan_type: type of scan from enum wlan_hdd_scan_type_for_randomization
+ *
+ * If the scan randomization flag is set in the cfg scan request flags, this
+ * function copies the mac addr and mac mask from the cfg80211 scan/sched
+ * scan request to the randomization attributes in tCsrScanRequest (normal
+ * scan) or tSirPNOScanReq (sched scan). Based on the type of scan, scan_req
+ * and cfg_scan_req are cast accordingly.
+ *
+ * Return: None
+ */
+static void wlan_hdd_update_scan_rand_attrs(void *scan_req,
+ void *cfg_scan_req,
+ uint32_t scan_type)
+{
+ uint32_t flags = 0;
+ uint8_t *cfg_mac_addr = NULL;
+ uint8_t *cfg_mac_addr_mask = NULL;
+ uint32_t *scan_randomization = NULL;
+ uint8_t *scan_mac_addr = NULL;
+ uint8_t *scan_mac_addr_mask = NULL;
+
+ if (scan_type == WLAN_HDD_HOST_SCAN) {
+ tCsrScanRequest *csr_scan_req = NULL;
+ struct cfg80211_scan_request *request = NULL;
+
+ csr_scan_req = (tCsrScanRequest *)scan_req;
+ request = (struct cfg80211_scan_request *)cfg_scan_req;
+
+ flags = request->flags;
+ if (!(flags & NL80211_SCAN_FLAG_RANDOM_ADDR))
+ return;
+
+ cfg_mac_addr = request->mac_addr;
+ cfg_mac_addr_mask = request->mac_addr_mask;
+ scan_randomization = &csr_scan_req->enable_scan_randomization;
+ scan_mac_addr = csr_scan_req->mac_addr;
+ scan_mac_addr_mask = csr_scan_req->mac_addr_mask;
+ } else if (scan_type == WLAN_HDD_PNO_SCAN) {
+ tpSirPNOScanReq pno_scan_req = NULL;
+ struct cfg80211_sched_scan_request *request = NULL;
+
+ pno_scan_req = (tpSirPNOScanReq)scan_req;
+ request = (struct cfg80211_sched_scan_request *)cfg_scan_req;
+
+ flags = request->flags;
+ if (!(flags & NL80211_SCAN_FLAG_RANDOM_ADDR))
+ return;
+
+ cfg_mac_addr = request->mac_addr;
+ cfg_mac_addr_mask = request->mac_addr_mask;
+ scan_randomization =
+ &pno_scan_req->enable_pno_scan_randomization;
+ scan_mac_addr = pno_scan_req->mac_addr;
+ scan_mac_addr_mask = pno_scan_req->mac_addr_mask;
+ } else {
+ hddLog(LOGE, FL("invalid scan type for randomization"));
+ return;
+ }
+
+ /* enable mac randomization */
+ *scan_randomization = 1;
+ memcpy(scan_mac_addr, cfg_mac_addr, VOS_MAC_ADDR_SIZE);
+ memcpy(scan_mac_addr_mask, cfg_mac_addr_mask, VOS_MAC_ADDR_SIZE);
+
+ hddLog(LOG1, FL("Mac Addr: "MAC_ADDRESS_STR
+ " and Mac Mask: " MAC_ADDRESS_STR),
+ MAC_ADDR_ARRAY(scan_mac_addr),
+ MAC_ADDR_ARRAY(scan_mac_addr_mask));
+}
+#else
+static void wlan_hdd_update_scan_rand_attrs(void *scan_req,
+ void *cfg_scan_req,
+ uint32_t scan_type)
+{
+ return;
+}
+#endif
+
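
Reviewer note: the addr/mask pair copied above carries the usual nl80211 semantics: bits set in the mask are taken from the template address and the remaining bits are randomized (in this driver the firmware does that per probe request). The following sketch of the derivation is for illustration only:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ETH_ALEN 6

    /* Bits set in the mask come from the template address, the rest are
     * random; the sketch also keeps the result unicast and locally
     * administered, as randomized addresses conventionally are. */
    static void randomize_mac(const uint8_t tmpl[ETH_ALEN],
                              const uint8_t mask[ETH_ALEN],
                              uint8_t out[ETH_ALEN])
    {
        int i;

        for (i = 0; i < ETH_ALEN; i++) {
            uint8_t r = (uint8_t)(rand() & 0xff);

            out[i] = (tmpl[i] & mask[i]) | (r & (uint8_t)~mask[i]);
        }
        out[0] &= (uint8_t)~0x01;   /* keep the result unicast */
        out[0] |= 0x02;             /* locally administered */
    }

    int main(void)
    {
        uint8_t tmpl[ETH_ALEN] = { 0xaa, 0xbb, 0xcc, 0x00, 0x00, 0x00 };
        uint8_t mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
        uint8_t out[ETH_ALEN];
        int i;

        randomize_mac(tmpl, mask, out);
        for (i = 0; i < ETH_ALEN; i++)
            printf("%02x%c", out[i], i == ETH_ALEN - 1 ? '\n' : ':');
        return 0;
    }
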
/*
* FUNCTION: __wlan_hdd_cfg80211_scan
* this scan respond to scan trigger and update cfg80211 scan database
@@ -16234,6 +16390,7 @@
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR( dev );
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX( pAdapter );
hdd_wext_state_t *pwextBuf = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
+ hdd_station_ctx_t *station_ctx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
hdd_config_t *cfg_param = NULL;
tCsrScanRequest scanRequest;
tANI_U8 *channelList = NULL, i;
@@ -16593,6 +16750,32 @@
pAdapter->sessionId);
#endif
+ wlan_hdd_update_scan_rand_attrs((void *)&scanRequest, (void *)request,
+ WLAN_HDD_HOST_SCAN);
+
+ if (pAdapter->device_mode == WLAN_HDD_INFRA_STATION &&
+ !is_p2p_scan &&
+ !hdd_connIsConnected(station_ctx) &&
+ (pHddCtx->cfg_ini->probe_req_ie_whitelist)) {
+ if (pHddCtx->no_of_probe_req_ouis != 0) {
+ scanRequest.voui = (struct vendor_oui *)vos_mem_malloc(
+ pHddCtx->no_of_probe_req_ouis *
+ sizeof(struct vendor_oui));
+ if (!scanRequest.voui) {
+ hddLog(LOGE, FL("Not enough memory for voui"));
+ scanRequest.num_vendor_oui = 0;
+ status = -ENOMEM;
+ goto free_mem;
+ }
+ }
+
+ wlan_hdd_fill_whitelist_ie_attrs(&scanRequest.ie_whitelist,
+ scanRequest.probe_req_ie_bitmap,
+ &scanRequest.num_vendor_oui,
+ scanRequest.voui,
+ pHddCtx);
+ }
+
vos_runtime_pm_prevent_suspend(pHddCtx->runtime_context.scan);
status = sme_ScanRequest( WLAN_HDD_GET_HAL_CTX(pAdapter),
pAdapter->sessionId, &scanRequest, &scanId,
@@ -16634,6 +16817,9 @@
if( channelList )
vos_mem_free( channelList );
+ if(scanRequest.voui)
+ vos_mem_free(scanRequest.voui);
+
EXIT();
return status;
}
@@ -20290,6 +20476,7 @@
hdd_scaninfo_t *pScanInfo = &pAdapter->scan_info;
hdd_config_t *config = NULL;
v_U32_t num_ignore_dfs_ch = 0;
+ hdd_station_ctx_t *station_ctx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
ENTER();
@@ -20351,7 +20538,14 @@
return -ENOTSUPP;
}
- pPnoRequest = (tpSirPNOScanReq) vos_mem_malloc(sizeof (tSirPNOScanReq));
+ if (!hdd_connIsConnected(station_ctx) &&
+ pHddCtx->cfg_ini->probe_req_ie_whitelist)
+ pPnoRequest = (tpSirPNOScanReq) vos_mem_malloc(sizeof(tSirPNOScanReq) +
+ (pHddCtx->no_of_probe_req_ouis) *
+ (sizeof(struct vendor_oui)));
+ else
+ pPnoRequest = (tpSirPNOScanReq) vos_mem_malloc(sizeof(tSirPNOScanReq));
+
if (NULL == pPnoRequest)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
@@ -20359,7 +20553,14 @@
return -ENOMEM;
}
- memset(pPnoRequest, 0, sizeof (tSirPNOScanReq));
+ if (!hdd_connIsConnected(station_ctx) &&
+ pHddCtx->cfg_ini->probe_req_ie_whitelist)
+ memset(pPnoRequest, 0, sizeof (tSirPNOScanReq) +
+ (pHddCtx->no_of_probe_req_ouis) *
+ (sizeof(struct vendor_oui)));
+ else
+ memset(pPnoRequest, 0, sizeof (tSirPNOScanReq));
+
pPnoRequest->enable = 1; /*Enable PNO */
pPnoRequest->ucNetworksCount = request->n_match_sets;
if ((!pPnoRequest->ucNetworksCount ) ||
@@ -20526,6 +20727,19 @@
"SessionId %d, enable %d, modePNO %d",
pAdapter->sessionId, pPnoRequest->enable, pPnoRequest->modePNO);
+ wlan_hdd_update_scan_rand_attrs((void *)pPnoRequest, (void *)request,
+ WLAN_HDD_PNO_SCAN);
+
+ if (pHddCtx->cfg_ini->probe_req_ie_whitelist &&
+ !hdd_connIsConnected(station_ctx))
+ wlan_hdd_fill_whitelist_ie_attrs(&pPnoRequest->ie_whitelist,
+ pPnoRequest->probe_req_ie_bitmap,
+ &pPnoRequest->num_vendor_oui,
+ (struct vendor_oui *)(
+ (uint8_t *)pPnoRequest +
+ sizeof(*pPnoRequest)),
+ pHddCtx);
+
status = sme_SetPreferredNetworkList(WLAN_HDD_GET_HAL_CTX(pAdapter),
pPnoRequest, pAdapter->sessionId,
hdd_cfg80211_sched_scan_done_callback, pAdapter);
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_early_suspend.c b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_early_suspend.c
index e40bff1..a62182b 100644
--- a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_early_suspend.c
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_early_suspend.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -51,7 +51,6 @@
#include <vos_sched.h>
#include <macInitApi.h>
#include <wlan_qct_sys.h>
-#include <wlan_btc_svc.h>
#include <wlan_nlink_common.h>
#include <wlan_hdd_main.h>
#include <wlan_hdd_assoc.h>
@@ -2332,6 +2331,8 @@
wlan_hdd_cfg80211_extscan_callback);
#endif /* FEATURE_WLAN_EXTSCAN */
sme_set_rssi_threshold_breached_cb(pHddCtx->hHal, hdd_rssi_threshold_breached);
+ wlan_hdd_cfg80211_link_layer_stats_init(pHddCtx);
+ sme_bpf_offload_register_callback(pHddCtx->hHal, hdd_get_bpf_offload_cb);
#ifdef WLAN_FEATURE_LPSS
wlan_hdd_send_all_scan_intf_info(pHddCtx);
@@ -2359,7 +2360,6 @@
/* Unregister the Net Device Notifier */
unregister_netdevice_notifier(&hdd_netdev_notifier);
/* Clean up HDD Nlink Service */
- send_btc_nlink_msg(WLAN_MODULE_DOWN_IND, 0);
#ifdef WLAN_KD_READY_NOTIFIER
nl_srv_exit(pHddCtx->ptt_pid);
#else
@@ -2387,8 +2387,6 @@
return -EPERM;
success:
- /* Trigger replay of BTC events */
- send_btc_nlink_msg(WLAN_MODULE_DOWN_IND, 0);
pHddCtx->isLogpInProgress = FALSE;
hdd_ssr_timer_del();
return VOS_STATUS_SUCCESS;
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_hostapd.c b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_hostapd.c
index f65dbcd..d2e8d58 100644
--- a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_hostapd.c
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_hostapd.c
@@ -71,7 +71,6 @@
#include <linux/netdevice.h>
#include <linux/mmc/sdio_func.h>
#include "wlan_nlink_common.h"
-#include "wlan_btc_svc.h"
#include "wlan_hdd_p2p.h"
#ifdef IPA_OFFLOAD
#include <wlan_hdd_ipa.h>
@@ -570,10 +569,9 @@
goto exit;
}
- if ((!ifr) || (!ifr->ifr_data))
- {
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- FL("ifr or ifr->ifr_data is NULL"));
+ if ((!ifr) || (!ifr->ifr_data)) {
+ hddLog(LOGE,
+ FL("ifr or ifr->ifr_data is NULL cmd: %d"), cmd);
ret = -EINVAL;
goto exit;
}
@@ -1453,9 +1451,6 @@
hdd_wlan_green_ap_start_bss(pHddCtx);
- // Send current operating channel of SoftAP to BTC-ES
- send_btc_nlink_msg(WLAN_BTC_SOFTAP_BSS_START, 0);
-
/* Set default key index */
VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: default key index %hu", __func__,
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_main.c b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_main.c
index 2a298855..ecd9fc55 100755
--- a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_main.c
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_main.c
@@ -68,7 +68,6 @@
#include <wlan_hdd_tx_rx.h>
#include <wniApi.h>
#include <wlan_nlink_srv.h>
-#include <wlan_btc_svc.h>
#include <wlan_hdd_cfg.h>
#include <wlan_ptt_sock_svc.h>
#include <dbglog_host.h>
@@ -136,6 +135,10 @@
#include "tl_shim.h"
#include "wlan_hdd_oemdata.h"
+#ifdef CNSS_GENL
+#include <net/cnss_nl.h>
+#endif
+
#if defined(LINUX_QCMBR)
#define SIOCIOCTLTX99 (SIOCDEVPRIVATE+13)
#endif
@@ -12899,9 +12902,6 @@
if (pConfig && pConfig->fIsLogpEnabled)
vos_watchdog_close(pVosContext);
- //Clean up HDD Nlink Service
- send_btc_nlink_msg(WLAN_MODULE_DOWN_IND, 0);
-
if (VOS_FTM_MODE != hdd_get_conparam())
wlan_hdd_logging_sock_deactivate_svc(pHddCtx);
@@ -12944,6 +12944,7 @@
}
wlan_hdd_deinit_tx_rx_histogram(pHddCtx);
+ hdd_free_probe_req_ouis(pHddCtx);
wiphy_unregister(wiphy) ;
wlan_hdd_cfg80211_deinit(wiphy);
wiphy_free(wiphy) ;
@@ -14281,6 +14282,27 @@
goto err_config;
}
+ if (pHddCtx->cfg_ini->probe_req_ie_whitelist)
+ {
+ if (hdd_validate_prb_req_ie_bitmap(pHddCtx))
+ {
+ /* parse ini string probe req oui */
+ status = hdd_parse_probe_req_ouis(pHddCtx);
+ if (VOS_STATUS_SUCCESS != status)
+ {
+ hddLog(LOGE, FL("Error parsing probe req ouis - ignoring them,"
+ " disabling whitelist"));
+ pHddCtx->cfg_ini->probe_req_ie_whitelist = false;
+ }
+ }
+ else
+ {
+ hddLog(LOGE, FL("invalid probe req ie bitmap and ouis,"
+ " disabling whitelist"));
+ pHddCtx->cfg_ini->probe_req_ie_whitelist = false;
+ }
+ }
+
((VosContextType*)pVosContext)->pHIFContext = hif_sc;
/* store target type and target version info in hdd ctx */
@@ -14866,13 +14888,6 @@
pHddCtx->kd_nl_init = 1;
#endif /* WLAN_KD_READY_NOTIFIER */
- //Initialize the BTC service
- if(btc_activate_service(pHddCtx) != 0)
- {
- hddLog(VOS_TRACE_LEVEL_FATAL,"%s: btc_activate_service failed",__func__);
- goto err_reg_netdev;
- }
-
#ifdef FEATURE_OEM_DATA_SUPPORT
//Initialize the OEM service
if (oem_activate_service(pHddCtx) != 0)
@@ -15060,9 +15075,7 @@
wlan_hdd_cfg80211_extscan_callback);
#endif /* FEATURE_WLAN_EXTSCAN */
sme_set_rssi_threshold_breached_cb(pHddCtx->hHal, hdd_rssi_threshold_breached);
-#ifdef WLAN_FEATURE_LINK_LAYER_STATS
- wlan_hdd_cfg80211_link_layer_stats_init(pHddCtx);
-#endif
+ wlan_hdd_cfg80211_link_layer_stats_init(pHddCtx);
wlan_hdd_tsf_init(pHddCtx);
@@ -15228,8 +15241,10 @@
err_free_hdd_context:
/* wiphy_free() will free the HDD context so remove global reference */
- if (pVosContext)
+ if (pVosContext) {
+ hdd_free_probe_req_ouis(pHddCtx);
((VosContextType*)(pVosContext))->pHDDContext = NULL;
+ }
wiphy_free(wiphy) ;
//kfree(wdev) ;
@@ -16619,6 +16634,23 @@
}
#endif
+/**
+ * nl_srv_bcast_svc() - Wrapper function to send bcast msgs to SVC mcast group
+ * @skb: sk buffer pointer
+ *
+ * Sends the broadcast message to the SVC multicast group over the generic
+ * netlink socket if CNSS_GENL is enabled; otherwise the legacy netlink
+ * socket is used.
+ *
+ * Return: None
+ */
+static void nl_srv_bcast_svc(struct sk_buff *skb)
+{
+#ifdef CNSS_GENL
+ nl_srv_bcast(skb, CLD80211_MCGRP_SVC_MSGS, WLAN_NL_MSG_SVC);
+#else
+ nl_srv_bcast(skb);
+#endif
+}
void wlan_hdd_send_svc_nlink_msg(int type, void *data, int len)
{
@@ -16681,7 +16713,7 @@
return;
}
- nl_srv_bcast(skb);
+ nl_srv_bcast_svc(skb);
return;
}
@@ -16918,7 +16950,6 @@
}
#endif
-
#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH
void wlan_hdd_check_sta_ap_concurrent_ch_intf(void *data)
{
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_oemdata.c b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_oemdata.c
index 65fafbf..b0c844e 100644
--- a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_oemdata.c
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_oemdata.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -48,6 +48,9 @@
#include "vos_utils.h"
#include "wma.h"
#include "wlan_hdd_oemdata.h"
+#ifdef CNSS_GENL
+#include <net/cnss_nl.h>
+#endif
static struct hdd_context_s *pHddCtx;
@@ -158,6 +161,26 @@
return 0;
}
+/**
+ * nl_srv_ucast_oem() - Wrapper function to send ucast msgs to OEM
+ * @skb: sk buffer pointer
+ * @dst_pid: Destination PID
+ * @flag: flags
+ *
+ * Sends the unicast message to the OEM application over the generic
+ * netlink socket if CNSS_GENL is enabled; otherwise the legacy netlink
+ * socket is used.
+ *
+ * Return: None
+ */
+static void nl_srv_ucast_oem(struct sk_buff *skb, int dst_pid, int flag)
+{
+#ifdef CNSS_GENL
+ nl_srv_ucast(skb, dst_pid, flag, WLAN_NL_MSG_OEM,
+ CLD80211_MCGRP_OEM_MSGS);
+#else
+ nl_srv_ucast(skb, dst_pid, flag);
+#endif
+}
/**---------------------------------------------------------------------------
\brief send_oem_reg_rsp_nlink_msg() - send oem registration response
@@ -247,7 +270,7 @@
"%s: sending App Reg Response length (%d) to process pid (%d)",
__func__, aniHdr->length, pHddCtx->oem_pid);
- (void)nl_srv_ucast(skb, pHddCtx->oem_pid, MSG_DONTWAIT);
+ (void)nl_srv_ucast_oem(skb, pHddCtx->oem_pid, MSG_DONTWAIT);
return;
}
@@ -299,7 +322,7 @@
"%s: sending oem error response to process pid (%d)",
__func__, app_pid);
- (void)nl_srv_ucast(skb, app_pid, MSG_DONTWAIT);
+ (void)nl_srv_ucast_oem(skb, app_pid, MSG_DONTWAIT);
return;
}
@@ -367,7 +390,7 @@
"%s: sending Oem Data Response of len (%d) to process pid (%d)",
__func__, length, pHddCtx->oem_pid);
- (void)nl_srv_ucast(skb, pHddCtx->oem_pid, MSG_DONTWAIT);
+ (void)nl_srv_ucast_oem(skb, pHddCtx->oem_pid, MSG_DONTWAIT);
return;
}
@@ -601,7 +624,7 @@
"%s: sending channel info resp for num channels (%d) to pid (%d)",
__func__, numOfChannels, pHddCtx->oem_pid);
- (void)nl_srv_ucast(skb, pHddCtx->oem_pid, MSG_DONTWAIT);
+ (void)nl_srv_ucast_oem(skb, pHddCtx->oem_pid, MSG_DONTWAIT);
return 0;
}
@@ -720,11 +743,202 @@
pPeerInfo->peer_chan_info.reg_info_2,
pPeerInfo->reserved0);
- (void)nl_srv_ucast(skb, pHddCtx->oem_pid, MSG_DONTWAIT);
+ (void)nl_srv_ucast_oem(skb, pHddCtx->oem_pid, MSG_DONTWAIT);
return;
}
+#ifdef CNSS_GENL
+/*
+ * Callback function invoked by Netlink service for all netlink
+ * messages (from user space) addressed to WLAN_NL_MSG_OEM
+ */
+
+/**
+ * oem_msg_callback() - handler for OEM messages
+ * @msg_hdr: pointer to the received ANI message header
+ * @len: length of the received data
+ * @pid: process id of the sender
+ *
+ * This function handles messages received from user space that are
+ * addressed to WLAN_NL_MSG_OEM.
+ *
+ * Return: zero on success, error number on failure
+ */
+static int oem_msg_callback(tAniMsgHdr *msg_hdr, int len, int pid)
+{
+ int ret;
+ char *sign_str = NULL;
+
+ ret = wlan_hdd_validate_context(pHddCtx);
+ if (0 != ret)
+ return ret;
+
+ if (!msg_hdr) {
+ hddLog(LOGE, FL("Message header null"));
+ send_oem_err_rsp_nlink_msg(pid, OEM_ERR_NULL_MESSAGE_HEADER);
+ return -EPERM;
+ }
+
+ if (len < sizeof(tAniMsgHdr) + msg_hdr->length) {
+ hddLog(LOGE, FL("Invalid msg len, len (%d), msg_hdr->len (%d)"),
+ len, msg_hdr->length);
+ send_oem_err_rsp_nlink_msg(pid,
+ OEM_ERR_INVALID_MESSAGE_LENGTH);
+ return -EPERM;
+ }
+
+ switch (msg_hdr->type) {
+ case ANI_MSG_APP_REG_REQ:
+ /* Registration request is only allowed for Qualcomm Application */
+ hddLog(LOG1, FL("Received App Reg Req from App process pid(%d), len(%d)"),
+ pid, msg_hdr->length);
+
+ sign_str = (char *)((char *)msg_hdr + sizeof(tAniMsgHdr));
+ if ((OEM_APP_SIGNATURE_LEN == msg_hdr->length) &&
+ (0 == strncmp(sign_str, OEM_APP_SIGNATURE_STR,
+ OEM_APP_SIGNATURE_LEN))) {
+ hddLog(LOG1, FL("Valid App Reg Req from oem app process pid(%d)"),
+ pid);
+
+ pHddCtx->oem_app_registered = TRUE;
+ pHddCtx->oem_pid = pid;
+ send_oem_reg_rsp_nlink_msg();
+ } else {
+ hddLog(LOGE, FL("Invalid signature in App Reg Request from pid(%d)"),
+ pid);
+ send_oem_err_rsp_nlink_msg(pid,
+ OEM_ERR_INVALID_SIGNATURE);
+ return -EPERM;
+ }
+ break;
+
+ case ANI_MSG_OEM_DATA_REQ:
+ hddLog(LOG1, FL("Received Oem Data Request length(%d) from pid: %d"),
+ msg_hdr->length, pid);
+
+ if ((!pHddCtx->oem_app_registered) ||
+ (pid != pHddCtx->oem_pid)) {
+ /* either oem app is not registered yet or pid is different */
+ hddLog(LOGE, FL("OEM DataReq: app not registered(%d) or incorrect pid(%d)"),
+ pHddCtx->oem_app_registered, pid);
+ send_oem_err_rsp_nlink_msg(pid,
+ OEM_ERR_APP_NOT_REGISTERED);
+ return -EPERM;
+ }
+
+ if ((!msg_hdr->length) || (OEM_DATA_REQ_SIZE < msg_hdr->length)) {
+ hddLog(LOGE, FL("Invalid length (%d) in Oem Data Request"),
+ msg_hdr->length);
+ send_oem_err_rsp_nlink_msg(pid,
+ OEM_ERR_INVALID_MESSAGE_LENGTH);
+ return -EPERM;
+ }
+ oem_process_data_req_msg(msg_hdr->length,
+ (char *) ((char *)msg_hdr +
+ sizeof(tAniMsgHdr)));
+ break;
+
+ case ANI_MSG_CHANNEL_INFO_REQ:
+ hddLog(LOG1,
+ FL("Received channel info request, num channel(%d) from pid: %d"),
+ msg_hdr->length, pid);
+
+ if ((!pHddCtx->oem_app_registered) ||
+ (pid != pHddCtx->oem_pid)) {
+ /* either oem app is not registered yet or pid is different */
+ hddLog(LOGE,
+ FL("Chan InfoReq: app not registered(%d) or incorrect pid(%d)"),
+ pHddCtx->oem_app_registered, pid);
+ send_oem_err_rsp_nlink_msg(pid,
+ OEM_ERR_APP_NOT_REGISTERED);
+ return -EPERM;
+ }
+
+ /* message length contains list of channel ids */
+ if ((!msg_hdr->length) ||
+ (WNI_CFG_VALID_CHANNEL_LIST_LEN < msg_hdr->length)) {
+ hddLog(LOGE,
+ FL("Invalid length (%d) in channel info request"),
+ msg_hdr->length);
+ send_oem_err_rsp_nlink_msg(pid,
+ OEM_ERR_INVALID_MESSAGE_LENGTH);
+ return -EPERM;
+ }
+ oem_process_channel_info_req_msg(msg_hdr->length,
+ (char *)((char*)msg_hdr + sizeof(tAniMsgHdr)));
+ break;
+
+ default:
+ hddLog(LOGE,
+ FL("Received Invalid message type (%d), length (%d)"),
+ msg_hdr->type, msg_hdr->length);
+ send_oem_err_rsp_nlink_msg(pid,
+ OEM_ERR_INVALID_MESSAGE_TYPE);
+ return -EPERM;
+ }
+ return 0;
+}
+/**
+ * oem_cmd_handler() - API to handle OEM commands
+ * @data: Pointer to data
+ * @data_len: length of the received data
+ * @ctx: Pointer to the context
+ * @pid: Process id
+ *
+ * This API handles commands from the OEM application in user space and
+ * sends back an event to user space if necessary.
+ *
+ * Return: None
+ */
+static void oem_cmd_handler(const void *data, int data_len, void *ctx, int pid)
+{
+ tAniMsgHdr *msg_hdr;
+ int ret;
+ struct nlattr *tb[CLD80211_ATTR_MAX + 1];
+
+ ret = wlan_hdd_validate_context(pHddCtx);
+ if (ret) {
+ hddLog(LOGE, FL("hdd ctx validate fails"));
+ return;
+ }
+
+ if (nla_parse(tb, CLD80211_ATTR_MAX, data, data_len, NULL)) {
+ hddLog(LOGE, FL("Invalid ATTR"));
+ return;
+ }
+
+ if (!tb[CLD80211_ATTR_DATA]) {
+ hddLog(LOGE, FL("attr ATTR_DATA failed"));
+ return;
+ }
+
+ msg_hdr = (tAniMsgHdr *)nla_data(tb[CLD80211_ATTR_DATA]);
+ if (!msg_hdr) {
+ hddLog(LOGE, FL("msg_hdr null"));
+ send_oem_err_rsp_nlink_msg(pid, OEM_ERR_NULL_MESSAGE_HEADER);
+ return;
+ }
+ oem_msg_callback(msg_hdr, nla_len(tb[CLD80211_ATTR_DATA]), pid);
+
+ return;
+}
+
+/**
+ * oem_activate_service() - API to register the oem command handler
+ * @hdd_ctx: Pointer to HDD Context
+ *
+ * This API is used to register the oem app command handler.
+ *
+ * Return: 0
+ */
+int oem_activate_service(void *hdd_ctx)
+{
+ pHddCtx = (struct hdd_context_s *) hdd_ctx;
+ register_cld_cmd_cb(WLAN_NL_MSG_OEM, oem_cmd_handler, NULL);
+ return 0;
+}
+#else
/*
* Callback function invoked by Netlink service for all netlink
* messages (from user space) addressed to WLAN_NL_MSG_OEM
@@ -744,10 +958,10 @@
{
struct nlmsghdr *nlh;
tAniMsgHdr *msg_hdr;
- int ret;
char *sign_str = NULL;
- nlh = (struct nlmsghdr *)skb->data;
+ int ret;
+ nlh = (struct nlmsghdr *)skb->data;
if (!nlh) {
hddLog(LOGE, FL("Netlink header null"));
return -EPERM;
@@ -884,20 +1098,18 @@
an OEM application process.
\param -
- - pAdapter - pointer to HDD adapter
+ - hdd_ctx: Pointer to HDD context
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
-int oem_activate_service(void *pAdapter)
+int oem_activate_service(void *hdd_ctx)
{
- pHddCtx = (struct hdd_context_s*) pAdapter;
+ pHddCtx = (struct hdd_context_s *) hdd_ctx;
/* Register the msg handler for msgs addressed to WLAN_NL_MSG_OEM */
nl_srv_register(WLAN_NL_MSG_OEM, __oem_msg_callback);
return 0;
}
-
-
-
+#endif
#endif
diff --git a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_wext.c b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_wext.c
index 46ec11c..ab5cc2d 100644
--- a/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_wext.c
+++ b/drivers/staging/qcacld-2.0/CORE/HDD/src/wlan_hdd_wext.c
@@ -48,7 +48,6 @@
#include <linux/wireless.h>
#include <macTrace.h>
#include <wlan_hdd_includes.h>
-#include <wlan_btc_svc.h>
#include <wlan_nlink_common.h>
#include <vos_api.h>
#include <net/arp.h>
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/inc/aniGlobal.h b/drivers/staging/qcacld-2.0/CORE/MAC/inc/aniGlobal.h
index f8f0fd3..4ae4f22 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/inc/aniGlobal.h
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/inc/aniGlobal.h
@@ -74,7 +74,6 @@
#include "smeInternal.h"
#include "sapApi.h"
#include "ccmApi.h"
-#include "btcApi.h"
#include "csrInternal.h"
#ifdef FEATURE_OEM_DATA_SUPPORT
@@ -1211,7 +1210,6 @@
tOemDataStruct oemData;
#endif
tPmcInfo pmc;
- tSmeBtcInfo btc;
tCcm ccm;
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/inc/qwlan_version.h b/drivers/staging/qcacld-2.0/CORE/MAC/inc/qwlan_version.h
index 8d45f31..23d9680 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/inc/qwlan_version.h
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/inc/qwlan_version.h
@@ -42,9 +42,9 @@
#define QWLAN_VERSION_MINOR 4
#define QWLAN_VERSION_PATCH 23
#define QWLAN_VERSION_EXTRA ""
-#define QWLAN_VERSION_BUILD 14
+#define QWLAN_VERSION_BUILD 20
-#define QWLAN_VERSIONSTR "4.4.23.014"
+#define QWLAN_VERSIONSTR "4.4.23.020"
#define AR6320_REV1_VERSION 0x5000000
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/inc/sirApi.h b/drivers/staging/qcacld-2.0/CORE/MAC/inc/sirApi.h
index 33ccda4..5e50437 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/inc/sirApi.h
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/inc/sirApi.h
@@ -89,6 +89,7 @@
#define MAXNUM_PERIODIC_TX_PTRNS 6
#define WIFI_SCANNING_MAC_OUI_LENGTH 3
+#define PROBE_REQ_BITMAP_LEN 8
#define MAX_LEN_UDP_RESP_OFFLOAD 128
@@ -931,6 +932,15 @@
tANI_U16 uIEFieldLen;
tANI_U16 uIEFieldOffset;
+ uint32_t enable_scan_randomization;
+ uint8_t mac_addr[VOS_MAC_ADDR_SIZE];
+ uint8_t mac_addr_mask[VOS_MAC_ADDR_SIZE];
+ bool ie_whitelist;
+ uint32_t probe_req_ie_bitmap[PROBE_REQ_BITMAP_LEN];
+ uint32_t num_vendor_oui;
+ uint32_t oui_field_len;
+ uint32_t oui_field_offset;
+
//channelList MUST be the last field of this structure
tSirChannelList channelList;
/*-----------------------------
@@ -949,7 +959,10 @@
----------------------------- <--+
... variable size uIEFiled
up to uIEFieldLen (can be 0)
- -----------------------------*/
+ -----------------------------
+ ... variable size up to num_vendor_oui
+ struct vendor_oui voui;
+ */
} tSirSmeScanReq, *tpSirSmeScanReq;
typedef struct sSirSmeScanAbortReq
@@ -970,8 +983,9 @@
#ifdef FEATURE_OEM_DATA_SUPPORT
#ifndef OEM_DATA_REQ_SIZE
-#define OEM_DATA_REQ_SIZE 280
+#define OEM_DATA_REQ_SIZE 500
#endif
+
#ifndef OEM_DATA_RSP_SIZE
#define OEM_DATA_RSP_SIZE 1724
#endif
@@ -981,7 +995,7 @@
tANI_U16 messageType; /* eWNI_SME_OEM_DATA_REQ */
tANI_U16 messageLen;
tSirMacAddr selfMacAddr;
- uint8_t data_len;
+ uint32_t data_len;
uint8_t *data;
} tSirOemDataReq, *tpSirOemDataReq;
@@ -3728,6 +3742,15 @@
uint8_t p24GProbeTemplate[SIR_PNO_MAX_PB_REQ_SIZE];
uint16_t us5GProbeTemplateLen;
uint8_t p5GProbeTemplate[SIR_PNO_MAX_PB_REQ_SIZE];
+
+ /* mac address randomization attributes */
+ uint32_t enable_pno_scan_randomization;
+ uint8_t mac_addr[VOS_MAC_ADDR_SIZE];
+ uint8_t mac_addr_mask[VOS_MAC_ADDR_SIZE];
+ bool ie_whitelist;
+ uint32_t probe_req_ie_bitmap[PROBE_REQ_BITMAP_LEN];
+ uint32_t num_vendor_oui;
+ /* followed by one or more struct vendor_oui */
} tSirPNOScanReq, *tpSirPNOScanReq;
typedef struct sSirSetRSSIFilterReq
@@ -4393,6 +4416,16 @@
tSirP2pScanType p2pScanType;
tANI_U16 uIEFieldLen;
tANI_U16 uIEFieldOffset;
+
+ uint32_t enable_scan_randomization;
+ uint8_t mac_addr[VOS_MAC_ADDR_SIZE];
+ uint8_t mac_addr_mask[VOS_MAC_ADDR_SIZE];
+ bool ie_whitelist;
+ uint32_t probe_req_ie_bitmap[PROBE_REQ_BITMAP_LEN];
+ uint32_t num_vendor_oui;
+ uint32_t oui_field_len;
+ uint32_t oui_field_offset;
+
tSirChannelList channelList;
/*-----------------------------
sSirScanOffloadReq....
@@ -4410,7 +4443,10 @@
----------------------------- <--+
... variable size uIEField
up to uIEFieldLen (can be 0)
- -----------------------------*/
+ -----------------------------
+ ... variable size up to num_vendor_oui
+ struct vendor_oui voui;
+ ------------------------*/
} tSirScanOffloadReq, *tpSirScanOffloadReq;
/**
@@ -5637,9 +5673,25 @@
tANI_U8 stopReq;
} tSirLLStatsClearReq, *tpSirLLStatsClearReq;
+/**
+ * struct vendor_oui - probe request ie vendor oui information
+ * @oui_type: type of the vendor oui (3 valid octets)
+ * @oui_subtype: subtype of the vendor oui (1 valid octet)
+ */
+struct vendor_oui {
+ uint32_t oui_type;
+ uint32_t oui_subtype;
+};
+
typedef struct
{
tANI_U8 oui[WIFI_SCANNING_MAC_OUI_LENGTH];
+ uint32_t vdev_id;
+ uint32_t enb_probe_req_sno_randomization;
+ bool ie_whitelist;
+ uint32_t probe_req_ie_bitmap[PROBE_REQ_BITMAP_LEN];
+ uint32_t num_vendor_oui;
+ /* Followed by 0 or more struct vendor_oui */
} tSirScanMacOui, *tpSirScanMacOui;
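
Reviewer note: tSirScanMacOui (like tSirPNOScanReq and the scan requests above) is followed in memory by num_vendor_oui packed struct vendor_oui entries rather than by a pointer. A compact sketch of that allocation and access pattern, with a simplified stand-in structure:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct vendor_oui { uint32_t oui_type; uint32_t oui_subtype; };

    /* Simplified stand-in for tSirScanMacOui: the real structure carries more
     * fields, but the trailing-array allocation pattern is identical. */
    struct scan_mac_oui {
        uint8_t  oui[3];
        uint32_t num_vendor_oui;
        /* followed in memory by num_vendor_oui struct vendor_oui entries */
    };

    static struct vendor_oui *trailing_ouis(struct scan_mac_oui *msg)
    {
        /* same pointer arithmetic the HDD code uses when filling the attrs */
        return (struct vendor_oui *)((uint8_t *)msg + sizeof(*msg));
    }

    int main(void)
    {
        uint32_t n = 2, i;
        struct scan_mac_oui *msg;
        struct vendor_oui *ouis;

        msg = calloc(1, sizeof(*msg) + n * sizeof(struct vendor_oui));
        if (!msg)
            return 1;
        msg->num_vendor_oui = n;
        ouis = trailing_ouis(msg);
        ouis[0].oui_type = 0xccbbaa; ouis[0].oui_subtype = 0x01;  /* "AABBCC01" */
        ouis[1].oui_type = 0xee2211; ouis[1].oui_subtype = 0xff;  /* "1122EEFF" */
        for (i = 0; i < n; i++)
            printf("oui[%u]: type=0x%06x subtype=0x%02x\n", (unsigned)i,
                   (unsigned)ouis[i].oui_type, (unsigned)ouis[i].oui_subtype);
        free(msg);
        return 0;
    }
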
enum {
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/inc/wniApi.h b/drivers/staging/qcacld-2.0/CORE/MAC/inc/wniApi.h
index e46e4be..28210a5 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/inc/wniApi.h
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/inc/wniApi.h
@@ -278,8 +278,6 @@
eWNI_SME_REGISTER_MGMT_FRAME_REQ,
- eWNI_SME_COEX_IND,
-
#ifdef FEATURE_WLAN_SCAN_PNO
eWNI_SME_PREF_NETWORK_FOUND_IND,
#endif // FEATURE_WLAN_SCAN_PNO
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/include/limGlobal.h b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/include/limGlobal.h
index a7b4ca8..be13f29b 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/include/limGlobal.h
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/include/limGlobal.h
@@ -313,18 +313,11 @@
#ifdef FEATURE_OEM_DATA_SUPPORT
-#ifndef OEM_DATA_REQ_SIZE
-#define OEM_DATA_REQ_SIZE 280
-#endif
-#ifndef OEM_DATA_RSP_SIZE
-#define OEM_DATA_RSP_SIZE 1724
-#endif
-
// OEM Data related structure definitions
typedef struct sLimMlmOemDataReq
{
tSirMacAddr selfMacAddr;
- uint8_t data_len;
+ uint32_t data_len;
uint8_t *data;
} tLimMlmOemDataReq, *tpLimMlmOemDataReq;
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessCfgUpdates.c b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessCfgUpdates.c
index 37e45a8e..488b60a 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessCfgUpdates.c
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessCfgUpdates.c
@@ -552,28 +552,6 @@
pMac->lim.gLimAssocStaLimit = (tANI_U16)val1;
break;
- case WNI_CFG_DEL_ALL_RX_BA_SESSIONS_2_4_G_BTC:
- if (wlan_cfgGetInt
- (pMac, WNI_CFG_DEL_ALL_RX_BA_SESSIONS_2_4_G_BTC, &val1) !=
- eSIR_SUCCESS)
- {
- limLog(pMac, LOGE,
- FL( "Unable to get WNI_CFG_DEL_ALL_RX_BA_SESSIONS_2_4_G_BTC"));
- break;
- }
- if (val1)
- {
- limLog(pMac, LOGW,
- FL("BTC requested to disable all RX BA sessions"));
- limDelPerBssBASessionsBtc(pMac);
- }
- else
- {
- limLog(pMac, LOGW,
- FL("Resetting the WNI_CFG_DEL_ALL_RX_BA_SESSIONS_2_4_G_BTC"));
- }
- break;
-
default:
break;
}
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessMlmReqMessages.c b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessMlmReqMessages.c
index f925056..b877c9a 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessMlmReqMessages.c
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessMlmReqMessages.c
@@ -2824,10 +2824,12 @@
mlmDisassocCnf.resultCode = eSIR_SME_INVALID_PARAMETERS;
goto end;
}
- limLog(pMac, LOG1,FL("Process DisAssoc Req on sessionID %d Systemrole %d "
- "mlmstate %d from: "MAC_ADDRESS_STR), pMlmDisassocReq->sessionId,
- GET_LIM_SYSTEM_ROLE(psessionEntry), psessionEntry->limMlmState,
- MAC_ADDR_ARRAY(pMlmDisassocReq->peerMacAddr));
+ limLog(pMac, LOGE, FL("Process DisAssoc Req on sessionID %d Systemrole %d "
+ "reason code: %d mlmstate %d from: "MAC_ADDRESS_STR),
+ pMlmDisassocReq->sessionId,
+ GET_LIM_SYSTEM_ROLE(psessionEntry), pMlmDisassocReq->reasonCode,
+ psessionEntry->limMlmState,
+ MAC_ADDR_ARRAY(pMlmDisassocReq->peerMacAddr));
sirCopyMacAddr(currentBssId,psessionEntry->bssId);
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessSmeReqMessages.c b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessSmeReqMessages.c
index 6616c21..bb43c12 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessSmeReqMessages.c
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limProcessSmeReqMessages.c
@@ -1193,7 +1193,7 @@
/* The tSirScanOffloadReq will reserve the space for first channel,
so allocate the memory for (numChannels - 1) and uIEFieldLen */
len = sizeof(tSirScanOffloadReq) + (pScanReq->channelList.numChannels - 1) +
- pScanReq->uIEFieldLen;
+ pScanReq->uIEFieldLen + pScanReq->oui_field_len;
if (!pMac->per_band_chainmask_supp) {
if (IS_DOT11_MODE_HT(pScanReq->dot11mode)) {
@@ -1287,7 +1287,8 @@
pScanOffloadReq->uIEFieldLen = pScanReq->uIEFieldLen;
pScanOffloadReq->uIEFieldOffset = len - addn_ie_len -
- pScanOffloadReq->uIEFieldLen;
+ pScanOffloadReq->uIEFieldLen -
+ pScanReq->oui_field_len;
vos_mem_copy(
(tANI_U8 *) pScanOffloadReq + pScanOffloadReq->uIEFieldOffset,
(tANI_U8 *) pScanReq + pScanReq->uIEFieldOffset,
@@ -1323,6 +1324,32 @@
#endif /* WLAN_FEATURE_11AC */
}
+ pScanOffloadReq->enable_scan_randomization =
+ pScanReq->enable_scan_randomization;
+ if (pScanOffloadReq->enable_scan_randomization) {
+ vos_mem_copy(pScanOffloadReq->mac_addr, pScanReq->mac_addr,
+ VOS_MAC_ADDR_SIZE);
+ vos_mem_copy(pScanOffloadReq->mac_addr_mask, pScanReq->mac_addr_mask,
+ VOS_MAC_ADDR_SIZE);
+ }
+
+ pScanOffloadReq->oui_field_len = pScanReq->oui_field_len;
+ pScanOffloadReq->num_vendor_oui = pScanReq->num_vendor_oui;
+ pScanOffloadReq->ie_whitelist = pScanReq->ie_whitelist;
+ if (pScanOffloadReq->ie_whitelist)
+ vos_mem_copy(pScanOffloadReq->probe_req_ie_bitmap,
+ pScanReq->probe_req_ie_bitmap,
+ PROBE_REQ_BITMAP_LEN * sizeof(uint32_t));
+ pScanOffloadReq->oui_field_offset = sizeof(tSirScanOffloadReq) +
+ (pScanOffloadReq->channelList.numChannels - 1) +
+ pScanOffloadReq->uIEFieldLen;
+ if (pScanOffloadReq->num_vendor_oui != 0) {
+ vos_mem_copy(
+ (tANI_U8 *) pScanOffloadReq + pScanOffloadReq->oui_field_offset,
+ (uint8_t *) pScanReq + pScanReq->oui_field_offset,
+ pScanReq->oui_field_len);
+ }
+
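
Reviewer note: a rough sketch of the flat buffer layout implied by the oui_field_offset computation above (simplified: the additional-IE and per-band adjustments made in this function are ignored, and the structure size is a made-up placeholder):

    #include <stdint.h>
    #include <stdio.h>

    /* Rough picture of the flat scan-offload buffer after this change:
     * [struct header | remaining channel list | IE blob | vendor OUI blob].
     * The header already reserves room for the first channel, hence the -1. */
    static void show_layout(uint32_t struct_size, uint32_t num_channels,
                            uint32_t ie_len, uint32_t oui_len)
    {
        uint32_t ie_off  = struct_size + (num_channels - 1);
        uint32_t oui_off = ie_off + ie_len;
        uint32_t total   = oui_off + oui_len;

        printf("IE @ %u, OUIs @ %u, total %u bytes\n",
               (unsigned)ie_off, (unsigned)oui_off, (unsigned)total);
    }

    int main(void)
    {
        /* 512 is a made-up placeholder for sizeof(tSirScanOffloadReq) */
        show_layout(512, 14, 64, 2 * 8);
        return 0;
    }
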
rc = wdaPostCtrlMsg(pMac, &msg);
if (rc != eSIR_SUCCESS)
{
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limScanResultUtils.c b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limScanResultUtils.c
index db6c5f2..5ddb392 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limScanResultUtils.c
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limScanResultUtils.c
@@ -817,9 +817,6 @@
}
}
- if ((false == found) && dontUpdateAll)
- return eHAL_STATUS_FAILURE;
-
//for now, only rssi, we can add more if needed
if ((action == LIM_HASH_UPDATE) && dontUpdateAll && rssi && rssi_raw)
{
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limTimerUtils.c b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limTimerUtils.c
index 2968a47..41fb619 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limTimerUtils.c
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limTimerUtils.c
@@ -1063,17 +1063,6 @@
if (pMac->lim.gpLimMlmScanReq) {
val =
SYS_MS_TO_TICKS(pMac->lim.gpLimMlmScanReq->minChannelTime);
- if (pMac->btc.btcScanCompromise) {
- if (pMac->lim.gpLimMlmScanReq->minChannelTimeBtc) {
- val = SYS_MS_TO_TICKS(
- pMac->lim.gpLimMlmScanReq->minChannelTimeBtc);
- limLog(pMac, LOG1,
- FL("Using BTC Min Active Scan time"));
- } else {
- limLog(pMac, LOGE,
- FL("BTC Active Scan Min Time is Not Set"));
- }
- }
} else {
limLog(pMac, LOGE, FL("gpLimMlmScanReq is NULL"));
break;
@@ -1100,17 +1089,6 @@
}
val = SYS_MS_TO_TICKS(pMac->lim.gpLimMlmScanReq->minChannelTime)/2;
- if (pMac->btc.btcScanCompromise)
- {
- if (pMac->lim.gpLimMlmScanReq->minChannelTimeBtc)
- {
- val = SYS_MS_TO_TICKS(pMac->lim.gpLimMlmScanReq->minChannelTimeBtc)/2;
- }
- else
- {
- limLog(pMac, LOGE, FL("BTC Active Scan Min Time is Not Set"));
- }
- }
if (tx_timer_change(&pMac->lim.limTimers.gLimPeriodicProbeReqTimer,
val, 0) != TX_SUCCESS)
{
@@ -1140,17 +1118,6 @@
if (pMac->lim.gpLimMlmScanReq) {
val = SYS_MS_TO_TICKS(
pMac->lim.gpLimMlmScanReq->maxChannelTime);
- if (pMac->btc.btcScanCompromise) {
- if (pMac->lim.gpLimMlmScanReq->maxChannelTimeBtc) {
- val = SYS_MS_TO_TICKS(
- pMac->lim.gpLimMlmScanReq->maxChannelTimeBtc);
- limLog(pMac, LOG1,
- FL("Using BTC Max Active Scan time"));
- } else {
- limLog(pMac, LOGE,
- FL("BTC Active Scan Max Time is Not Set"));
- }
- }
} else {
limLog(pMac, LOGE, FL("gpLimMlmScanReq is NULL"));
break;
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limUtils.c b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limUtils.c
index 626e5eb4..7259dd9 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limUtils.c
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limUtils.c
@@ -5227,28 +5227,6 @@
}
/** -------------------------------------------------------------
-\fn limDelAllBASessionsBtc
-\brief Deletes all the existing BA recipient sessions in 2.4GHz
- band.
-\param tpAniSirGlobal pMac
-\return None
--------------------------------------------------------------*/
-
-void limDelPerBssBASessionsBtc(tpAniSirGlobal pMac)
-{
- tANI_U8 sessionId;
- tpPESession pSessionEntry;
- pSessionEntry = peFindSessionByBssid(pMac,pMac->btc.btcBssfordisableaggr,
- &sessionId);
- if (pSessionEntry)
- {
- PELOGW(limLog(pMac, LOGW,
- "Deleting the BA for session %d as host got BTC event", sessionId);)
- limDeleteBASessions(pMac, pSessionEntry, BA_RECIPIENT);
- }
-}
-
-/** -------------------------------------------------------------
\fn limProcessDelTsInd
\brief Handles the DeleteTS indication coming from HAL or generated by
PE itself in some error cases. Validates the request, sends the
diff --git a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limUtils.h b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limUtils.h
index 3bc9314..8ff6cc1 100644
--- a/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limUtils.h
+++ b/drivers/staging/qcacld-2.0/CORE/MAC/src/pe/lim/limUtils.h
@@ -398,7 +398,6 @@
void limDeleteStaContext(tpAniSirGlobal pMac, tpSirMsgQ limMsg);
void limProcessAddBaInd(tpAniSirGlobal pMac, tpSirMsgQ limMsg);
void limDeleteBASessions(tpAniSirGlobal pMac, tpPESession pSessionEntry, tANI_U32 baDirection);
-void limDelPerBssBASessionsBtc(tpAniSirGlobal pMac);
void limDelAllBASessions(tpAniSirGlobal pMac);
void limDeleteDialogueTokenList(tpAniSirGlobal pMac);
tSirRetStatus limSearchAndDeleteDialogueToken(tpAniSirGlobal pMac, tANI_U8 token, tANI_U16 assocId, tANI_U16 tid);
diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/dbglog_id.h b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/dbglog_id.h
index 66eafe6..6922d6f 100644
--- a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/dbglog_id.h
+++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/dbglog_id.h
@@ -33,16 +33,6 @@
#endif
/*
- * The target state machine framework will send dbglog messages on behalf on
- * other modules. We do this do avoid each target module adding identical
- * dbglog code for state transitions and event processing. We also don't want
- * to force each module to define the the same XXX_DBGID_SM_MSG with the same
- * value below. Instead we use a special ID that the host dbglog code
- * recognizes as a message sent by the SM on behalf on another module.
- */
-#define DBGLOG_DBGID_SM_FRAMEWORK_PROXY_DBGLOG_MSG 1000
-
-/*
* The nomenclature for the debug identifiers is MODULE_DESCRIPTION.
* Please ensure that the definition of any new debugid introduced is captured
* between the <MODULE>_DBGID_DEFINITION_START and
diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wlan_defs.h b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wlan_defs.h
index 1a2b329..f19f1ce 100644
--- a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wlan_defs.h
+++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wlan_defs.h
@@ -60,6 +60,16 @@
#define SUPPORT_11AX 0 /* 11ax not supported by default */
#endif
+/* defines to set Packet extension values which can be 0 us, 8 us or 16 us */
+/* NOTE: Below values cannot be changed without breaking WMI Compatibility */
+#define MAX_HE_NSS 8
+#define MAX_HE_MODULATION 8
+#define MAX_HE_RU 4
+#define HE_MODULATION_NONE 7
+#define HE_PET_0_USEC 0
+#define HE_PET_8_USEC 1
+#define HE_PET_16_USEC 2
+
typedef enum {
MODE_11A = 0, /* 11a Mode */
MODE_11G = 1, /* 11b/g Mode */
diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wlan_module_ids.h b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wlan_module_ids.h
index 716a4f6..da99e3c 100644
--- a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wlan_module_ids.h
+++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wlan_module_ids.h
@@ -100,6 +100,7 @@
WLAN_MODULE_QBOOST, /* 0x41 */
WLAN_MODULE_P2P_LISTEN_OFFLOAD, /* 0x42 */
WLAN_MODULE_HALPHY, /* 0x43 */
+ WAL_MODULE_ENQ, /* 0x44 */
WLAN_MODULE_ID_MAX,
WLAN_MODULE_ID_INVALID = WLAN_MODULE_ID_MAX,
diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi.h b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi.h
index ae7500c..7d31ff2 100644
--- a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi.h
+++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi.h
@@ -91,9 +91,16 @@
#define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)
-#define WMI_EP_APASS 0x0
+/* TYPO: leave incorrect name as an alias for the correct name */
+#define WMI_EP_APASS WMI_EP_APSS
+/* WLAN driver running on apps processor sub-system */
+#define WMI_EP_APSS 0x0
#define WMI_EP_LPASS 0x1
#define WMI_EP_SENSOR 0x2
+/* WLAN driver running on NANO Hub */
+#define WMI_EP_NANOHUB 0x3
+#define WMI_EP_MODEM 0x4
+#define WMI_EP_LOCATION 0x5
/*
* Control Path
diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_services.h b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_services.h
index 37dc6f9..89c88f9 100644
--- a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_services.h
+++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_services.h
@@ -42,136 +42,273 @@
-typedef enum {
- WMI_SERVICE_BEACON_OFFLOAD=0, /* beacon offload */
- WMI_SERVICE_SCAN_OFFLOAD, /* scan offload */
- WMI_SERVICE_ROAM_SCAN_OFFLOAD, /* roam scan offload */
- WMI_SERVICE_BCN_MISS_OFFLOAD, /* beacon miss offload */
- WMI_SERVICE_STA_PWRSAVE, /* fake sleep + basic power save */
- WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */
- WMI_SERVICE_AP_UAPSD, /* uapsd on AP */
- WMI_SERVICE_AP_DFS, /* DFS on AP */
- WMI_SERVICE_11AC, /* supports 11ac */
- WMI_SERVICE_BLOCKACK, /* Supports triggering ADDBA/DELBA from host*/
- WMI_SERVICE_PHYERR, /* PHY error */
- WMI_SERVICE_BCN_FILTER, /* Beacon filter support */
- WMI_SERVICE_RTT, /* RTT (round trip time) support */
- WMI_SERVICE_WOW, /* WOW Support */
- WMI_SERVICE_RATECTRL_CACHE, /* Rate-control caching */
- WMI_SERVICE_IRAM_TIDS, /* TIDs in IRAM */
- WMI_SERVICE_ARPNS_OFFLOAD, /* ARP NS Offload support for STA vdev */
- WMI_SERVICE_NLO, /* Network list offload service */
- WMI_SERVICE_GTK_OFFLOAD, /* GTK offload */
- WMI_SERVICE_SCAN_SCH, /* Scan Scheduler Service */
- WMI_SERVICE_CSA_OFFLOAD, /* CSA offload service */
- WMI_SERVICE_CHATTER, /* Chatter service */
- WMI_SERVICE_COEX_FREQAVOID, /* FW report freq range to avoid */
- WMI_SERVICE_PACKET_POWER_SAVE, /* packet power save service */
- WMI_SERVICE_FORCE_FW_HANG, /* Service to test the firmware recovery mechanism */
- WMI_SERVICE_GPIO, /* GPIO service */
- WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */
- WMI_STA_UAPSD_BASIC_AUTO_TRIG, /* Basic version of station UAPSD AC Trigger Generation Method with
- * variable tigger periods (service, delay, and suspend intervals) */
- WMI_STA_UAPSD_VAR_AUTO_TRIG, /* Station UAPSD AC Trigger Generation Method with variable
- * trigger periods (service, delay, and suspend intervals) */
- WMI_SERVICE_STA_KEEP_ALIVE, /* Serivce to support the STA KEEP ALIVE mechanism */
- WMI_SERVICE_TX_ENCAP, /* Packet type for TX encapsulation */
- WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, /* detect out-of-sync sleeping stations */
- WMI_SERVICE_EARLY_RX, /* adaptive early-rx feature */
- WMI_SERVICE_STA_SMPS, /* STA MIMO-PS */
- WMI_SERVICE_FWTEST, /* Firmware test service */
- WMI_SERVICE_STA_WMMAC, /* STA WMMAC */
- WMI_SERVICE_TDLS, /* TDLS support */
- WMI_SERVICE_BURST, /* SIFS spaced burst support */
- WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE, /* Dynamic beaocn interval change for SAP/P2p GO in MCC scenario */
- WMI_SERVICE_ADAPTIVE_OCS, /* Service to support adaptive off-channel scheduler */
- WMI_SERVICE_BA_SSN_SUPPORT, /* target will provide Sequence number for the peer/tid combo */
- WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
- WMI_SERVICE_WLAN_HB, /* wlan HB service */
- WMI_SERVICE_LTE_ANT_SHARE_SUPPORT, /* support LTE/WLAN antenna sharing */
- WMI_SERVICE_BATCH_SCAN, /*Service to support batch scan*/
- WMI_SERVICE_QPOWER, /* QPower service */
- WMI_SERVICE_PLMREQ,
- WMI_SERVICE_THERMAL_MGMT, /* thermal throttling support */
- WMI_SERVICE_RMC, /* RMC support */
- WMI_SERVICE_MHF_OFFLOAD, /* multi-hop forwarding offload */
- WMI_SERVICE_COEX_SAR, /* target support SAR tx limit from WMI_PDEV_PARAM_TXPOWER_LIMITxG */
- WMI_SERVICE_BCN_TXRATE_OVERRIDE, /* Will support the bcn/prb rsp rate override */
- WMI_SERVICE_NAN, /* Neighbor Awareness Network */
- WMI_SERVICE_L1SS_STAT, /* L1SS statistics counter report */
- WMI_SERVICE_ESTIMATE_LINKSPEED, /* Linkspeed Estimation per peer */
- WMI_SERVICE_OBSS_SCAN, /* Service to support OBSS scan */
- WMI_SERVICE_TDLS_OFFCHAN, /* TDLS off channel support */
- WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, /* TDLS UAPSD Buffer STA support */
- WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, /* TDLS UAPSD Sleep STA support */
- WMI_SERVICE_IBSS_PWRSAVE, /* IBSS power save support */
- WMI_SERVICE_LPASS, /*Service to support LPASS*/
- WMI_SERVICE_EXTSCAN, /* Extended Scans */
- WMI_SERVICE_D0WOW, /* D0-WOW Support */
- WMI_SERVICE_HSOFFLOAD, /* Hotspot offload feature Support */
- WMI_SERVICE_ROAM_HO_OFFLOAD, /* roam handover offload */
- WMI_SERVICE_RX_FULL_REORDER, /* target-based Rx full reorder */
- WMI_SERVICE_DHCP_OFFLOAD, /* DHCP offload support */
- WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, /* STA RX DATA offload to IPA support */
- WMI_SERVICE_MDNS_OFFLOAD, /* mDNS responder offload support */
- WMI_SERVICE_SAP_AUTH_OFFLOAD, /* softap auth offload */
- WMI_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT, /* Dual Band Simultaneous support */
- WMI_SERVICE_OCB, /* OCB mode support */
- WMI_SERVICE_AP_ARPNS_OFFLOAD, /* arp offload support for ap mode vdev */
- WMI_SERVICE_PER_BAND_CHAINMASK_SUPPORT, /* Per band chainmask support */
- WMI_SERVICE_PACKET_FILTER_OFFLOAD, /* Per vdev packet filters */
- WMI_SERVICE_MGMT_TX_HTT, /* Mgmt Tx via HTT interface */
- WMI_SERVICE_MGMT_TX_WMI, /* Mgmt Tx via WMI interface */
- WMI_SERVICE_EXT_MSG, /* WMI_SERVICE_READY_EXT msg follows */
- WMI_SERVICE_MAWC, /* Motion Aided WiFi Connectivity (MAWC)*/
- WMI_SERVICE_PEER_ASSOC_CONF, /* target will send ASSOC_CONF after ASSOC_CMD is processed */
- WMI_SERVICE_EGAP, /* enhanced green ap support */
- WMI_SERVICE_STA_PMF_OFFLOAD, /* FW supports 11W PMF Offload for STA */
- WMI_SERVICE_UNIFIED_WOW_CAPABILITY, /* FW supports unified D0 and D3 wow */
- WMI_SERVICE_ENHANCED_PROXY_STA, /* Enhanced ProxySTA mode support */
- WMI_SERVICE_ATF, /* Air Time Fairness support */
- WMI_SERVICE_COEX_GPIO, /* BTCOEX GPIO support */
- WMI_SERVICE_AUX_SPECTRAL_INTF, /* Aux Radio enhancement support for ignoring spectral scan intf from main radios */
- WMI_SERVICE_AUX_CHAN_LOAD_INTF, /* Aux Radio enhancement support for ignoring chan load intf from main radios*/
- WMI_SERVICE_BSS_CHANNEL_INFO_64, /* BSS channel info (freq, noise floor, 64-bit counters) event support */
- WMI_SERVICE_ENTERPRISE_MESH, /* Enterprise MESH Service Support */
- WMI_SERVICE_RESTRT_CHNL_SUPPORT, /* Restricted Channel Support */
- WMI_SERVICE_BPF_OFFLOAD, /* FW supports bpf offload */
- WMI_SERVICE_SYNC_DELETE_CMDS, /* FW sends response event for Peer, Vdev delete commands */
- WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT,
- WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT,
- WMI_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES, /* allow per-peer tx MCS min/max limits by host */
- WMI_SERVICE_NAN_DATA, /* FW supports NAN data */
- WMI_SERVICE_NAN_RTT, /* FW supports NAN RTT */
- WMI_SERVICE_11AX, /* FW supports 802.11ax */
+typedef enum {
+ WMI_SERVICE_BEACON_OFFLOAD = 0, /* beacon offload */
+ WMI_SERVICE_SCAN_OFFLOAD = 1, /* scan offload */
+ WMI_SERVICE_ROAM_SCAN_OFFLOAD = 2, /* roam scan offload */
+ WMI_SERVICE_BCN_MISS_OFFLOAD = 3, /* beacon miss offload */
+ /* fake sleep + basic power save */
+ WMI_SERVICE_STA_PWRSAVE = 4,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE = 5, /* uapsd, pspoll, force sleep */
+ WMI_SERVICE_AP_UAPSD = 6, /* uapsd on AP */
+ WMI_SERVICE_AP_DFS = 7, /* DFS on AP */
+ WMI_SERVICE_11AC = 8, /* supports 11ac */
+ /* Supports triggering ADDBA/DELBA from host*/
+ WMI_SERVICE_BLOCKACK = 9,
+ WMI_SERVICE_PHYERR = 10, /* PHY error */
+ WMI_SERVICE_BCN_FILTER = 11, /* Beacon filter support */
+ /* RTT (round trip time) support */
+ WMI_SERVICE_RTT = 12,
+ WMI_SERVICE_WOW = 13, /* WOW Support */
+ WMI_SERVICE_RATECTRL_CACHE = 14, /* Rate-control caching */
+ WMI_SERVICE_IRAM_TIDS = 15, /* TIDs in IRAM */
+ /* ARP NS Offload support for STA vdev */
+ WMI_SERVICE_ARPNS_OFFLOAD = 16,
+ /* Network list offload service */
+ WMI_SERVICE_NLO = 17,
+ WMI_SERVICE_GTK_OFFLOAD = 18, /* GTK offload */
+ WMI_SERVICE_SCAN_SCH = 19, /* Scan Scheduler Service */
+ WMI_SERVICE_CSA_OFFLOAD = 20, /* CSA offload service */
+ WMI_SERVICE_CHATTER = 21, /* Chatter service */
+ /* FW report freq range to avoid */
+ WMI_SERVICE_COEX_FREQAVOID = 22,
+ WMI_SERVICE_PACKET_POWER_SAVE = 23, /* packet power save service */
+ /* Service to test the firmware recovery mechanism */
+ WMI_SERVICE_FORCE_FW_HANG = 24,
+ WMI_SERVICE_GPIO = 25, /* GPIO service */
+ /* Modulated DTIM support */
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM = 26,
+ /**
+ * Basic version of station UAPSD AC Trigger Generation Method with
+ * variable trigger periods (service, delay, and suspend intervals)
+ */
+ WMI_STA_UAPSD_BASIC_AUTO_TRIG = 27,
+ /**
+ * Station UAPSD AC Trigger Generation Method with variable
+ * trigger periods (service, delay, and suspend intervals)
+ */
+ WMI_STA_UAPSD_VAR_AUTO_TRIG = 28,
+ /* Service to support the STA KEEP ALIVE mechanism */
+ WMI_SERVICE_STA_KEEP_ALIVE = 29,
+ /* Packet type for TX encapsulation */
+ WMI_SERVICE_TX_ENCAP = 30,
+ /* detect out-of-sync sleeping stations */
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC = 31,
+ WMI_SERVICE_EARLY_RX = 32, /* adaptive early-rx feature */
+ WMI_SERVICE_STA_SMPS = 33, /* STA MIMO-PS */
+ WMI_SERVICE_FWTEST = 34, /* Firmware test service */
+ WMI_SERVICE_STA_WMMAC = 35, /* STA WMMAC */
+ WMI_SERVICE_TDLS = 36, /* TDLS support */
+ WMI_SERVICE_BURST = 37, /* SIFS spaced burst support */
+ /* Dynamic beacon interval change for SAP/P2P GO in MCC scenario */
+ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE = 38,
+ /* Service to support adaptive off-channel scheduler */
+ WMI_SERVICE_ADAPTIVE_OCS = 39,
+ /* target will provide Sequence number for the peer/tid combo */
+ WMI_SERVICE_BA_SSN_SUPPORT = 40,
+ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE = 41,
+ WMI_SERVICE_WLAN_HB = 42, /* wlan HB service */
+ /* support LTE/WLAN antenna sharing */
+ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT = 43,
+ WMI_SERVICE_BATCH_SCAN = 44, /*Service to support batch scan*/
+ WMI_SERVICE_QPOWER = 45, /* QPower service */
+ WMI_SERVICE_PLMREQ = 46,
+ WMI_SERVICE_THERMAL_MGMT = 47, /* thermal throttling support */
+ WMI_SERVICE_RMC = 48, /* RMC support */
+ /* multi-hop forwarding offload */
+ WMI_SERVICE_MHF_OFFLOAD = 49,
+ /* target supports SAR tx limit from WMI_PDEV_PARAM_TXPOWER_LIMITxG */
+ WMI_SERVICE_COEX_SAR = 50,
+ /* Will support the bcn/prb rsp rate override */
+ WMI_SERVICE_BCN_TXRATE_OVERRIDE = 51,
+ WMI_SERVICE_NAN = 52, /* Neighbor Awareness Network */
+ /* L1SS statistics counter report */
+ WMI_SERVICE_L1SS_STAT = 53,
+ /* Linkspeed Estimation per peer */
+ WMI_SERVICE_ESTIMATE_LINKSPEED = 54,
+ /* Service to support OBSS scan */
+ WMI_SERVICE_OBSS_SCAN = 55,
+ WMI_SERVICE_TDLS_OFFCHAN = 56, /* TDLS off channel support */
+ /* TDLS UAPSD Buffer STA support */
+ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA = 57,
+ /* TDLS UAPSD Sleep STA support */
+ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA = 58,
+ WMI_SERVICE_IBSS_PWRSAVE = 59, /* IBSS power save support */
+ WMI_SERVICE_LPASS = 60, /*Service to support LPASS*/
+ WMI_SERVICE_EXTSCAN = 61, /* Extended Scans */
+ WMI_SERVICE_D0WOW = 62, /* D0-WOW Support */
+ /* Hotspot offload feature Support */
+ WMI_SERVICE_HSOFFLOAD = 63,
+ WMI_SERVICE_ROAM_HO_OFFLOAD = 64, /* roam handover offload */
+ /* target-based Rx full reorder */
+ WMI_SERVICE_RX_FULL_REORDER = 65,
+ WMI_SERVICE_DHCP_OFFLOAD = 66, /* DHCP offload support */
+ /* STA RX DATA offload to IPA support */
+ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT = 67,
+ /* mDNS responder offload support */
+ WMI_SERVICE_MDNS_OFFLOAD = 68,
+ WMI_SERVICE_SAP_AUTH_OFFLOAD = 69, /* softap auth offload */
+ /* Dual Band Simultaneous support */
+ WMI_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT = 70,
+ WMI_SERVICE_OCB = 71, /* OCB mode support */
+ /* arp offload support for ap mode vdev */
+ WMI_SERVICE_AP_ARPNS_OFFLOAD = 72,
+ /* Per band chainmask support */
+ WMI_SERVICE_PER_BAND_CHAINMASK_SUPPORT = 73,
+ WMI_SERVICE_PACKET_FILTER_OFFLOAD = 74, /* Per vdev packet filters */
+ WMI_SERVICE_MGMT_TX_HTT = 75, /* Mgmt Tx via HTT interface */
+ WMI_SERVICE_MGMT_TX_WMI = 76, /* Mgmt Tx via WMI interface */
+ /* WMI_SERVICE_READY_EXT msg follows */
+ WMI_SERVICE_EXT_MSG = 77,
+ /* Motion Aided WiFi Connectivity (MAWC)*/
+ WMI_SERVICE_MAWC = 78,
+ /* target will send ASSOC_CONF after ASSOC_CMD is processed */
+ WMI_SERVICE_PEER_ASSOC_CONF = 79,
+ WMI_SERVICE_EGAP = 80, /* enhanced green ap support */
+ /* FW supports 11W PMF Offload for STA */
+ WMI_SERVICE_STA_PMF_OFFLOAD = 81,
+ /* FW supports unified D0 and D3 wow */
+ WMI_SERVICE_UNIFIED_WOW_CAPABILITY = 82,
+ /* Enhanced ProxySTA mode support */
+ WMI_SERVICE_ENHANCED_PROXY_STA = 83,
+ WMI_SERVICE_ATF = 84, /* Air Time Fairness support */
+ WMI_SERVICE_COEX_GPIO = 85, /* BTCOEX GPIO support */
+ /**
+ * Aux Radio enhancement support for ignoring spectral scan intf
+ * from main radios
+ */
+ WMI_SERVICE_AUX_SPECTRAL_INTF = 86,
+ /**
+ * Aux Radio enhancement support for ignoring chan load intf
+ * from main radios
+ */
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF = 87,
+ /**
+ * BSS channel info (freq, noise floor, 64-bit counters)
+ * event support
+ */
+ WMI_SERVICE_BSS_CHANNEL_INFO_64 = 88,
+ /* Enterprise MESH Service Support */
+ WMI_SERVICE_ENTERPRISE_MESH = 89,
+ WMI_SERVICE_RESTRT_CHNL_SUPPORT = 90, /* Restricted Channel Support */
+ WMI_SERVICE_BPF_OFFLOAD = 91, /* FW supports bpf offload */
+ /* FW sends response event for Peer, Vdev delete commands */
+ WMI_SERVICE_SYNC_DELETE_CMDS = 92,
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT = 93,
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT = 94,
+ /* allow per-peer tx MCS min/max limits by host */
+ WMI_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES = 95,
+ WMI_SERVICE_NAN_DATA = 96, /* FW supports NAN data */
+ WMI_SERVICE_NAN_RTT = 97, /* FW supports NAN RTT */
+ WMI_SERVICE_11AX = 98, /* FW supports 802.11ax */
+
/* WMI_SERVICE_DEPRECATED_REPLACE
* FW supports these new WMI commands, to be used rather than
* deprecated matching commands:
* - WMI_PDEV_SET_PCL_CMDID (vs. WMI_SOC_SET_PCL_CMDID)
- * - WMI_PDEV_SET_HW_MODE_CMDID (vs. WMI_SOC_SET_HW_MODE_CMDID)
- * - WMI_PDEV_SET_MAC_CONFIG_CMDID (vs. WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID)
- * - WMI_PDEV_SET_ANTENNA_MODE_CMDID (vs. WMI_SOC_SET_ANTENNA_MODE_CMDID)
- * - WMI_VDEV_SET_DSCP_TID_MAP_CMDID (vs. WMI_VDEV_SET_WMM_PARAMS_CMDID)
+ * - WMI_PDEV_SET_HW_MODE_CMDID
+ * (vs. WMI_SOC_SET_HW_MODE_CMDID)
+ * - WMI_PDEV_SET_MAC_CONFIG_CMDID
+ * (vs. WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID)
+ * - WMI_PDEV_SET_ANTENNA_MODE_CMDID
+ * (vs. WMI_SOC_SET_ANTENNA_MODE_CMDID)
+ * - WMI_VDEV_SET_DSCP_TID_MAP_CMDID
+ * (vs. WMI_VDEV_SET_WMM_PARAMS_CMDID)
*/
- WMI_SERVICE_DEPRECATED_REPLACE,
- WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, /* FW supports a new mode that allows to run connection tracker in host */
- WMI_SERVICE_ENHANCED_MCAST_FILTER,/* FW supports enhanced multicast filtering (of mcast IP inside ucast WLAN) */
- WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, /* periodic channel stats service */
- WMI_SERVICE_MESH_11S,
- WMI_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT, /* FW+HW supports 10 MHz (half rate) and 5 MHz (quarter rate) channel bandwidth */
- WMI_SERVICE_VDEV_RX_FILTER, /* Support per-vdev specs of which rx frames to filter out */
- WMI_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT,
+ WMI_SERVICE_DEPRECATED_REPLACE = 99,
+ /**
+ * FW supports a new mode that allows the connection tracker to
+ * run in the host
+ */
+ WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE = 100,
+ /**
+ * FW supports enhanced multicast filtering (of mcast IP inside
+ * ucast WLAN)
+ */
+ WMI_SERVICE_ENHANCED_MCAST_FILTER = 101,
+ /* periodic channel stats service */
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT = 102,
+ WMI_SERVICE_MESH_11S = 103,
+ /**
+ * FW+HW supports 10 MHz (half rate) and 5 MHz (quarter rate)
+ * channel bandwidth
+ */
+ WMI_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT = 104,
+ /* Support per-vdev specs of which rx frames to filter out */
+ WMI_SERVICE_VDEV_RX_FILTER = 105,
+ WMI_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT = 106,
/*
* FW supports marking the first data packet which wakes
* the host from suspend
*/
- WMI_SERVICE_MARK_FIRST_WAKEUP_PACKET,
- WMI_MAX_SERVICE=128 /* max service */
+ WMI_SERVICE_MARK_FIRST_WAKEUP_PACKET = 107,
+ /* FW supports command that can add/delete multiple mcast filters */
+ WMI_SERVICE_MULTIPLE_MCAST_FILTER_SET = 108,
+ /* WMI_SERVICE_HOST_MANAGED_RX_REORDER -
+ * FW supports host-managed RX reorder.
+ * Host managed RX reorder involves RX BA state machine handling
+ * on peer/TID basis, REO configuration for HW based reordering/PN
+ * check and processing reorder exceptions generated by HW.
+ */
+ WMI_SERVICE_HOST_MANAGED_RX_REORDER = 109,
+ /* Specify whether the target supports the following WMI messages
+ * for reading / writing its flash memory:
+ * WMI_READ_DATA_FROM_FLASH_CMDID,
+ * WMI_READ_DATA_FROM_FLASH_EVENTID,
+ * WMI_TRANSFER_DATA_TO_FLASH_CMDID,
+ * WMI_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENTID,
+ */
+ WMI_SERVICE_FLASH_RDWR_SUPPORT = 110,
+
+ WMI_SERVICE_WLAN_STATS_REPORT = 111, /* support WLAN stats report */
+
+ /* WMI_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT -
+ * FW supports bigger MSDU ID partition which is defined as
+ * HTT_TX_IPA_NEW_MSDU_ID_SPACE_BEGIN. When both host and FW support
+ * new partition, FW uses HTT_TX_IPA_NEW_MSDU_ID_SPACE_BEGIN. If host
+ * doesn't support, FW falls back to HTT_TX_IPA_MSDU_ID_SPACE_BEGIN
+ * Handshaking is done through WMI_INIT and WMI service ready
+ *
+ * support bigger MSDU ID partition
+ */
+ WMI_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT = 112,
+ WMI_SERVICE_DFS_PHYERR_OFFLOAD = 113,
+ WMI_SERVICE_RCPI_SUPPORT = 114,
+ WMI_SERVICE_FW_MEM_DUMP_SUPPORT = 115, /* Support FW Memory dump */
+ WMI_SERVICE_PEER_STATS_INFO = 116, /* support per peer stats info */
+
+ /***** ADD NEW SERVICES HERE UNTIL ALL VALUES UP TO 128 ARE USED *****/
+
+ WMI_MAX_SERVICE = 128, /* max service */
+
+ /**
+ * NOTE:
+ * The above service flags are delivered in the wmi_service_bitmap
+ * field of the WMI_SERVICE_READY_EVENT message.
+ * The below service flags are delivered in a
+ * WMI_SERVICE_AVAILABLE_EVENT message rather than in the
+ * WMI_SERVICE_READY_EVENT message's wmi_service_bitmap field.
+ * The WMI_SERVICE_AVAILABLE_EVENT message immediately precedes the
+ * WMI_SERVICE_READY_EVENT message.
+ */
+
+ /*PUT 1ST EXT SERVICE HERE:*//*WMI_SERVICE_xxxxxxxx=128,*/
+ /*PUT 2ND EXT SERVICE HERE:*//*WMI_SERVICE_yyyyyyyy=129,*/
+
+ WMI_MAX_EXT_SERVICE
+
} WMI_SERVICE;
#define WMI_SERVICE_BM_SIZE ((WMI_MAX_SERVICE + sizeof(A_UINT32)- 1)/sizeof(A_UINT32))
-#define WMI_SERVICE_ROAM_OFFLOAD WMI_SERVICE_ROAM_SCAN_OFFLOAD /* depreciated the name WMI_SERVICE_ROAM_OFFLOAD, but here to help compiling with old host driver */
+#define WMI_NUM_EXT_SERVICES (WMI_MAX_EXT_SERVICE - WMI_MAX_SERVICE)
+#define WMI_SERVICE_EXT_BM_SIZE32 ((WMI_NUM_EXT_SERVICES + 31) / 32)
+
+/**
+ * the name WMI_SERVICE_ROAM_OFFLOAD is deprecated, but is kept here to
+ * help old host drivers compile
+ */
+#define WMI_SERVICE_ROAM_OFFLOAD WMI_SERVICE_ROAM_SCAN_OFFLOAD
/*
* turn on the WMI service bit corresponding to the WMI service.
@@ -188,6 +325,39 @@
( ((pwmi_svc_bmap)[(svc_id)/(sizeof(A_UINT32))] & \
(1 << ((svc_id)%(sizeof(A_UINT32)))) ) != 0)
+#define WMI_SERVICE_EXT_ENABLE(pwmi_svc_bmap, pwmi_svc_ext_bmap, svc_id) \
+ do { \
+ if (svc_id < WMI_MAX_SERVICE) { \
+ WMI_SERVICE_ENABLE(pwmi_svc_bmap, svc_id); \
+ } else { \
+ int word = ((svc_id) - WMI_MAX_SERVICE) / 32; \
+ int bit = (svc_id) & 0x1f; /* svc_id mod 32 */ \
+ (pwmi_svc_ext_bmap)[word] |= (1 << bit); \
+ } \
+ } while (0)
+
+#define WMI_SERVICE_EXT_DISABLE(pwmi_svc_bmap, pwmi_svc_ext_bmap, svc_id) \
+ do { \
+ if (svc_id < WMI_MAX_SERVICE) { \
+ WMI_SERVICE_DISABLE(pwmi_svc_bmap, svc_id); \
+ } else { \
+ int word = ((svc_id) - WMI_MAX_SERVICE) / 32; \
+ int bit = (svc_id) & 0x1f; /* svc_id mod 32 */ \
+ (pwmi_svc_ext_bmap)[word] &= ~(1 << bit); \
+ } \
+ } while (0)
+
+#define WMI_SERVICE_EXT_IS_ENABLED(pwmi_svc_bmap, pwmi_svc_ext_bmap, svc_id) \
+ /* If the service ID is beyond the known limit, treat it as disabled */ \
+ ((svc_id) >= WMI_MAX_EXT_SERVICE ? 0 : \
+ /* If service ID is in the non-extension range, use the old check */ \
+ (svc_id) < WMI_MAX_SERVICE ? \
+ WMI_SERVICE_IS_ENABLED(pwmi_svc_bmap, svc_id) : \
+ /* If service ID is in the extended range, check ext_bmap */ \
+ (pwmi_svc_ext_bmap)[((svc_id) - WMI_MAX_SERVICE) / 32] >> \
+ ((svc_id) & 0x1f))
+
+
#ifdef __cplusplus
}
#endif
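
A minimal host-side sketch of how the split legacy/extension bitmap macros above are intended to be used (the function and variable names are illustrative, not part of the header; the two arrays are assumed to be WMI_SERVICE_BM_SIZE and WMI_SERVICE_EXT_BM_SIZE32 A_UINT32 entries, filled from the WMI_SERVICE_READY_EVENT and WMI_SERVICE_AVAILABLE_EVENT bitmaps respectively):

    /* Record a service advertised by the target and query it later.
     * IDs below WMI_MAX_SERVICE land in the legacy bitmap; IDs in
     * [WMI_MAX_SERVICE, WMI_MAX_EXT_SERVICE) land in the extension bitmap. */
    static int host_record_and_check_11ax(A_UINT32 *svc_bmap,
                                          A_UINT32 *svc_ext_bmap)
    {
        /* WMI_SERVICE_11AX (98) is below WMI_MAX_SERVICE, so this routes
         * to WMI_SERVICE_ENABLE() on the legacy bitmap. */
        WMI_SERVICE_EXT_ENABLE(svc_bmap, svc_ext_bmap, WMI_SERVICE_11AX);

        /* The query picks the correct bitmap from the service ID alone. */
        return WMI_SERVICE_EXT_IS_ENABLED(svc_bmap, svc_ext_bmap,
                                          WMI_SERVICE_11AX);
    }
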
diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_tlv_defs.h b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_tlv_defs.h
index ad06601..fb3ea04 100644
--- a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_tlv_defs.h
+++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_tlv_defs.h
@@ -714,6 +714,64 @@
WMITLV_TAG_STRUC_wmi_p2p_lo_start_cmd_fixed_param,
WMITLV_TAG_STRUC_wmi_p2p_lo_stop_cmd_fixed_param,
WMITLV_TAG_STRUC_wmi_p2p_lo_stopped_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_peer_reorder_queue_setup_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_peer_reorder_queue_remove_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_set_multiple_mcast_filter_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_mgmt_tx_compl_bundle_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_read_data_from_flash_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_read_data_from_flash_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_pdev_set_reorder_timeout_val_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_peer_set_rx_blocksize_cmd_fixed_param,
+ WMITLV_TAG_STRUC_WMI_PDEV_SET_WAKEUP_CONFIG_CMDID_fixed_param,
+ WMITLV_TAG_STRUC_wmi_tlv_buf_len_param,
+ WMITLV_TAG_STRUC_wmi_service_available_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_peer_antdiv_info_req_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_peer_antdiv_info_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_peer_antdiv_info,
+ WMITLV_TAG_STRUC_wmi_pdev_get_antdiv_status_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_pdev_antdiv_status_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_mnt_filter_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_get_chip_power_stats_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_pdev_chip_power_stats_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_coex_get_antenna_isolation_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_coex_report_isolation_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_chan_cca_stats,
+ WMITLV_TAG_STRUC_wmi_peer_signal_stats,
+ WMITLV_TAG_STRUC_wmi_tx_stats,
+ WMITLV_TAG_STRUC_wmi_peer_ac_tx_stats,
+ WMITLV_TAG_STRUC_wmi_rx_stats,
+ WMITLV_TAG_STRUC_wmi_peer_ac_rx_stats,
+ WMITLV_TAG_STRUC_wmi_report_stats_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_chan_cca_stats_thresh,
+ WMITLV_TAG_STRUC_wmi_peer_signal_stats_thresh,
+ WMITLV_TAG_STRUC_wmi_tx_stats_thresh,
+ WMITLV_TAG_STRUC_wmi_rx_stats_thresh,
+ WMITLV_TAG_STRUC_wmi_pdev_set_stats_threshold_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_request_wlan_stats_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_rx_aggr_failure_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_rx_aggr_failure_info,
+ WMITLV_TAG_STRUC_wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_vdev_encrypt_decrypt_data_resp_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_pdev_band_to_mac,
+ WMITLV_TAG_STRUC_wmi_tbtt_offset_info,
+ WMITLV_TAG_STRUC_wmi_tbtt_offset_ext_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_sar_limits_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_sar_limit_cmd_row,
+ WMITLV_TAG_STRUC_wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_vdev_adfs_ch_cfg_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_vdev_adfs_ocac_abort_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_pdev_dfs_radar_detection_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_vdev_adfs_ocac_complete_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_vdev_dfs_cac_complete_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_vendor_oui,
+ WMITLV_TAG_STRUC_wmi_request_rcpi_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_update_rcpi_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_request_peer_stats_info_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_peer_stats_info,
+ WMITLV_TAG_STRUC_wmi_peer_stats_info_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_pkgid_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_connected_nlo_rssi_params,
} WMITLV_TAG_ID;
/*
@@ -1003,6 +1061,28 @@
OP(WMI_DBGLOG_TIME_STAMP_SYNC_CMDID) \
OP(WMI_P2P_LISTEN_OFFLOAD_START_CMDID) \
OP(WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID) \
+ OP(WMI_PEER_REORDER_QUEUE_SETUP_CMDID) \
+ OP(WMI_PEER_REORDER_QUEUE_REMOVE_CMDID) \
+ OP(WMI_SET_MULTIPLE_MCAST_FILTER_CMDID) \
+ OP(WMI_READ_DATA_FROM_FLASH_CMDID) \
+ OP(WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID) \
+ OP(WMI_PEER_SET_RX_BLOCKSIZE_CMDID) \
+ OP(WMI_PDEV_SET_WAKEUP_CONFIG_CMDID) \
+ OP(WMI_PEER_ANTDIV_INFO_REQ_CMDID) \
+ OP(WMI_PDEV_GET_ANTDIV_STATUS_CMDID) \
+ OP(WMI_MNT_FILTER_CMDID) \
+ OP(WMI_PDEV_GET_CHIP_POWER_STATS_CMDID) \
+ OP(WMI_COEX_GET_ANTENNA_ISOLATION_CMDID) \
+ OP(WMI_PDEV_SET_STATS_THRESHOLD_CMDID) \
+ OP(WMI_REQUEST_WLAN_STATS_CMDID) \
+ OP(WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID) \
+ OP(WMI_SAR_LIMITS_CMDID) \
+ OP(WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID) \
+ OP(WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID) \
+ OP(WMI_VDEV_ADFS_CH_CFG_CMDID) \
+ OP(WMI_VDEV_ADFS_OCAC_ABORT_CMDID) \
+ OP(WMI_REQUEST_RCPI_CMDID) \
+ OP(WMI_REQUEST_PEER_STATS_INFO_CMDID) \
/* add new CMD_LIST elements above this line */
/*
@@ -1154,6 +1234,22 @@
OP(WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID) \
OP(WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID) \
OP(WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID) \
+ OP(WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID) \
+ OP(WMI_READ_DATA_FROM_FLASH_EVENTID) \
+ OP(WMI_SERVICE_AVAILABLE_EVENTID) \
+ OP(WMI_PEER_ANTDIV_INFO_EVENTID) \
+ OP(WMI_PDEV_ANTDIV_STATUS_EVENTID) \
+ OP(WMI_PDEV_CHIP_POWER_STATS_EVENTID) \
+ OP(WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID) \
+ OP(WMI_REPORT_STATS_EVENTID) \
+ OP(WMI_REPORT_RX_AGGR_FAILURE_EVENTID) \
+ OP(WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID) \
+ OP(WMI_PDEV_DFS_RADAR_DETECTION_EVENTID) \
+ OP(WMI_VDEV_DFS_CAC_COMPLETE_EVENTID) \
+ OP(WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID) \
+ OP(WMI_UPDATE_RCPI_EVENTID) \
+ OP(WMI_PEER_STATS_INFO_EVENTID) \
+ OP(WMI_PKGID_EVENTID) \
/* add new EVT_LIST elements above this line */
@@ -1163,7 +1259,8 @@
#define WMITLV_TABLE_WMI_INIT_CMDID(id,op,buf,len)\
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_init_cmd_fixed_param, wmi_init_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)\
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_resource_config, wmi_resource_config, resource_config, WMITLV_SIZE_FIX)\
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wlan_host_memory_chunk, host_mem_chunks, WMITLV_SIZE_VAR)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wlan_host_memory_chunk, host_mem_chunks, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_set_hw_mode_cmd_fixed_param, wmi_pdev_set_hw_mode_cmd_fixed_param, hw_mode, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_INIT_CMDID);
@@ -1273,8 +1370,9 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_roam_offload_tlv_param, offload_param, WMITLV_SIZE_VAR) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_roam_11i_offload_tlv_param, offload_11i_param, WMITLV_SIZE_VAR) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_roam_11r_offload_tlv_param, offload_11r_param, WMITLV_SIZE_VAR) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_roam_ese_offload_tlv_param, offload_ese_param, WMITLV_SIZE_VAR)
-
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_STRUC, wmi_roam_ese_offload_tlv_param, offload_ese_param, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_STRUC, wmi_tlv_buf_len_param, assoc_ie_len_param, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, assoc_ie_buf, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_ROAM_SCAN_MODE);
/* Roam scan Rssi Threshold Cmd */
@@ -1338,7 +1436,8 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, channel_list, WMITLV_SIZE_VAR) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_ssid, ssid_list, WMITLV_SIZE_VAR) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_mac_addr, bssid_list, WMITLV_SIZE_VAR) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ie_data, WMITLV_SIZE_VAR)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ie_data, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_vendor_oui, vendor_oui, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_START_SCAN_CMDID);
@@ -1651,13 +1750,13 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, data, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_RTT_TSF_CMDID);
-/*RTT OEM req Cmd - DEPRECATED */
+/* RTT OEM req Cmd */
#define WMITLV_TABLE_WMI_OEM_REQ_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, data, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_OEM_REQ_CMDID);
-/* RTT OEM request Cmd */
+/* RTT OEM request Cmd - DEPRECATED */
#define WMITLV_TABLE_WMI_OEM_REQUEST_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, data, WMITLV_SIZE_VAR)
@@ -1726,7 +1825,9 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, nlo_configured_parameters, nlo_list, WMITLV_SIZE_VAR) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, channel_list, WMITLV_SIZE_VAR)\
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, nlo_channel_prediction_cfg, channel_prediction_param, WMITLV_SIZE_VAR) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_enlo_candidate_score_param, enlo_candidate_score_params, candidate_score_params, WMITLV_SIZE_FIX)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_enlo_candidate_score_param, enlo_candidate_score_params, candidate_score_params, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_vendor_oui, vendor_oui, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_connected_nlo_rssi_params, connected_nlo_rssi_params, cnlo_rssi_params, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
@@ -1766,9 +1867,13 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, data, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_QVIT_CMDID);
+#define WMITLV_TABLE_WMI_PDEV_SET_WAKEUP_CONFIG_CMDID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_WMI_PDEV_SET_WAKEUP_CONFIG_CMDID_fixed_param, WMI_PDEV_SET_WAKEUP_CONFIG_CMDID_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_SET_WAKEUP_CONFIG_CMDID);
+
/* Vdev Set keep alive Cmd */
-#define WMITLV_TABLE_WMI_VDEV_SET_KEEPALIVE_CMDID(id,op,buf,len) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_set_keepalive_cmd_fixed_param, wmi_vdev_set_keepalive_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+#define WMITLV_TABLE_WMI_VDEV_SET_KEEPALIVE_CMDID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_vdev_set_keepalive_cmd_fixed_param, wmi_vdev_set_keepalive_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_SET_KEEPALIVE_CMDID);
/* Vdev Get keep alive Cmd */
@@ -1801,6 +1906,12 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param, WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_SET_MCASTBCAST_FILTER_CMDID);
+/* Enhanced Mcast add/delete filter list cmd */
+#define WMITLV_TABLE_WMI_SET_MULTIPLE_MCAST_FILTER_CMDID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_set_multiple_mcast_filter_cmd_fixed_param, WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_mac_addr, mcast_list, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_SET_MULTIPLE_MCAST_FILTER_CMDID);
+
/* Set dbglog time stamp sync cmd */
#define WMITLV_TABLE_WMI_DBGLOG_TIME_STAMP_SYNC_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_dbglog_time_stamp_sync_cmd_fixed_param, WMI_DBGLOG_TIME_STAMP_SYNC_CMD_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -1993,6 +2104,30 @@
WMITLV_CREATE_PARAM_STRUC(WMI_DFS_PHYERR_FILTER_DIS_CMDID);
+/* DFS phyerr processing offload enable cmd */
+#define WMITLV_TABLE_WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param, wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
+
+/* DFS phyerr processing offload disable cmd */
+#define WMITLV_TABLE_WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param, wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID);
+
+/* set ADFS channel config cmd */
+#define WMITLV_TABLE_WMI_VDEV_ADFS_CH_CFG_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_adfs_ch_cfg_cmd_fixed_param, wmi_vdev_adfs_ch_cfg_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+
+WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_ADFS_CH_CFG_CMDID);
+
+/* abort an ADFS OCAC (off-channel CAC) currently in progress */
+#define WMITLV_TABLE_WMI_VDEV_ADFS_OCAC_ABORT_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_adfs_ocac_abort_cmd_fixed_param, wmi_vdev_adfs_ocac_abort_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+
+WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_ADFS_OCAC_ABORT_CMDID);
+
/* WOW Add Wake Pattern Cmd */
#define WMITLV_TABLE_WMI_WOW_ADD_WAKE_PATTERN_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_WMI_WOW_ADD_PATTERN_CMD_fixed_param, WMI_WOW_ADD_PATTERN_CMD_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
@@ -2135,7 +2270,8 @@
WMITLV_CREATE_PARAM_STRUC(WMI_SCAN_UPDATE_REQUEST_CMDID);
#define WMITLV_TABLE_WMI_SCAN_PROB_REQ_OUI_CMDID(id,op,buf,len) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_scan_prob_req_oui_cmd_fixed_param, wmi_scan_prob_req_oui_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_scan_prob_req_oui_cmd_fixed_param, wmi_scan_prob_req_oui_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_vendor_oui, vendor_oui, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_SCAN_PROB_REQ_OUI_CMDID);
@@ -2297,6 +2433,12 @@
WMITLV_CREATE_PARAM_STRUC(WMI_PEER_INFO_REQ_CMDID);
+#define WMITLV_TABLE_WMI_PEER_ANTDIV_INFO_REQ_CMDID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_peer_antdiv_info_req_cmd_fixed_param, \
+ wmi_peer_antdiv_info_req_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+
+WMITLV_CREATE_PARAM_STRUC(WMI_PEER_ANTDIV_INFO_REQ_CMDID);
+
#define WMITLV_TABLE_WMI_RMC_SET_MODE_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_rmc_set_mode_cmd_fixed_param, \
wmi_rmc_set_mode_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -2413,7 +2555,8 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_ndp_initiator_req_fixed_param, wmi_ndp_initiator_req_fixed_param_PROTOTYPE, fixed_param, WMITLV_SIZE_FIX) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_channel, wmi_channel, channel, WMITLV_SIZE_FIX) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_cfg, WMITLV_SIZE_VAR) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_app_info, WMITLV_SIZE_VAR)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_app_info, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_pmk, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_NDP_INITIATOR_REQ_CMDID);
/** NAN Data Responder Request Cmd
@@ -2425,7 +2568,8 @@
#define WMITLV_TABLE_WMI_NDP_RESPONDER_REQ_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_ndp_responder_req_fixed_param, wmi_ndp_responder_req_fixed_param_PROTOTYPE, fixed_param, WMITLV_SIZE_FIX) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_cfg, WMITLV_SIZE_VAR) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_app_info, WMITLV_SIZE_VAR)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_app_info, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_pmk, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_NDP_RESPONDER_REQ_CMDID);
/** NAN Data End Request Cmd
@@ -2439,11 +2583,22 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_ndp_end_req_PROTOTYPE, ndp_end_req_list, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_NDP_END_REQ_CMDID);
+/* RCPI Info Request Cmd */
+#define WMITLV_TABLE_WMI_REQUEST_RCPI_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_request_rcpi_cmd_fixed_param, wmi_request_rcpi_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_REQUEST_RCPI_CMDID);
+
/* Modem power state cmd */
#define WMITLV_TABLE_WMI_MODEM_POWER_STATE_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_modem_power_state_cmd_param, wmi_modem_power_state_cmd_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_MODEM_POWER_STATE_CMDID);
+/* SAR limit update cmd */
+#define WMITLV_TABLE_WMI_SAR_LIMITS_CMDID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_sar_limits_cmd_fixed_param, wmi_sar_limits_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_STRUC, wmi_sar_limit_cmd_row, sar_limits, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_SAR_LIMITS_CMDID);
+
/* get estimated link speed cmd */
#define WMITLV_TABLE_WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_peer_get_estimated_linkspeed_cmd_fixed_param, wmi_peer_get_estimated_linkspeed_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -2503,6 +2658,17 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_get_temperature_cmd_fixed_param, wmi_pdev_get_temperature_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_GET_TEMPERATURE_CMDID);
+/* Pdev get ANT DIV feature status Cmd */
+#define WMITLV_TABLE_WMI_PDEV_GET_ANTDIV_STATUS_CMDID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_pdev_get_antdiv_status_cmd_fixed_param, wmi_pdev_get_antdiv_status_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_GET_ANTDIV_STATUS_CMDID);
+
+/* DISA feature : vdev encrypt decrypt request */
+#define WMITLV_TABLE_WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param, wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, data, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID);
+
/* Set antenna diversity Cmd */
#define WMITLV_TABLE_WMI_SET_ANTENNA_DIVERSITY_CMDID(id,op,buf,len) \
WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_pdev_set_antenna_diversity_cmd_fixed_param, wmi_pdev_set_antenna_diversity_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -2698,6 +2864,10 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, data, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_TRANSFER_DATA_TO_FLASH_CMDID);
+#define WMITLV_TABLE_WMI_READ_DATA_FROM_FLASH_CMDID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_read_data_from_flash_cmd_fixed_param, wmi_read_data_from_flash_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_READ_DATA_FROM_FLASH_CMDID);
+
#define WMITLV_TABLE_WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_config_enhanced_mcast_filter_fixed_param, wmi_config_enhanced_mcast_filter_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID);
@@ -2731,6 +2901,11 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_WMI_COEX_CONFIG_CMD_fixed_param, WMI_COEX_CONFIG_CMD_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_COEX_CONFIG_CMDID);
+/* Coex get antenna isolation cmd */
+#define WMITLV_TABLE_WMI_COEX_GET_ANTENNA_ISOLATION_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_coex_get_antenna_isolation_cmd_fixed_param, wmi_coex_get_antenna_isolation_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_COEX_GET_ANTENNA_ISOLATION_CMDID);
+
/* bpf offload capability get cmd */
#define WMITLV_TABLE_WMI_BPF_GET_CAPABILITY_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_bpf_get_capability_cmd_fixed_param, wmi_bpf_get_capability_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -2876,12 +3051,61 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, args, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_WAL_POWER_DEBUG_CMDID);
+/* pdev set reorder timeout val */
+#define WMITLV_TABLE_WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_pdev_set_reorder_timeout_val_cmd_fixed_param, wmi_pdev_set_reorder_timeout_val_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID);
+
+/* peer set rx blocksize cmd */
+#define WMITLV_TABLE_WMI_PEER_SET_RX_BLOCKSIZE_CMDID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_peer_set_rx_blocksize_cmd_fixed_param, wmi_peer_set_rx_blocksize_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PEER_SET_RX_BLOCKSIZE_CMDID);
+
/* Bandwidth Fairness (BWF) peer configure commands */
#define WMITLV_TABLE_WMI_PEER_BWF_REQUEST_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_peer_bwf_request_fixed_param, wmi_peer_bwf_request_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_bwf_peer_info, peer_info, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_PEER_BWF_REQUEST_CMDID);
+/* peer reorder queue setup cmd */
+#define WMITLV_TABLE_WMI_PEER_REORDER_QUEUE_SETUP_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_peer_reorder_queue_setup_cmd_fixed_param, wmi_peer_reorder_queue_setup_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
+
+/* peer reorder queue remove cmd */
+#define WMITLV_TABLE_WMI_PEER_REORDER_QUEUE_REMOVE_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_peer_reorder_queue_remove_cmd_fixed_param, wmi_peer_reorder_queue_remove_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
+
+/* Filter in monitor mode parameters Cmd */
+#define WMITLV_TABLE_WMI_MNT_FILTER_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_mnt_filter_cmd_fixed_param, wmi_mnt_filter_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_MNT_FILTER_CMDID);
+
+/* WLAN GET Chip power Stats*/
+#define WMITLV_TABLE_WMI_PDEV_GET_CHIP_POWER_STATS_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_get_chip_power_stats_cmd_fixed_param, wmi_pdev_get_chip_power_stats_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_GET_CHIP_POWER_STATS_CMDID);
+
+/* pdev set stats threshold cmd*/
+#define WMITLV_TABLE_WMI_PDEV_SET_STATS_THRESHOLD_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_set_stats_threshold_cmd_fixed_param, wmi_pdev_set_stats_threshold_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_chan_cca_stats_thresh, wmi_chan_cca_stats_thresh, cca_thresh, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_peer_signal_stats_thresh, wmi_peer_signal_stats_thresh, signal_thresh, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_tx_stats_thresh, wmi_tx_stats_thresh, tx_thresh, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_rx_stats_thresh, wmi_rx_stats_thresh, rx_thresh, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_SET_STATS_THRESHOLD_CMDID);
+
+/* Request wlan stats cmd */
+#define WMITLV_TABLE_WMI_REQUEST_WLAN_STATS_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_request_wlan_stats_cmd_fixed_param, wmi_request_wlan_stats_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_REQUEST_WLAN_STATS_CMDID);
+
+/* Request peer stats info cmd */
+#define WMITLV_TABLE_WMI_REQUEST_PEER_STATS_INFO_CMDID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_request_peer_stats_info_cmd_fixed_param, wmi_request_peer_stats_info_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_REQUEST_PEER_STATS_INFO_CMDID);
+
/************************** TLV definitions of WMI events *******************************/
/* Service Ready event */
@@ -2893,6 +3117,11 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, wlan_dbs_hw_mode_list, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_SERVICE_READY_EVENTID);
+/* service available event */
+#define WMITLV_TABLE_WMI_SERVICE_AVAILABLE_EVENTID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_service_available_event_fixed_param, wmi_service_available_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_SERVICE_AVAILABLE_EVENTID);
+
/* Service Ready Extension event */
#define WMITLV_TABLE_WMI_SERVICE_READY_EXT_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_service_ready_ext_event_fixed_param, wmi_service_ready_ext_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
@@ -3004,6 +3233,13 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_mgmt_tx_compl_event_fixed_param, wmi_mgmt_tx_compl_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_MGMT_TX_COMPLETION_EVENTID);
+/* Bundled Mgmt TX completion event */
+#define WMITLV_TABLE_WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_mgmt_tx_compl_bundle_event_fixed_param, wmi_mgmt_tx_compl_bundle_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, desc_ids, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, status, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID);
+
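A sketch of how a host might walk the two parallel TLV arrays of the bundled completion event; the num_reports count is assumed to be carried in the fixed_param, and complete_one() is a hypothetical host callback:

    static void handle_mgmt_tx_bundle(const A_UINT32 *desc_ids,
                                      const A_UINT32 *status,
                                      A_UINT32 num_reports,
                                      void (*complete_one)(A_UINT32 desc_id,
                                                           A_UINT32 tx_status))
    {
        A_UINT32 i;

        /* desc_ids[i] and status[i] describe the i-th frame of the bundle. */
        for (i = 0; i < num_reports; i++)
            complete_one(desc_ids[i], status[i]);
    }
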
/* VDEV Start response Event */
#define WMITLV_TABLE_WMI_VDEV_START_RESP_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_start_response_event_fixed_param, wmi_vdev_start_response_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -3076,7 +3312,9 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_channel, wmi_channel, chan, WMITLV_SIZE_FIX) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_key_material, key, WMITLV_SIZE_VAR) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, status, WMITLV_SIZE_VAR) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, reassoc_req_frame, WMITLV_SIZE_VAR)
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, reassoc_req_frame, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_STRUC, wmi_pdev_hw_mode_transition_event_fixed_param, hw_mode_transition_fixed_param, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_STRUC, wmi_pdev_set_hw_mode_response_vdev_mac_entry, wmi_pdev_set_hw_mode_response_vdev_mac_mapping, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_ROAM_SYNCH_EVENTID);
/* WOW Wakeup Host Event */
@@ -3147,6 +3385,11 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_transfer_data_to_flash_complete_event_fixed_param, wmi_transfer_data_to_flash_complete_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENTID);
+#define WMITLV_TABLE_WMI_READ_DATA_FROM_FLASH_EVENTID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_read_data_from_flash_event_fixed_param, wmi_read_data_from_flash_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, data, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_READ_DATA_FROM_FLASH_EVENTID);
+
/* Diagnostics Event */
#define WMITLV_TABLE_WMI_DIAG_EVENTID(id,op,buf,len)\
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, bufp, WMITLV_SIZE_VAR)
@@ -3234,6 +3477,11 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_vdev_rate_ht_info, ht_info, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_UPDATE_VDEV_RATE_STATS_EVENTID);
+/* report rx aggregation failure information */
+#define WMITLV_TABLE_WMI_REPORT_RX_AGGR_FAILURE_EVENTID(id,op,buf,len)\
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_rx_aggr_failure_event_fixed_param, wmi_rx_aggr_failure_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_rx_aggr_failure_info, failure_info, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_REPORT_RX_AGGR_FAILURE_EVENTID);
/* Update memory dump complete Event */
#define WMITLV_TABLE_WMI_UPDATE_FW_MEM_DUMP_EVENTID(id,op,buf,len)\
@@ -3367,6 +3615,11 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_peer_info, peer_info, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_PEER_INFO_EVENTID);
+#define WMITLV_TABLE_WMI_PEER_ANTDIV_INFO_EVENTID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_peer_antdiv_info_event_fixed_param, wmi_peer_antdiv_info_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_STRUC, wmi_peer_antdiv_info, peer_info, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_PEER_ANTDIV_INFO_EVENTID);
+
#define WMITLV_TABLE_WMI_PEER_TX_FAIL_CNT_THR_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_peer_tx_fail_cnt_thr_event_fixed_param, wmi_peer_tx_fail_cnt_thr_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_PEER_TX_FAIL_CNT_THR_EVENTID);
@@ -3376,6 +3629,18 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_dfs_radar_event_fixed_param, wmi_dfs_radar_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_DFS_RADAR_EVENTID);
+#define WMITLV_TABLE_WMI_PDEV_DFS_RADAR_DETECTION_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_dfs_radar_detection_event_fixed_param, wmi_pdev_dfs_radar_detection_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_DFS_RADAR_DETECTION_EVENTID);
+
+#define WMITLV_TABLE_WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_adfs_ocac_complete_event_fixed_param, wmi_vdev_adfs_ocac_complete_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID);
+
+#define WMITLV_TABLE_WMI_VDEV_DFS_CAC_COMPLETE_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_dfs_cac_complete_event_fixed_param, wmi_vdev_dfs_cac_complete_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_DFS_CAC_COMPLETE_EVENTID);
+
/* Thermal Event */
#define WMITLV_TABLE_WMI_THERMAL_MGMT_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_thermal_mgmt_event_fixed_param, wmi_thermal_mgmt_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -3407,6 +3672,11 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_nan_joined_cluster_event_fixed_param, wmi_nan_joined_cluster_event_fixed_param_PROTOTYPE, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_NAN_JOINED_CLUSTER_EVENTID);
+/* Coex report antenna isolation event */
+#define WMITLV_TABLE_WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_coex_report_isolation_event_fixed_param, wmi_coex_report_isolation_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID);
+
/* NDP capabilities response event */
#define WMITLV_TABLE_WMI_NDI_CAP_RSP_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_ndi_cap_rsp_event_fixed_param, wmi_ndi_cap_rsp_event_fixed_param_PROTOTYPE, fixed_param, WMITLV_SIZE_FIX)
@@ -3445,7 +3715,8 @@
#define WMITLV_TABLE_WMI_NDP_INDICATION_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_ndp_indication_event_fixed_param, wmi_ndp_indication_event_fixed_param_PROTOTYPE, fixed_param, WMITLV_SIZE_FIX) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_cfg, WMITLV_SIZE_VAR) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_app_info, WMITLV_SIZE_VAR)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_app_info, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ndp_scid, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_NDP_INDICATION_EVENTID);
/** NDP confirm event
@@ -3470,6 +3741,11 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_ndp_end_indication_PROTOTYPE, ndp_end_indication_list, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_NDP_END_INDICATION_EVENTID);
+/* Update RCPI Info Event */
+#define WMITLV_TABLE_WMI_UPDATE_RCPI_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_update_rcpi_event_fixed_param, wmi_update_rcpi_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_UPDATE_RCPI_EVENTID);
+
/* L1SS track Event */
#define WMITLV_TABLE_WMI_PDEV_L1SS_TRACK_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_l1ss_track_event_fixed_param, wmi_pdev_l1ss_track_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -3524,6 +3800,11 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_temperature_event_fixed_param, wmi_pdev_temperature_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_TEMPERATURE_EVENTID);
+/* Pdev get ANT DIV feature status event */
+#define WMITLV_TABLE_WMI_PDEV_ANTDIV_STATUS_EVENTID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_pdev_antdiv_status_event_fixed_param, wmi_pdev_antdiv_status_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_ANTDIV_STATUS_EVENTID);
+
/* mDNS offload stats event */
#define WMITLV_TABLE_WMI_MDNS_STATS_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_mdns_stats_event_fixed_param, wmi_mdns_stats_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -3654,7 +3935,7 @@
/* FIPS event */
#define WMITLV_TABLE_WMI_PDEV_FIPS_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_fips_event_fixed_param, wmi_pdev_fips_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, data, WMITLV_SIZE_VAR)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, data, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_FIPS_EVENTID);
#define WMITLV_TABLE_WMI_PDEV_CHANNEL_HOPPING_EVENTID(id,op,buf,len) \
@@ -3665,6 +3946,11 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_ani_cck_event_fixed_param, wmi_ani_cck_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_ANI_CCK_LEVEL_EVENTID);
+#define WMITLV_TABLE_WMI_PDEV_CHIP_POWER_STATS_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_chip_power_stats_event_fixed_param, wmi_pdev_chip_power_stats_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, debug_registers, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_CHIP_POWER_STATS_EVENTID);
+
#define WMITLV_TABLE_WMI_PDEV_ANI_OFDM_LEVEL_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_ani_ofdm_event_fixed_param, wmi_ani_ofdm_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_ANI_OFDM_LEVEL_EVENTID);
@@ -3704,6 +3990,149 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, tx_time_per_power_level, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID);
+/* Layout of WMI_REPORT_STATS_EVENTID message:
+ * fixed_param;
+ * wmi_chan_cca_stats chan_cca_stats[]; Array length is specified by num_chan_cca_stats
+ * wmi_peer_signal_stats peer_signal_stats[]; Array length is specified by num_peer_signal_stats
+ * wmi_peer_ac_tx_stats peer_ac_tx_stats[]; Array length is specified by num_peer_ac_tx_stats
+ * wmi_tx_stats tx_stats[][]; Array length is num_peer_ac_tx_stats * WLAN_MAX_AC, array index is (peer_index * WLAN_MAX_AC + ac_index)
+ * A_UINT32 tx_mpdu_aggr[][][]; Array length is num_peer_ac_tx_stats * WLAN_MAX_AC * tx_mpdu_aggr_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * tx_mpdu_aggr_array_len + A-MPDU size index
+ * Contains a histogram of how many A-MPDUs of a given size (i.e. number of MPDUs) have been transmitted.
+ * Element 0 contains the number of PPDUs with a single-MPDU A-MPDU.
+ * Element 1 contains the number of PPDUs with 2 MPDUs.
+ * Element 2 contains the number of PPDUs with 3 MPDUs.
+ * Element tx_mpdu_aggr_array_len-1 contains the number of PPDUs with >= tx_mpdu_aggr_array_len MPDUs.
+ * A_UINT32 tx_succ_mcs[][][]; Array length is num_peer_ac_tx_stats * WLAN_MAX_AC * tx_succ_mcs_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * tx_succ_mcs_array_len + MCS index
+ * Contains a count of how many tx PPDUs have been acked for each MCS of each AC of each peer.
+ * A_UINT32 tx_fail_mcs[][][]; Array length is num_peer_ac_tx_stats * WLAN_MAX_AC * tx_fail_mcs_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * tx_fail_mcs_array_len + MCS index
+ * Contains a count of how many PPDUs failed tx due to no ack for each MCS of each AC of each peer.
+ * A_UINT32 tx_ppdu_delay[][][]; Array length is num_peer_ac_tx_stats * WLAN_MAX_AC * tx_ppdu_delay_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * tx_ppdu_delay_array_len + delay index
+ * Contains a histogram of how many PPDUs encountered each level of delay due to retries or air interface contention.
+ * The time represented by each array element (i.e. histogram bin) is specified by tx_ppdu_delay_bin_size_ms.
+ * Element 0 contains the count of PPDUs delayed by less than tx_ppdu_delay_bin_size_ms.
+ * Element 1 contains the count of PPDUs delayed by more than 1x tx_ppdu_delay_bin_size_ms but less than 2x.
+ * Element tx_ppdu_delay_array_len-1 contains the count of PPDUs delayed by
+ * >= tx_ppdu_delay_bin_size_ms * (tx_ppdu_delay_array_len-1)
+ * wmi_peer_ac_rx_stats peer_ac_rx_stats[]; Array length is specified by num_peer_ac_rx_stats
+ * wmi_rx_stats rx_stats[][]; Array length is num_peer_ac_rx_stats * WLAN_MAX_AC, array index is (peer_index * WLAN_MAX_AC + ac_index)
+ * A_UINT32 rx_mpdu_aggr[][][]; Array length is num_peer_ac_rx_stats * WLAN_MAX_AC * rx_mpdu_aggr_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * rx_mpdu_aggr_array_len + A-MPDU size index
+ * Contains a histogram of how many A-MPDUs of a given size (i.e. number of MPDUs) have been received.
+ * Element 0 contains the number of PPDUs with a single MPDU.
+ * Element 1 contains the number of PPDUs with 2 MPDUs.
+ * Element 2 contains the number of PPDUs with 3 MPDUs.
+ * Element rx_mpdu_aggr_array_len-1 contains the number of PPDUs with >= rx_mpdu_aggr_array_len MPDUs.
+ * A_UINT32 rx_mcs[][][]; Array length is (num_peer_ac_rx_stats * WLAN_MAX_AC) * rx_mcs_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * rx_mcs_array_len + MCS index
+ * Contains a count of rx PPDUs for each MCS of each AC of each peer.
+ * For example, if there were 2 peers (X and Y) whose stats were being reported,
+ * the message and its TLV arrays would look like this:
+ * 1. fixed_param
+ * 2. chan_cca_stats[0]
+ * 3. peer_signal_stats[0] for peer X
+ * 4. peer_signal_stats[1] for peer Y
+ * 5. peer_ac_tx_stats[0] for X
+ * 6. peer_ac_tx_stats[1] for Y
+ * 7. tx_stats[0][0] for peer X, AC 0
+ * 8. tx_stats[0][1] for peer X, AC 1
+ * 9. tx_stats[0][2] for peer X, AC 2
+ * 10. tx_stats[0][3] for peer X, AC 3
+ * 11. tx_stats[1][0] for peer Y, AC 0
+ * 12 tx_stats[1][1] for peer Y, AC 1
+ * 13. tx_stats[1][2] for peer Y, AC 2
+ * 14. tx_stats[1][3] for peer Y, AC 3
+ * 15. tx_mpdu_aggr[0][0][] for peer X, AC 0
+ * 16. tx_mpdu_aggr[0][1][] for peer X, AC 1
+ * 17. tx_mpdu_aggr[0][2][] for peer X, AC 2
+ * 18. tx_mpdu_aggr[0][3][] for peer X, AC 3
+ * 19. tx_mpdu_aggr[1][0][] for peer Y, AC 0
+ * 20. tx_mpdu_aggr[1][1][] for peer Y, AC 1
+ * 21. tx_mpdu_aggr[1][2][] for peer Y, AC 2
+ * 22. tx_mpdu_aggr[1][3][] for peer Y, AC 3
+ * 23. tx_succ_mcs[0][0][] for peer X, AC 0
+ * 24. tx_succ_mcs[0][1][] for peer X, AC 1
+ * 25. tx_succ_mcs[0][2][] for peer X, AC 2
+ * 26. tx_succ_mcs[0][3][] for peer X, AC 3
+ * 27. tx_succ_mcs[1][0][] for peer Y, AC 0
+ * 28. tx_succ_mcs[1][1][] for peer Y, AC 1
+ * 29. tx_succ_mcs[1][2][] for peer Y, AC 2
+ * 30. tx_succ_mcs[1][3][] for peer Y, AC 3
+ * 31. tx_fail_mcs[0][0][] for peer X, AC 0
+ * 32. tx_fail_mcs[0][1][] for peer X, AC 1
+ * 33. tx_fail_mcs[0][2][] for peer X, AC 2
+ * 34. tx_fail_mcs[0][3][] for peer X, AC 3
+ * 35. tx_fail_mcs[1][0][] for peer Y, AC 0
+ * 36. tx_fail_mcs[1][1][] for peer Y, AC 1
+ * 37. tx_fail_mcs[1][2][] for peer Y, AC 2
+ * 38. tx_fail_mcs[1][3][] for peer Y, AC 3
+ * 39. tx_ppdu_delay[0][0][] for peer X, AC 0
+ * 40. tx_ppdu_delay[0][1][] for peer X, AC 1
+ * 41. tx_ppdu_delay[0][2][] for peer X, AC 2
+ * 42. tx_ppdu_delay[0][3][] for peer X, AC 3
+ * 43. tx_ppdu_delay[1][0][] for peer Y, AC 0
+ * 44. tx_ppdu_delay[1][1][] for peer Y, AC 1
+ * 45. tx_ppdu_delay[1][2][] for peer Y, AC 2
+ * 46. tx_ppdu_delay[1][3][] for peer Y, AC 3
+ * 47. peer_ac_rx_stats[0] for X
+ * 48. peer_ac_rx_stats[1] for Y
+ * 49. rx_stats[0][0] for peer X, AC 0
+ * 50. rx_stats[0][1] for peer X, AC 1
+ * 51. rx_stats[0][2] for peer X, AC 2
+ * 52. rx_stats[0][3] for peer X, AC 3
+ * 53. rx_stats[1][0] for peer Y, AC 0
+ * 54. rx_stats[1][1] for peer Y, AC 1
+ * 55. rx_stats[1][2] for peer Y, AC 2
+ * 56. rx_stats[1][3] for peer Y, AC 3
+ * 57. rx_mpdu_aggr[0][0][] for peer X, AC 0
+ * 58. rx_mpdu_aggr[0][1][] for peer X, AC 1
+ * 59. rx_mpdu_aggr[0][2][] for peer X, AC 2
+ * 60. rx_mpdu_aggr[0][3][] for peer X, AC 3
+ * 61. rx_mpdu_aggr[1][0][] for peer Y, AC 0
+ * 62. rx_mpdu_aggr[1][1][] for peer Y, AC 1
+ * 63. rx_mpdu_aggr[1][2][] for peer Y, AC 2
+ * 64. rx_mpdu_aggr[1][3][] for peer Y, AC 3
+ * 65. rx_mcs[0][0][] for peer X, AC 0
+ * 66. rx_mcs[0][1][] for peer X, AC 1
+ * 67. rx_mcs[0][2][] for peer X, AC 2
+ * 68. rx_mcs[0][3][] for peer X, AC 3
+ * 69. rx_mcs[1][0][] for peer Y, AC 0
+ * 70. rx_mcs[1][1][] for peer Y, AC 1
+ * 71. rx_mcs[1][2][] for peer Y, AC 2
+ * 72. rx_mcs[1][3][] for peer Y, AC 3
+ **/
+#define WMITLV_TABLE_WMI_REPORT_STATS_EVENTID(id, op, buf, len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_report_stats_event_fixed_param, wmi_report_stats_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_chan_cca_stats, chan_cca_stats, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_peer_signal_stats, peer_signal_stats, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_peer_ac_tx_stats, peer_ac_tx_stats, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_tx_stats, tx_stats, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, tx_mpdu_aggr, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, tx_succ_mcs, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, tx_fail_mcs, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, tx_ppdu_delay, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_peer_ac_rx_stats, peer_ac_rx_stats, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_rx_stats, rx_stats, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, rx_mpdu_aggr, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, rx_mcs, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_REPORT_STATS_EVENTID);
+
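As a worked example of the flattened indexing described in the layout comment above, a helper of the following shape could pull one histogram bucket out of tx_mpdu_aggr[] (helper and parameter names are illustrative; WLAN_MAX_AC and tx_mpdu_aggr_array_len are the quantities the comment refers to):

    /* Bucket for (peer_index, ac_index, aggr_size_index) in the flattened
     * tx_mpdu_aggr[] TLV array: rows are (peer, AC) pairs of length
     * tx_mpdu_aggr_array_len, laid out peer-major then AC. */
    static A_UINT32 tx_mpdu_aggr_bucket(const A_UINT32 *tx_mpdu_aggr,
                                        A_UINT32 tx_mpdu_aggr_array_len,
                                        A_UINT32 peer_index,
                                        A_UINT32 ac_index,
                                        A_UINT32 aggr_size_index)
    {
        A_UINT32 row = peer_index * WLAN_MAX_AC + ac_index;

        return tx_mpdu_aggr[row * tx_mpdu_aggr_array_len + aggr_size_index];
    }

The same row-major indexing applies to tx_succ_mcs, tx_fail_mcs, tx_ppdu_delay, rx_mpdu_aggr and rx_mcs, each with its own array length from the fixed_param.
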
+#define WMITLV_TABLE_WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_vdev_encrypt_decrypt_data_resp_event_fixed_param, wmi_vdev_encrypt_decrypt_data_resp_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, enc80211_frame, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID);
+
+#define WMITLV_TABLE_WMI_PEER_STATS_INFO_EVENTID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_peer_stats_info_event_fixed_param, wmi_peer_stats_info_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_ARRAY_STRUC, wmi_peer_stats_info, peer_stats_info, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_PEER_STATS_INFO_EVENTID);
+
+#define WMITLV_TABLE_WMI_PKGID_EVENTID(id, op, buf, len) \
+ WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_pkgid_event_fixed_param, wmi_pkgid_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PKGID_EVENTID);
#ifdef __cplusplus
}
diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_unified.h b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_unified.h
index db86c4c..6cf2390 100644
--- a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_unified.h
+++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_unified.h
@@ -236,6 +236,7 @@
WMI_GRP_BPF_OFFLOAD, /* 0x36 Berkeley Packet Filter */
WMI_GRP_NAN_DATA, /* 0x37 */
WMI_GRP_PROTOTYPE, /* 0x38 */
+ WMI_GRP_MONITOR, /* 0x39 */
} WMI_GRP_ID;
#define WMI_CMD_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
@@ -343,8 +344,18 @@
WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID,
/** WMI command for power debug framework */
WMI_PDEV_WAL_POWER_DEBUG_CMDID,
+ /** set per-AC rx reorder timeouts */
+ WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID,
+ /** WMI command for WOW gpio and type */
+ WMI_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ /* Get current ANT's per chain's RSSI info */
+ WMI_PDEV_GET_ANTDIV_STATUS_CMDID,
+ /** WMI command for getting Chip Power Stats */
+ WMI_PDEV_GET_CHIP_POWER_STATS_CMDID,
+ /** set stats reporting thresholds - see WMI_REPORT_STATS_EVENTID */
+ WMI_PDEV_SET_STATS_THRESHOLD_CMDID,
- /* VDEV(virtual device) specific commands */
+ /* VDEV (virtual device) specific commands */
/** vdev create */
WMI_VDEV_CREATE_CMDID=WMI_CMD_GRP_START_ID(WMI_GRP_VDEV),
/** vdev delete */
@@ -392,6 +403,9 @@
/** To set custom aggregation size for per vdev */
WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
+ /* DISA feature: Encrypt-decrypt data request */
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
+
/* peer specific commands */
/** create a peer */
@@ -436,6 +450,18 @@
WMI_PEER_ATF_REQUEST_CMDID,
/** bandwidth fairness (BWF) peer configuration request command */
WMI_PEER_BWF_REQUEST_CMDID,
+ /** rx reorder queue setup for peer/tid */
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID,
+ /** rx reorder queue remove for peer/tid */
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
+ /** specify a limit for rx A-MPDU block size */
+ WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
+ /**
+ * request peer antdiv info from FW. FW shall respond with
+ * PEER_ANTDIV_INFO_EVENTID
+ */
+ WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+
/* beacon/management specific commands */
@@ -492,6 +518,14 @@
WMI_DFS_PHYERR_FILTER_ENA_CMDID,
/** enable DFS phyerr/parse filter offload */
WMI_DFS_PHYERR_FILTER_DIS_CMDID,
+ /** enable DFS phyerr processing offload */
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID,
+ /** disable DFS phyerr processing offload */
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID,
+ /** set ADFS channel config */
+ WMI_VDEV_ADFS_CH_CFG_CMDID,
+ /** abort ADFS off-channel-availability-check currently in progress */
+ WMI_VDEV_ADFS_OCAC_ABORT_CMDID,
/* Roaming specific commands */
/** set roam scan mode */
@@ -521,7 +555,7 @@
/** configure thresholds for MAWC */
WMI_ROAM_CONFIGURE_MAWC_CMDID,
/** configure MultiBand Operation(refer WFA MBO spec) parameter */
- WMI_ROAM_SET_MBO_PARAM_CMDID,
+ WMI_ROAM_SET_MBO_PARAM_CMDID, /* DEPRECATED */
/** offload scan specific commands */
/** set offload scan AP profile */
@@ -667,6 +701,16 @@
/** Cmd to configure the verbose level */
WMI_DIAG_EVENT_LOG_CONFIG_CMDID,
+ /** One time request for wlan stats */
+ WMI_REQUEST_WLAN_STATS_CMDID,
+
+ /** Request for getting RCPI of peer */
+ WMI_REQUEST_RCPI_CMDID,
+
+ /** One time request for peer stats info */
+ WMI_REQUEST_PEER_STATS_INFO_CMDID,
+
+
/** ARP OFFLOAD REQUEST*/
WMI_SET_ARP_NS_OFFLOAD_CMDID=WMI_CMD_GRP_START_ID(WMI_GRP_ARP_NS_OFL),
@@ -770,6 +814,10 @@
WMI_VDEV_WISA_CMDID,
/** set debug log time stamp sync up with host */
WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
+ /** Command for host to set/delete multiple mcast filters */
+ WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
+ /** upload a requested section of data from firmware flash to host */
+ WMI_READ_DATA_FROM_FLASH_CMDID,
/* GPIO Configuration */
WMI_GPIO_CONFIG_CMDID=WMI_CMD_GRP_START_ID(WMI_GRP_GPIO),
@@ -859,6 +907,8 @@
WMI_CHAN_AVOID_UPDATE_CMDID,
WMI_COEX_CONFIG_CMDID,
WMI_CHAN_AVOID_RPT_ALLOW_CMDID,
+ WMI_COEX_GET_ANTENNA_ISOLATION_CMDID,
+ WMI_SAR_LIMITS_CMDID,
/**
* OBSS scan offload enable/disable commands
@@ -938,6 +988,10 @@
WMI_BPF_GET_VDEV_STATS_CMDID,
WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID,
WMI_BPF_DEL_VDEV_INSTRUCTIONS_CMDID,
+
+ /** WMI commands related to monitor mode. */
+ WMI_MNT_FILTER_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_MONITOR),
+
/**
* Nan Data commands
* NDI - NAN Data Interface
@@ -956,6 +1010,13 @@
/** WMI is ready; after this event the wlan subsystem is initialized and can process commands. */
WMI_READY_EVENTID,
+ /**
+ * Specify what WMI services the target supports
+ * (for services beyond what fits in the WMI_SERVICE_READY_EVENT
+ * message's wmi_service_bitmap)
+ */
+ WMI_SERVICE_AVAILABLE_EVENTID,
+
/** Scan specific events */
WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN) ,
@@ -1008,6 +1069,10 @@
WMI_PDEV_SET_HW_MODE_RESP_EVENTID,
WMI_PDEV_HW_MODE_TRANSITION_EVENTID,
WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID,
+ /** Report ANT DIV feature's status */
+ WMI_PDEV_ANTDIV_STATUS_EVENTID,
+ /** Chip level Power stats */
+ WMI_PDEV_CHIP_POWER_STATS_EVENTID,
/* VDEV specific events */
/** VDEV started event in response to VDEV_START request */
@@ -1030,7 +1095,13 @@
/* FW response to Host for vdev delete cmdid */
WMI_VDEV_DELETE_RESP_EVENTID,
- /* peer specific events */
+ /**
+ * DISA feature: FW response to Host with encrypted/decrypted
+ * 802.11 DISA frame
+ */
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID,
+
+ /* peer specific events */
/** FW reauet to kick out the station for reasons like inactivity,lack of response ..etc */
WMI_PEER_STA_KICKOUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PEER),
@@ -1061,6 +1132,8 @@
WMI_PEER_RATECODE_LIST_EVENTID,
WMI_WDS_PEER_EVENTID,
WMI_PEER_STA_PS_STATECHG_EVENTID,
+ /** Peer Ant Div Info Event with rssi per chain, etc */
+ WMI_PEER_ANTDIV_INFO_EVENTID,
/* beacon/mgmt specific events */
/** RX management frame. the entire frame is carried along with the event. */
@@ -1081,6 +1154,12 @@
WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
/** Event for Mgmt TX completion event */
WMI_MGMT_TX_COMPLETION_EVENTID,
+ /** Event for Mgmt TX bundle completion event */
+ WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
+ /** vdev_map used in WMI_TBTTOFFSET_UPDATE_EVENTID supports at most 32 vdevs.
+ * Use this event if the number of vdevs is > 32.
+ */
+ WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
/*ADDBA Related WMI Events*/
/** Indication the completion of the prior
@@ -1151,6 +1230,29 @@
/** FW update tx power levels event */
WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ /** This event is used to report wlan stats to host.
+ * It is triggered under 3 conditions:
+ * (a) Periodic timer timed out, based on the period specified
+ * by WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD
+ * (b) Whenever any of the (enabled) stats thresholds specified
+ * in the WMI_PDEV_SET_STATS_THRESHOLD_CMD message is exceeded
+ * within the current stats period.
+ * (c) In response to the one-time wlan stats request of
+ * WMI_REQUEST_WLAN_STATS_CMDID from host.
+ *
+ * If this event is triggered by condition a or b,
+ * the stats counters are cleared at the start of each period.
+ * But if it is triggered by condition c, stats counters won't be cleared.
+ */
+ WMI_REPORT_STATS_EVENTID,
+
+ /** Event indicating RCPI of the peer requested by host in the
+ * WMI_REQUEST_RCPI_CMDID */
+ WMI_UPDATE_RCPI_EVENTID,
+
+ /** This event is used to respond to WMI_REQUEST_PEER_STATS_INFO_CMDID
+ * and report peer stats info to host */
+ WMI_PEER_STATS_INFO_EVENTID,
/* NLO specific events */
/** NLO match event after the first match */
@@ -1176,6 +1278,12 @@
/*chatter query reply event*/
WMI_CHATTER_PC_QUERY_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_CHATTER),
+ /** DFS related events */
+ WMI_PDEV_DFS_RADAR_DETECTION_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_DFS),
+ /** Indicate channel-availability-check completion event to host */
+ WMI_VDEV_DFS_CAC_COMPLETE_EVENTID,
+ /** Indicate off-channel-availability-check completion event to host */
+ WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID,
/** echo event in response to echo command */
WMI_ECHO_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MISC),
@@ -1241,6 +1349,15 @@
/** event to report SCPC calibrated data to host */
WMI_PDEV_UTF_SCPC_EVENTID,
+ /** event to provide requested data from the target's flash memory */
+ WMI_READ_DATA_FROM_FLASH_EVENTID,
+
+ /** event to report rx aggregation failure frame information */
+ WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
+
+ /** event to upload a PKGID to host to identify chip for various products */
+ WMI_PKGID_EVENTID,
+
/* GPIO Event */
WMI_GPIO_INPUT_EVENTID=WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
/** upload H_CV info WMI event
@@ -1280,6 +1397,10 @@
WMI_NAN_STARTED_CLUSTER_EVENTID,
WMI_NAN_JOINED_CLUSTER_EVENTID,
+ /* Coex Event */
+ WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID =
+ WMI_EVT_GRP_START_ID(WMI_GRP_COEX),
+
/* LPI Event */
WMI_LPI_RESULT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_LPI),
WMI_LPI_STATUS_EVENTID,
@@ -1558,8 +1679,8 @@
};
/** NOTE: This defs cannot be changed in the future without breaking WMI compatibility */
-#define WMI_MAX_NUM_SS 8
-#define WMI_MAX_NUM_RU 4
+#define WMI_MAX_NUM_SS MAX_HE_NSS
+#define WMI_MAX_NUM_RU MAX_HE_RU
/*
* Figure 8 554ae: -PPE Threshold Info field format
@@ -1582,27 +1703,30 @@
* Note that in these macros, "ru" is one-based, not zero-based, while
* nssm1 is zero-based.
*/
-#define WMI_SET_PPET16(ppet16_ppet8_ru3_ru0, ppet, ru, nssm1) \
+#define WMI_SET_PPET16(ppet16_ppet8_ru3_ru0, ru, nssm1, ppet) \
do { \
- ppet16_ppet8_ru3_ru0[nssm1] &= ~(7 << (((ru-1)%4)*6)); \
- ppet16_ppet8_ru3_ru0[nssm1] |= ((ppet&7) << (((ru-1)%4)*6)); \
+ ppet16_ppet8_ru3_ru0[nssm1] &= ~(7 << (((ru-1)&3)*6)); \
+ ppet16_ppet8_ru3_ru0[nssm1] |= ((ppet&7) << (((ru-1)&3)*6)); \
} while (0)
#define WMI_GET_PPET16(ppet16_ppet8_ru3_ru0, ru, nssm1) \
- ((ppet16_ppet8_ru3_ru0[nssm1] >> (((ru-1)%4)*6))&7)
+ ((ppet16_ppet8_ru3_ru0[nssm1] >> (((ru-1)&3)*6))&7)
-#define WMI_SET_PPET8(ppet16_ppet8_ru3_ru0, ppet, ru, nssm1) \
+#define WMI_SET_PPET8(ppet16_ppet8_ru3_ru0, ru, nssm1, ppet) \
do { \
- ppet16_ppet8_ru3_ru0[nssm1] &= ~(7 << (((ru-1)%4)*6+3)); \
- ppet16_ppet8_ru3_ru0[nssm1] |= ((ppet&7) << (((ru-1)%4)*6+3)); \
+ ppet16_ppet8_ru3_ru0[nssm1] &= ~(7 << (((ru-1)&3)*6+3)); \
+ ppet16_ppet8_ru3_ru0[nssm1] |= ((ppet&7) << (((ru-1)&3)*6+3)); \
} while (0)
#define WMI_GET_PPET8(ppet16_ppet8_ru3_ru0, ru, nssm1) \
- ((ppet16_ppet8_ru3_ru0[nssm1] >> (((ru-1)%4)*6+3))&7)
+ ((ppet16_ppet8_ru3_ru0[nssm1] >> (((ru-1)&3)*6+3))&7)
typedef struct _wmi_ppe_threshold {
A_UINT32 numss_m1; /** NSS - 1*/
- A_UINT32 ru_count; /** Max RU count */
+ union {
+ A_UINT32 ru_count; /** RU count; OBSOLETE, to be removed after a few versions */
+ A_UINT32 ru_mask; /** RU index mask */
+ };
A_UINT32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS]; /** ppet8 and ppet16 for max num ss */
} wmi_ppe_threshold;
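/*
 * Illustrative sketch, not part of this patch: filling a wmi_ppe_threshold
 * with the reordered WMI_SET_PPET16/WMI_SET_PPET8 macros defined above.
 * Note the new argument order (ru, nssm1, ppet); "ru" is one-based and
 * "nssm1" is zero-based. The threshold values written here are arbitrary
 * examples.
 */
static void example_fill_ppe_threshold(wmi_ppe_threshold *ppet)
{
    A_UINT32 nssm1, ru;

    ppet->numss_m1 = 1;     /* two spatial streams */
    ppet->ru_mask  = 0x3;   /* RU indices 1 and 2 present */

    for (nssm1 = 0; nssm1 <= ppet->numss_m1; nssm1++) {
        for (ru = 1; ru <= 2; ru++) {
            WMI_SET_PPET16(ppet->ppet16_ppet8_ru3_ru0, ru, nssm1, 7);
            WMI_SET_PPET8(ppet->ppet16_ppet8_ru3_ru0, ru, nssm1, 0);
        }
    }

    /* read-back check: returns the PPET16 value just written (7) */
    (void)WMI_GET_PPET16(ppet->ppet16_ppet8_ru3_ru0, 1, 0);
}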
@@ -1821,6 +1945,25 @@
*/
} wmi_service_ready_event_fixed_param;
+#define WMI_SERVICE_SEGMENT_BM_SIZE32 4 /* 4x A_UINT32 = 128 bits */
+typedef struct {
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_service_available_event_fixed_param
+ */
+ A_UINT32 tlv_header;
+ /**
+ * The wmi_service_segment_offset field specifies the position within
+ * the logical bitmap of WMI service flags at which the WMI service
+ * flags specified within this message begin.
+ * Since the first 128 WMI service flags are specified within the
+ * wmi_service_bitmap field of the WMI_SERVICE_READY_EVENT message,
+ * the wmi_service_segment_offset value is expected to be 128 or more.
+ */
+ A_UINT32 wmi_service_segment_offset;
+ A_UINT32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
+} wmi_service_available_event_fixed_param;
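/*
 * Illustrative sketch, not part of this patch: checking whether an extended
 * WMI service flag is advertised in a WMI_SERVICE_AVAILABLE event. Only the
 * struct and WMI_SERVICE_SEGMENT_BM_SIZE32 above are from this header; the
 * helper name is hypothetical.
 */
static int example_ext_service_enabled(
    const wmi_service_available_event_fixed_param *ev, A_UINT32 service_id)
{
    A_UINT32 rel, word, bit;

    if (service_id < ev->wmi_service_segment_offset)
        return 0; /* flag lives in the WMI_SERVICE_READY bitmap or an earlier segment */

    rel = service_id - ev->wmi_service_segment_offset;
    if (rel >= WMI_SERVICE_SEGMENT_BM_SIZE32 * 32)
        return 0; /* flag lives in a later segment */

    word = rel / 32;
    bit  = rel % 32;
    return (ev->wmi_service_segment_bitmap[word] >> bit) & 1;
}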
+
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_WMI_SERVICE_EXT_READY_EVENT */
/* which WMI_DBS_CONC_SCAN_CFG setting the FW is initialized with */
@@ -1843,6 +1986,12 @@
* Value 0 means FW hasn't given any limit to host.
*/
A_UINT32 max_bssid_rx_filters;
+ /*
+ * Extended FW build version information:
+ * bits 27:0 -> reserved
+ * bits 31:28 -> CRM sub ID
+ */
+ A_UINT32 fw_build_vers_ext;
} wmi_service_ready_ext_event_fixed_param;
typedef enum {
@@ -2236,6 +2385,13 @@
#define WMI_RSRC_CFG_FLAG_QWRAP_MODE_ENABLE_S 8
#define WMI_RSRC_CFG_FLAG_QWRAP_MODE_ENABLE_M 0x100
+ #define WMI_RSRC_CFG_FLAG_MGMT_COMP_EVT_BUNDLE_SUPPORT_S 9
+ #define WMI_RSRC_CFG_FLAG_MGMT_COMP_EVT_BUNDLE_SUPPORT_M 0x200
+
+ #define WMI_RSRC_CFG_FLAG_TX_MSDU_ID_NEW_PARTITION_SUPPORT_S 10
+ #define WMI_RSRC_CFG_FLAG_TX_MSDU_ID_NEW_PARTITION_SUPPORT_M 0x400
+
+
A_UINT32 flag1;
/** @brief smart_ant_cap - Smart Antenna capabilities information
@@ -2340,6 +2496,16 @@
#define WMI_RSRC_CFG_FLAG_QWRAP_MODE_ENABLE_GET(word32) \
WMI_RSRC_CFG_FLAG_GET((word32), QWRAP_MODE_ENABLE)
+#define WMI_RSRC_CFG_FLAG_MGMT_COMP_EVT_BUNDLE_SUPPORT_SET(word32, value) \
+ WMI_RSRC_CFG_FLAG_SET((word32), MGMT_COMP_EVT_BUNDLE_SUPPORT, (value))
+#define WMI_RSRC_CFG_FLAG_MGMT_COMP_EVT_BUNDLE_SUPPORT_GET(word32) \
+ WMI_RSRC_CFG_FLAG_GET((word32), MGMT_COMP_EVT_BUNDLE_SUPPORT)
+
+#define WMI_RSRC_CFG_FLAG_TX_MSDU_ID_NEW_PARTITION_SUPPORT_SET(word32, value) \
+ WMI_RSRC_CFG_FLAG_SET((word32), TX_MSDU_ID_NEW_PARTITION_SUPPORT, (value))
+#define WMI_RSRC_CFG_FLAG_TX_MSDU_ID_NEW_PARTITION_SUPPORT_GET(word32) \
+ WMI_RSRC_CFG_FLAG_GET((word32), TX_MSDU_ID_NEW_PARTITION_SUPPORT)
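/*
 * Illustrative sketch, not part of this patch: advertising the two new
 * resource-config capabilities in resource_config.flag1 before WMI_INIT.
 * This assumes the underlying WMI_RSRC_CFG_FLAG_SET/GET helpers update and
 * read the flag word in place, as the accessors above suggest.
 */
static void example_set_rsrc_cfg_flags(A_UINT32 *flag1)
{
    WMI_RSRC_CFG_FLAG_MGMT_COMP_EVT_BUNDLE_SUPPORT_SET(*flag1, 1);
    WMI_RSRC_CFG_FLAG_TX_MSDU_ID_NEW_PARTITION_SUPPORT_SET(*flag1, 1);

    if (WMI_RSRC_CFG_FLAG_MGMT_COMP_EVT_BUNDLE_SUPPORT_GET(*flag1)) {
        /* host opted in to WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID delivery */
    }
}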
+
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_init_cmd_fixed_param */
@@ -2350,9 +2516,17 @@
wmi_abi_version host_abi_vers;
A_UINT32 num_host_mem_chunks; /** size of array host_mem_chunks[] */
- /* The TLVs for resource_config and host_mem_chunks[] will follow.
+
+ /* The TLVs for resource_config, host_mem_chunks[], and hw_mode_config
+ * will follow.
* wmi_resource_config resource_config;
* wlan_host_memory_chunk host_mem_chunks[];
+ * wmi_pdev_set_hw_mode_cmd_fixed_param hw_mode_config;
+ * Note that the hw_mode_config, in spite of its "pdev" name,
+ * applies to the entire target rather than for a single pdev
+ * within the target.
+ * To avoid specifying a HW mode for the target, the host should
+ * fill hw_mode_config's fields with 0x0.
*/
} wmi_init_cmd_fixed_param;
@@ -2393,6 +2567,23 @@
A_UINT32 ie_data[1];
} wmi_ie_data;
+/**
+ * TLV used for length/buffer
+ */
+typedef struct {
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_tlv_buf_len_param
+ */
+ A_UINT32 tlv_header;
+ A_UINT32 buf_len; /** Length of buf */
+ /**
+ * Following this structure is the TLV byte stream of buf
+ * of length buf_len:
+ * A_UINT8 buf[];
+ *
+ */
+} wmi_tlv_buf_len_param;
typedef struct {
/** Len of the SSID */
@@ -2418,6 +2609,9 @@
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
#define WLAN_SCAN_PARAMS_MAX_IE_LEN 512
+/* NOTE: This constant cannot be changed without breaking WMI compatibility */
+#define WMI_IE_BITMAP_SIZE 8
+
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_start_scan_cmd_fixed_param */
/** Scan ID */
@@ -2472,7 +2666,14 @@
A_UINT32 ie_len;
/** Max number of probes to be sent */
A_UINT32 n_probes;
-
+ /** MAC Address to use in Probe Req as SA **/
+ wmi_mac_addr mac_addr;
+ /** Mask indicating which bits of mac_addr are to be randomized **/
+ wmi_mac_addr mac_mask;
+ /** ie bitmap to use in probe req **/
+ A_UINT32 ie_bitmap[WMI_IE_BITMAP_SIZE];
+ /** Number of vendor OUIs; carried in the vendor_oui[] TLV **/
+ A_UINT32 num_vendor_oui;
/**
* TLV (tag length value ) parameters follow the scan_cmd
@@ -2481,6 +2682,7 @@
* wmi_ssid ssid_list[];
* wmi_mac_addr bssid_list[];
* A_UINT8 ie_data[];
+ * wmi_vendor_oui vendor_oui[];
*/
} wmi_start_scan_cmd_fixed_param;
@@ -2526,6 +2728,12 @@
/** always do passive scan on passive channels */
#define WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN 0x10000
+/** set HALF (10MHz) rate support */
+#define WMI_SCAN_FLAG_HALF_RATE_SUPPORT 0x20000
+/** set Quarter (5MHz) rate support */
+#define WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT 0x40000
+#define WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ 0x80000
+#define WMI_SCAN_ENABLE_IE_WHITELIST_IN_PROBE_REQ 0x100000
/** for adaptive scan mode using 3 bits (21 - 23 bits) */
#define WMI_SCAN_DWELL_MODE_MASK 0x00E00000
@@ -2572,7 +2780,8 @@
* Req Type
* req_type should be WMI_SCAN_STOP_ONE, WMI_SCN_STOP_VAP_ALL or WMI_SCAN_STOP_ALL
* WMI_SCAN_STOP_ONE indicates to stop a specific scan with scan_id
- * WMI_SCN_STOP_VAP_ALL indicates to stop all scan requests on a specific vDev with vdev_id
+ * WMI_SCN_STOP_VAP_ALL indicates to stop all scan requests on a specific
+ * vDev with vdev_id and pdev with pdev_id
* WMI_SCAN_STOP_ALL indicates to stop all scan requests in both Scheduler's queue and Scan Engine
*/
A_UINT32 req_type;
@@ -2581,6 +2790,11 @@
* used when req_type equals to WMI_SCN_STOP_VAP_ALL, it indexed the vDev on which to stop the scan
*/
A_UINT32 vdev_id;
+ /** pdev_id for identifying the MAC
+ * See macros starting with WMI_PDEV_ID_ for values.
+ * In non-DBDC case host should set it to 0
+ */
+ A_UINT32 pdev_id;
} wmi_stop_scan_cmd_fixed_param;
#define MAX_NUM_CHAN_PER_WMI_CMD 58 // each WMI cmd can hold 58 channel entries at most
@@ -2590,6 +2804,7 @@
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_scan_chan_list_cmd_fixed_param */
A_UINT32 num_scan_chans; /** no of elements in chan_info[] */
A_UINT32 flags; /* Flags used to control the behavior of channel list update on target side */
+ A_UINT32 pdev_id; /* pdev_id for identifying the MAC. See macros starting with WMI_PDEV_ID_ for values */
/** Followed by the variable length TLV chan_info:
* wmi_channel chan_info[] */
} wmi_scan_chan_list_cmd_fixed_param;
@@ -2629,6 +2844,12 @@
* for WLAN_M_STA type, there are 3 entries in the table (refer to default_scan_priority_mapping_table definition)
*/
A_UINT32 number_rows;
+ /**
+ * pdev_id for identifying the MAC. See macros starting with
+ * WMI_PDEV_ID_ for values. In non-DBDC case host should
+ * set it to 0.
+ */
+ A_UINT32 pdev_id;
/** mapping_table for a specific vdev follows this TLV
* WLAN_PRIORITY_MAPPING mapping_table[]; */
}wmi_scan_sch_priority_table_cmd_fixed_param;
@@ -2652,8 +2873,22 @@
A_UINT32 min_rest_time;
/** min rest time. Only valid if WMI_SCAN_UPDATE_MAX_REST_TIME flag is set in scan_update_flag */
A_UINT32 max_rest_time;
+ /**
+ * pdev_id for identifying the MAC. See macros starting with
+ * WMI_PDEV_ID_ for values. In non-DBDC case host should set it to 0
+ */
+ A_UINT32 pdev_id;
} wmi_scan_update_request_cmd_fixed_param;
+#define WMI_SCAN_PROBE_OUI_SPOOFED_MAC_IN_PROBE_REQ 0x1
+#define WMI_SCAN_PROBE_OUI_RANDOM_SEQ_NO_IN_PROBE_REQ 0x2
+#define WMI_SCAN_PROBE_OUI_ENABLE_IE_WHITELIST_IN_PROBE_REQ 0x4
+
+typedef struct _wmi_vendor_oui {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_vendor_oui */
+ A_UINT32 oui_type_subtype; /** Vendor OUI type and subtype: lower 3 bytes are the type, highest byte is the subtype **/
+}wmi_vendor_oui;
+
typedef struct {
A_UINT32 tlv_header;
/** oui to be used in probe request frame when random mac addresss is
@@ -2661,6 +2896,20 @@
* host initated scans. host can request for random mac address with
* WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ flag. */
A_UINT32 prob_req_oui;
+ A_UINT32 vdev_id;
+ /** Control Flags **/
+ A_UINT32 flags;
+ /** ie bitmap to use in probe req **/
+ A_UINT32 ie_bitmap[WMI_IE_BITMAP_SIZE];
+ /** Number of vendor OUIs; carried in the vendor_oui[] TLV **/
+ A_UINT32 num_vendor_oui;
+ /**
+ * pdev_id for identifying the MAC. See macros starting with
+ * WMI_PDEV_ID_ for values. In non-DBDC case host should set it to 0
+ */
+ A_UINT32 pdev_id;
+ /* Following this TLV comes an array of structures of type wmi_vendor_oui: wmi_vendor_oui vendor_oui[]; */
+
} wmi_scan_prob_req_oui_cmd_fixed_param;
enum wmi_scan_event_type {
@@ -3253,11 +3502,40 @@
A_UINT32 enable_cmd;
} wmi_vdev_spectral_enable_cmd_fixed_param;
+/* information sub element id for QSBW, expected value is 0x02 */
+#define WMI_CSA_EVENT_QSBW_ISE_ID_MASK 0x000000FF
+/* length of QSBW ISE data, expected value is 0x02 */
+#define WMI_CSA_EVENT_QSBW_ISE_LEN_MASK 0x0000FF00
+/* capabilities, 0x01 for 5MHz, 0x02 for 10MHz, 0x01|0x2 for both
+ * (see WMI_CSA_EVENT_QSBW_ISE bitmask defs)
+ */
+#define WMI_CSA_EVENT_QSBW_ISE_CAP_MASK 0x00FF0000
+/* notification from AP, 0x01 for 5MHz, 0x02 for 10MHz
+ * (see WMI_CSA_EVENT_QSBW_ISE bitmask defs)
+ */
+#define WMI_CSA_EVENT_QSBW_ISE_NOTIF_MASK 0xFF000000
+
+#define WMI_CSA_EVENT_QSBW_ISE_ID 0x02
+#define WMI_CSA_EVENT_QSBW_ISE_LEN 0x02
+
+#define WMI_CSA_EVENT_QSBW_ISE_5M_BITMASK 0x01
+#define WMI_CSA_EVENT_QSBW_ISE_10M_BITMASK 0x02
+
+#define WMI_CSA_EVENT_QSBW_ISE_CAP_5M(qsbw_ise) \
+ (((qsbw_ise) >> 16) & WMI_CSA_EVENT_QSBW_ISE_5M_BITMASK)
+#define WMI_CSA_EVENT_QSBW_ISE_CAP_10M(qsbw_ise) \
+ (((qsbw_ise) >> 16) & WMI_CSA_EVENT_QSBW_ISE_10M_BITMASK)
+#define WMI_CSA_EVENT_QSBW_ISE_NOTIF_5M(qsbw_ise) \
+ (((qsbw_ise) >> 24) & WMI_CSA_EVENT_QSBW_ISE_5M_BITMASK)
+#define WMI_CSA_EVENT_QSBW_ISE_NOTIF_10M(qsbw_ise) \
+ (((qsbw_ise) >> 24) & WMI_CSA_EVENT_QSBW_ISE_10M_BITMASK)
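/*
 * Illustrative sketch, not part of this patch: decoding a QSBW information
 * sub-element word as carried in wmi_csa_event_fixed_param.qsbw_ise (added
 * below and valid only when WMI_QSBW_ISE_PRESENT is set in ies_present_flag).
 * Only the masks and accessor macros above are from this header.
 */
static void example_parse_qsbw_ise(A_UINT32 qsbw_ise)
{
    /* sanity-check the ISE id and length fields against the expected values */
    if ((qsbw_ise & WMI_CSA_EVENT_QSBW_ISE_ID_MASK) != WMI_CSA_EVENT_QSBW_ISE_ID ||
        ((qsbw_ise & WMI_CSA_EVENT_QSBW_ISE_LEN_MASK) >> 8) != WMI_CSA_EVENT_QSBW_ISE_LEN)
        return;

    if (WMI_CSA_EVENT_QSBW_ISE_CAP_5M(qsbw_ise)) {
        /* AP is 5 MHz capable */
    }
    if (WMI_CSA_EVENT_QSBW_ISE_NOTIF_10M(qsbw_ise)) {
        /* AP is notifying a switch to 10 MHz operation */
    }
}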
+
typedef enum {
WMI_CSA_IE_PRESENT = 0x00000001,
WMI_XCSA_IE_PRESENT = 0x00000002,
WMI_WBW_IE_PRESENT = 0x00000004,
WMI_CSWARP_IE_PRESENT = 0x00000008,
+WMI_QSBW_ISE_PRESENT = 0x00000010,
}WMI_CSA_EVENT_IES_PRESENT_FLAG;
/* wmi CSA receive event from beacon frame */
@@ -3275,6 +3553,7 @@
A_UINT32 wb_ie[2];
A_UINT32 cswarp_ie;
A_UINT32 ies_present_flag; //WMI_CSA_EVENT_IES_PRESENT_FLAG
+ A_UINT32 qsbw_ise;
}wmi_csa_event_fixed_param;
typedef enum {
@@ -3548,6 +3827,66 @@
WMI_PDEV_PARAM_CUST_TXPOWER_SCALE,
/** ATF enabe/disabe dynamically */
WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ /** Set tx retry limit for control frames. 0 = disable, 31 = max */
+ WMI_PDEV_PARAM_CTRL_RETRY_LIMIT,
+ /** Set propagation delay for 2G / 5G band.
+ * The propagation delay is fundamentally a per-peer property, but
+ * the target may not support per-peer settings for ack timeouts.
+ * This pdev parameter allows the MAC-level ack timeout to be set to
+ * a value suitable for the worst-case propagation delay of any peer
+ * within that pdev.
+ * Units are microseconds.
+ */
+ WMI_PDEV_PARAM_PROPAGATION_DELAY,
+ /**
+ * Host can enable/disable ANT DIV feature
+ * if it's been enabled in BDF
+ */
+ WMI_PDEV_PARAM_ENA_ANT_DIV,
+ /** Host can force one chain to select a specific ANT */
+ WMI_PDEV_PARAM_FORCE_CHAIN_ANT,
+ /**
+ * Start a cycle ANT self test periodically.
+ * In the test, the FW would select each ANT pair
+ * one by one, the cycle time could be configured
+ * via WMI_PDEV_PARAM_ANT_DIV_SELFTEST_INTVL
+ */
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST,
+ /**
+ * Configure the cycle time of the ANT self test,
+ * in microseconds. Due to timer granularity
+ * limitations, very small values may not be
+ * accurate.
+ */
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST_INTVL,
+
+ /**
+ * wlan stats observation period, in milliseconds.
+ * The value of 0 is used to turn off periodic stats report.
+ */
+ WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD,
+ /**
+ * Set tx_ppdu_delay[] bin size to specify how many
+ * milliseconds each bin of the wmi_tx_stats.tx_ppdu_delay[]
+ * histogram represents.
+ */
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_BIN_SIZE_MS,
+ /** set wmi_tx_stats.tx_ppdu_delay[] array length */
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_ARRAY_LEN,
+ /** set wmi_tx_stats.tx_mpdu_aggr[] array length */
+ WMI_PDEV_PARAM_TX_MPDU_AGGR_ARRAY_LEN,
+ /** set wmi_rx_stats.rx_mpdu_aggr[] array length */
+ WMI_PDEV_PARAM_RX_MPDU_AGGR_ARRAY_LEN,
+ /** Set TX delay value in TX sch module, unit is microseconds */
+ WMI_PDEV_PARAM_TX_SCH_DELAY,
+ /** Set RTS enable for SIFS bursting */
+ WMI_PDEV_PARAM_ENABLE_RTS_SIFS_BURSTING,
+ /** Set the maximum number of MPDUs in an A-MPDU */
+ WMI_PDEV_PARAM_MAX_MPDUS_IN_AMPDU,
+ /** Enable/disable peer stats info mechanism
+ * A zero value disables; a non-zero value enables.
+ */
+ WMI_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
} WMI_PDEV_PARAM;
@@ -3715,6 +4054,15 @@
A_UINT32 status; /* WMI_MGMT_TX_COMP_STATUS_TYPE */
} wmi_mgmt_tx_compl_event_fixed_param;
+typedef struct {
+ A_UINT32 tlv_header;
+ A_UINT32 num_reports;
+ /* tlv for completion
+ * A_UINT32 desc_ids[num_reports]; <- from tx_send_cmd
+ * A_UINT32 status[num_reports]; <- WMI_MGMT_TX_COMP_STATUS_TYPE
+ */
+} wmi_mgmt_tx_compl_bundle_event_fixed_param;
+
#define WMI_TPC_RATE_MAX 160
/* WMI_TPC_TX_NUM_CHAIN macro can't be changed without breaking the WMI compatibility */
#define WMI_TPC_TX_NUM_CHAIN 4
@@ -3817,6 +4165,7 @@
WMI_PKTLOG_EVENT_RCU = 0x8, /* Rate Control Update */
/* 0x10 used by deprecated DBG_PRINT */
WMI_PKTLOG_EVENT_SMART_ANTENNA = 0x20, /* To support Smart Antenna */
+ WMI_PKTLOG_EVENT_SW = 0x40, /* To support SW defined events */
} WMI_PKTLOG_EVENT;
typedef enum {
@@ -3914,6 +4263,32 @@
A_UINT32 enable_override;
} wmi_vdev_set_dscp_tid_map_cmd_fixed_param;
+enum WMI_WAKE_GPIO_TYPE {
+ WMI_WAKE_GPIO_LOW = 1,
+ WMI_WAKE_GPIO_HIGH = 2,
+ WMI_WAKE_GPIO_RISING_EDGE = 3,
+ WMI_WAKE_GPIO_FALLING_EDGE = 4,
+};
+
+/**
+ * Set GPIO numbers used to wakeup host and wakeup target.
+ */
+typedef struct {
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_WMI_PDEV_SET_WAKEUP_CONFIG_CMDID_fixed_param
+ */
+ A_UINT32 tlv_header;
+ /* gpio num used to wake up the host; 0xff disables the wakeup gpio */
+ A_UINT32 host_wakeup_gpio;
+ /* refer to WMI_WAKE_GPIO_TYPE */
+ A_UINT32 host_wakeup_type;
+ /* gpio num used to wake up the target; 0xff disables the wakeup gpio */
+ A_UINT32 target_wakeup_gpio;
+ /* refer to WMI_WAKE_GPIO_TYPE */
+ A_UINT32 target_wakeup_type;
+} WMI_PDEV_SET_WAKEUP_CONFIG_CMDID_fixed_param;
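/*
 * Illustrative sketch, not part of this patch: filling the fixed_param for
 * WMI_PDEV_SET_WAKEUP_CONFIG_CMDID. Setting the TLV header and sending the
 * command are driver-specific and omitted; the GPIO number is a placeholder.
 */
static void example_fill_wakeup_config(
    WMI_PDEV_SET_WAKEUP_CONFIG_CMDID_fixed_param *cmd)
{
    cmd->host_wakeup_gpio   = 16;                 /* hypothetical GPIO number */
    cmd->host_wakeup_type   = WMI_WAKE_GPIO_RISING_EDGE;
    cmd->target_wakeup_gpio = 0xff;               /* 0xff = target wakeup disabled */
    cmd->target_wakeup_type = WMI_WAKE_GPIO_LOW;
}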
+
/** Fixed rate (rate-code) for broadcast/ multicast data frames */
/* @brief bcast_mcast_data_rate - set the rates for the bcast/ mcast frames
* @details
@@ -4275,6 +4650,8 @@
* per power level stats.
*/
A_UINT32 power_level_offset;
+ /* radio id for these tx time per power level statistics (if multiple radios are supported) */
+ A_UINT32 radio_id;
/*
* This TLV will be followed by a TLV containing a variable-length array of
* A_UINT32 with tx time per power level data
@@ -4457,6 +4834,10 @@
time driver waits before shutting down the radio or switching the channel and after receiving an ACK for
a data frame with PM bit set) */
A_UINT32 rx_leak_window;
+ A_UINT32 tx_rts_succ_cnt;
+ A_UINT32 tx_rts_fail_cnt;
+ A_UINT32 tx_ppdu_succ_cnt;
+ A_UINT32 tx_ppdu_fail_cnt;
} wmi_iface_link_stats;
/** Interface statistics (once started) reset and start afresh after each connection */
@@ -4506,6 +4887,25 @@
} wmi_vdev_rate_ht_info;
typedef struct {
+ /**
+ * TLV tag and len, tag equals
+ * WMITLV_TAG_STRUC_wmi_rx_aggr_failure_event_fixed_param
+ */
+ A_UINT32 tlv_header;
+ A_UINT32 num_failure_info; /* How many holes (seqnum gaps) in rx aggregation */
+} wmi_rx_aggr_failure_event_fixed_param;
+
+typedef struct {
+ /**
+ * TLV tag and len, tag equals
+ * WMITLV_TAG_STRUC_wmi_rx_aggr_failure_info
+ */
+ A_UINT32 tlv_header;
+ A_UINT32 start_seq; /* start sequence number of the hole */
+ A_UINT32 end_seq; /* end sequence number of the hole */
+} wmi_rx_aggr_failure_info;
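/*
 * Illustrative sketch, not part of this patch: walking the hole list that
 * follows wmi_rx_aggr_failure_event_fixed_param. "info" is assumed to point
 * at the already-parsed TLV array of wmi_rx_aggr_failure_info entries, as
 * produced by the driver's WMI TLV parser.
 */
static void example_walk_rx_aggr_holes(
    const wmi_rx_aggr_failure_event_fixed_param *fixed,
    const wmi_rx_aggr_failure_info *info)
{
    A_UINT32 i;

    for (i = 0; i < fixed->num_failure_info; i++) {
        /* each entry describes one seqnum gap: [start_seq, end_seq] */
        A_UINT32 hole_len = info[i].end_seq - info[i].start_seq + 1;
        (void)hole_len;
    }
}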
+
+typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_stats_event_fixed_param */
wmi_stats_id stats_id;
/** number of pdev stats event structures (wmi_pdev_stats) 0 or 1 */
@@ -4532,6 +4932,505 @@
*/
} wmi_stats_event_fixed_param;
+/* WLAN channel CCA stats bitmap */
+#define WLAN_STATS_IDLE_TIME_SHIFT 0
+#define WLAN_STATS_IDLE_TIME_MASK 0x00000001
+
+#define WLAN_STATS_TX_TIME_SHIFT 1
+#define WLAN_STATS_TX_TIME_MASK 0x00000002
+
+#define WLAN_STATS_RX_IN_BSS_TIME_SHIFT 2
+#define WLAN_STATS_RX_IN_BSS_TIME_MASK 0x00000004
+
+#define WLAN_STATS_RX_OUT_BSS_TIME_SHIFT 3
+#define WLAN_STATS_RX_OUT_BSS_TIME_MASK 0x00000008
+
+#define WLAN_STATS_RX_BUSY_TIME_SHIFT 4
+#define WLAN_STATS_RX_BUSY_TIME_MASK 0x00000010
+
+#define WLAN_STATS_RX_IN_BAD_COND_TIME_SHIFT 5
+#define WLAN_STATS_RX_IN_BAD_COND_TIME_MASK 0x00000020
+
+#define WLAN_STATS_TX_IN_BAD_COND_TIME_SHIFT 6
+#define WLAN_STATS_TX_IN_BAD_COND_TIME_MASK 0x00000040
+
+#define WLAN_STATS_WLAN_NOT_AVAIL_TIME_SHIFT 7
+#define WLAN_STATS_WLAN_NOT_AVAIL_TIME_MASK 0x00000080
+
+/* WLAN peer signal stats bitmap */
+#define WLAN_STATS_PER_CHAIN_SNR_SHIFT 0
+#define WLAN_STATS_PER_CHAIN_SNR_MASK 0x00000001
+
+#define WLAN_STATS_PER_CHAIN_NF_SHIFT 1
+#define WLAN_STATS_PER_CHAIN_NF_MASK 0x00000002
+
+/* WLAN TX stats bitmap */
+#define WLAN_STATS_TX_MSDU_CNT_SHIFT 0
+#define WLAN_STATS_TX_MSDU_CNT_MASK 0x00000001
+
+#define WLAN_STATS_TX_MPDU_CNT_SHIFT 1
+#define WLAN_STATS_TX_MPDU_CNT_MASK 0x00000002
+
+#define WLAN_STATS_TX_PPDU_CNT_SHIFT 2
+#define WLAN_STATS_TX_PPDU_CNT_MASK 0x00000004
+
+#define WLAN_STATS_TX_BYTES_SHIFT 3
+#define WLAN_STATS_TX_BYTES_MASK 0x00000008
+
+#define WLAN_STATS_TX_MSDU_DROP_CNT_SHIFT 4
+#define WLAN_STATS_TX_MSDU_DROP_CNT_MASK 0x00000010
+
+#define WLAN_STATS_TX_DROP_BYTES_SHIFT 5
+#define WLAN_STATS_TX_DROP_BYTES_MASK 0x00000020
+
+#define WLAN_STATS_TX_MPDU_RETRY_CNT_SHIFT 6
+#define WLAN_STATS_TX_MPDU_RETRY_CNT_MASK 0x00000040
+
+#define WLAN_STATS_TX_MPDU_FAIL_CNT_SHIFT 7
+#define WLAN_STATS_TX_MPDU_FAIL_CNT_MASK 0x00000080
+
+#define WLAN_STATS_TX_PPDU_FAIL_CNT_SHIFT 8
+#define WLAN_STATS_TX_PPDU_FAIL_CNT_MASK 0x00000100
+
+#define WLAN_STATS_TX_MPDU_AGGR_SHIFT 9
+#define WLAN_STATS_TX_MPDU_AGGR_MASK 0x00000200
+
+#define WLAN_STATS_TX_SUCC_MCS_SHIFT 10
+#define WLAN_STATS_TX_SUCC_MCS_MASK 0x00000400
+
+#define WLAN_STATS_TX_FAIL_MCS_SHIFT 11
+#define WLAN_STATS_TX_FAIL_MCS_MASK 0x00000800
+
+#define WLAN_STATS_TX_PPDU_DELAY_SHIFT 12
+#define WLAN_STATS_TX_PPDU_DELAY_MASK 0x00001000
+
+/* WLAN RX stats bitmap */
+#define WLAN_STATS_MAC_RX_MPDU_CNT_SHIFT 0
+#define WLAN_STATS_MAC_RX_MPDU_CNT_MASK 0x00000001
+
+#define WLAN_STATS_MAC_RX_BYTES_SHIFT 1
+#define WLAN_STATS_MAC_RX_BYTES_MASK 0x00000002
+
+#define WLAN_STATS_PHY_RX_PPDU_CNT_SHIFT 2
+#define WLAN_STATS_PHY_RX_PPDU_CNT_MASK 0x00000004
+
+#define WLAN_STATS_PHY_RX_BYTES_SHIFT 3
+#define WLAN_STATS_PHY_RX_BYTES_MASK 0x00000008
+
+#define WLAN_STATS_RX_DISORDER_CNT_SHIFT 4
+#define WLAN_STATS_RX_DISORDER_CNT_MASK 0x00000010
+
+#define WLAN_STATS_RX_RETRY_CNT_SHIFT 5
+#define WLAN_STATS_RX_RETRY_CNT_MASK 0x00000020
+
+#define WLAN_STATS_RX_DUP_CNT_SHIFT 6
+#define WLAN_STATS_RX_DUP_CNT_MASK 0x00000040
+
+#define WLAN_STATS_RX_DISCARD_CNT_SHIFT 7
+#define WLAN_STATS_RX_DISCARD_CNT_MASK 0x00000080
+
+#define WLAN_STATS_RX_MPDU_AGGR_SHIFT 8
+#define WLAN_STATS_RX_MPDU_AGGR_MASK 0x00000100
+
+#define WLAN_STATS_RX_MCS_SHIFT 9
+#define WLAN_STATS_RX_MCS_MASK 0x00000200
+
+#define WLAN_STATS_STA_PS_INDS_SHIFT 10
+#define WLAN_STATS_STA_PS_INDS_MASK 0x00000400
+
+#define WLAN_STATS_STA_PS_DURS_SHIFT 11
+#define WLAN_STATS_STA_PS_DURS_MASK 0x00000800
+
+#define WLAN_STATS_RX_PROBE_REQS_SHIFT 12
+#define WLAN_STATS_RX_PROBE_REQS_MASK 0x00001000
+
+#define WLAN_STATS_RX_OTH_MGMTS_SHIFT 13
+#define WLAN_STATS_RX_OTH_MGMTS_MASK 0x00002000
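/*
 * Illustrative sketch, not part of this patch: using the WLAN_STATS_* masks
 * above to test which counters crossed their thresholds, based on the
 * tx_chgd_bitmap / rx_chgd_bitmap fields of the
 * wmi_report_stats_event_fixed_param defined further below.
 */
static void example_check_changed_stats(A_UINT32 tx_chgd_bitmap,
                                        A_UINT32 rx_chgd_bitmap)
{
    if (tx_chgd_bitmap & WLAN_STATS_TX_MPDU_RETRY_CNT_MASK) {
        /* tx_mpdu_retry_cnt moved by more than its configured threshold */
    }
    if (rx_chgd_bitmap & WLAN_STATS_RX_MCS_MASK) {
        /* the rx_mcs histogram changed beyond its threshold */
    }
}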
+
+typedef struct
+{
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_chan_cca_stats */
+ A_UINT32 vdev_id;
+ /** Percentage of idle time, no TX, no RX, no interference */
+ A_UINT32 idle_time;
+ /** Percentage of time transmitting packets */
+ A_UINT32 tx_time;
+ /** Percentage of time receiving packets in current BSSs */
+ A_UINT32 rx_in_bss_time;
+ /** Percentage of time receiving packets not in current BSSs */
+ A_UINT32 rx_out_bss_time;
+ /** Percentage of time interference detected. */
+ A_UINT32 rx_busy_time;
+ /** Percentage of time receiving packets with errors
+ * or packets flagged as retransmissions or with seqnum discontinuities. */
+ A_UINT32 rx_in_bad_cond_time;
+ /** Percentage of time the device transmitted packets that haven't been ACKed. */
+ A_UINT32 tx_in_bad_cond_time;
+ /** Percentage of time the chip is unable to work in normal conditions. */
+ A_UINT32 wlan_not_avail_time;
+} wmi_chan_cca_stats;
+
+/** Thresholds of cca stats, expressed as percentages of stats variation.
+ * Check wmi_chan_cca_stats for the meaning of each stat.
+ */
+typedef struct
+{
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_chan_cca_stats_thresh */
+ A_UINT32 idle_time; /* units = percent */
+ A_UINT32 tx_time; /* units = percent */
+ A_UINT32 rx_in_bss_time; /* units = percent */
+ A_UINT32 rx_out_bss_time; /* units = percent */
+ A_UINT32 rx_busy_time; /* units = percent */
+ A_UINT32 rx_in_bad_cond_time; /* units = percent */
+ A_UINT32 tx_in_bad_cond_time; /* units = percent */
+ A_UINT32 wlan_not_avail_time; /* units = percent */
+} wmi_chan_cca_stats_thresh;
+
+typedef struct
+{
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_peer_signal_stats */
+ A_UINT32 vdev_id;
+ A_UINT32 peer_id;
+ /** per chain SNR in current bss, units are dB */
+ A_INT32 per_chain_snr[WMI_MAX_CHAINS];
+ /** per chain background noise, units are dBm */
+ A_INT32 per_chain_nf[WMI_MAX_CHAINS];
+} wmi_peer_signal_stats;
+
+/** Thresholds of signal stats, expressed as percentages of stats variation.
+ * Check wmi_peer_signal_stats for the meaning of each stat.
+ */
+typedef struct
+{
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_peer_signal_stats_thresh
+ */
+ A_UINT32 tlv_header;
+ A_UINT32 per_chain_snr; /* units = dB */
+ A_UINT32 per_chain_nf; /* units = dBm */
+} wmi_peer_signal_stats_thresh;
+
+typedef struct
+{
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_tx_stats */
+ /** Number of total TX MSDUs on MAC layer in the period */
+ A_UINT32 tx_msdu_cnt;
+ /** Number of total TX MPDUs on MAC layer in the period */
+ A_UINT32 tx_mpdu_cnt;
+ /** Number of total TX PPDUs on MAC layer in the period */
+ A_UINT32 tx_ppdu_cnt;
+ /** Bytes of tx data on MAC layer in the period */
+ A_UINT32 tx_bytes;
+ /** Number of TX MSDUs cancelled due to any reason in the period,
+ * such as WMM limitation/bandwidth limitation/radio congestion */
+ A_UINT32 tx_msdu_drop_cnt;
+ /** Bytes of dropped TX packets in the period */
+ A_UINT32 tx_drop_bytes;
+ /** Number of unacked transmissions of MPDUs */
+ A_UINT32 tx_mpdu_retry_cnt;
+ /** Number of MPDUs that have not been ACKed despite retries */
+ A_UINT32 tx_mpdu_fail_cnt;
+ /** Number of PPDUs which received no block ack */
+ A_UINT32 tx_ppdu_fail_cnt;
+ /* This TLV is followed by the TLVs below:
+ * A_UINT32 tx_mpdu_aggr[tx_mpdu_aggr_array_len];
+ * A_UINT32 tx_succ_mcs[tx_succ_mcs_array_len];
+ * A_UINT32 tx_fail_mcs[tx_fail_mcs_array_len];
+ * A_UINT32 tx_delay[tx_ppdu_delay_array_len];
+ */
+} wmi_tx_stats;
+
+/** Thresholds of tx stats, expressed as percentages of stats variation.
+ * Check wmi_tx_stats for the meaning of each stat.
+ */
+typedef struct
+{
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_tx_stats_thresh */
+ A_UINT32 tx_msdu_cnt;
+ A_UINT32 tx_mpdu_cnt;
+ A_UINT32 tx_ppdu_cnt;
+ A_UINT32 tx_bytes;
+ A_UINT32 tx_msdu_drop_cnt;
+ A_UINT32 tx_drop_bytes;
+ A_UINT32 tx_mpdu_retry_cnt;
+ A_UINT32 tx_mpdu_fail_cnt;
+ A_UINT32 tx_ppdu_fail_cnt;
+ A_UINT32 tx_mpdu_aggr;
+ A_UINT32 tx_succ_mcs;
+ A_UINT32 tx_fail_mcs;
+ A_UINT32 tx_ppdu_delay;
+} wmi_tx_stats_thresh;
+
+typedef struct
+{
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_peer_ac_tx_stats */
+ A_UINT32 vdev_id;
+ A_UINT32 peer_id;
+ /* The TLVs for the 4 ACs follow:
+ * wmi_tx_stats tx_stats[]; wmi_tx_stats for BE/BK/VI/VO
+ */
+} wmi_peer_ac_tx_stats;
+
+typedef struct
+{
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_rx_stats */
+ /** Number of RX MPDUs on MAC layer */
+ A_UINT32 mac_rx_mpdu_cnt;
+ /** Bytes of RX packets on MAC layer */
+ A_UINT32 mac_rx_bytes;
+ /** Number of RX PPDU on PHY layer */
+ A_UINT32 phy_rx_ppdu_cnt;
+ /** Bytes of RX packets on PHY layer */
+ A_UINT32 phy_rx_bytes;
+ /** Number of discontinuities in seqnum */
+ A_UINT32 rx_disorder_cnt;
+ /** Number of RX MPDUs flagged as retransmissions */
+ A_UINT32 rx_mpdu_retry_cnt;
+ /** Number of RX MPDUs identified as duplicates */
+ A_UINT32 rx_mpdu_dup_cnt;
+ /** Number of RX MPDUs discarded */
+ A_UINT32 rx_mpdu_discard_cnt;
+ /* This TLV is followed by TLVs below:
+ * A_UINT32 rx_mpdu_aggr[rx_mpdu_aggr_array_len];
+ * A_UINT32 rx_mcs[rx_msdu_mcs_array_len];
+ */
+} wmi_rx_stats;
+
+/** Thresholds of rx stats, expressed as percentages of stats variation.
+ * Check wmi_rx_stats for the meaning of each stat.
+ */
+typedef struct
+{
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_rx_stats_thresh */
+ A_UINT32 mac_rx_mpdu_cnt;
+ A_UINT32 mac_rx_bytes;
+ A_UINT32 phy_rx_ppdu_cnt;
+ A_UINT32 phy_rx_bytes;
+ A_UINT32 rx_disorder_cnt;
+ A_UINT32 rx_mpdu_retry_cnt;
+ A_UINT32 rx_mpdu_dup_cnt;
+ A_UINT32 rx_mpdu_discard_cnt;
+ A_UINT32 rx_mpdu_aggr;
+ A_UINT32 rx_mcs;
+ A_UINT32 sta_ps_inds;
+ A_UINT32 sta_ps_durs;
+ A_UINT32 rx_probe_reqs;
+ A_UINT32 rx_oth_mgmts;
+} wmi_rx_stats_thresh;
+
+typedef struct
+{
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_peer_ac_rx_stats */
+ A_UINT32 vdev_id;
+ A_UINT32 peer_id;
+ /** How many times STAs go to sleep */
+ A_UINT32 sta_ps_inds;
+ /** Total sleep time of STAs, in milliseconds */
+ A_UINT32 sta_ps_durs;
+ /** Number of probe requests received */
+ A_UINT32 rx_probe_reqs;
+ /** Number of other management frames received, not including probe requests */
+ A_UINT32 rx_oth_mgmts;
+ /* The TLVs for the 4 ACs follow:
+ * wmi_rx_stats rx_stats[]; wmi_rx_stats for BE/BK/VI/VO
+ */
+} wmi_peer_ac_rx_stats;
+
+typedef enum {
+ /** Periodic timer timed out, based on the period specified
+ * by WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD
+ */
+ TRIGGER_COND_ID_TIMER_TIMED_OUT = 0x1,
+ /** Any of the (enabled) stats thresholds specified
+ * in the WMI_PDEV_SET_STATS_THRESHOLD_CMD message is exceeded
+ * within the current stats period.
+ */
+ TRIGGER_COND_ID_THRESH_EXCEEDED = 0x2,
+ /** In response to the one-time wlan stats request of
+ * WMI_REQUEST_WLAN_STATS_CMDID from host.
+ */
+ TRIGGER_COND_ID_ONE_TIME_REQUEST = 0x3,
+} wmi_report_stats_event_trigger_cond_id;
+
+typedef struct {
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_report_stats_event_fixed_param */
+ /** Indicate what triggered this event, check wmi_report_stats_event_trigger_cond_id for details */
+ A_UINT32 trigger_cond_id;
+ /** Bitmap to indicate changed channel CCA stats which exceeded the thresholds */
+ A_UINT32 cca_chgd_bitmap;
+ /** Bitmap to indicate changed peer signal stats which exceeded the thresholds */
+ A_UINT32 sig_chgd_bitmap;
+ /** Bitmap to indicate changed TX counters which exceeded the thresholds */
+ A_UINT32 tx_chgd_bitmap;
+ /** Bitmap to indicate changed RX counters which exceeded the thresholds */
+ A_UINT32 rx_chgd_bitmap;
+ /** number of per channel CCA stats structures (wmi_chan_cca_stats), 0 to max vdevs*/
+ A_UINT32 num_chan_cca_stats;
+ /** number of per peer signal stats structures (wmi_peer_signal_stats), 0 to max peers*/
+ A_UINT32 num_peer_signal_stats;
+ /** number of per peer ac TX stats structures (wmi_peer_ac_tx_stats), 0 to max peers*/
+ A_UINT32 num_peer_ac_tx_stats;
+ /** Array length of tx_mpdu_aggr[], which is a histogram of MPDU aggregation size (1 to 7 and 8+).
+ * The array indicates number of MPDUs sent on specified aggregation size
+ * (per number of MPDUs per AMPDUs / 1 to 7 and 8+).
+ * Array length can be set per WMI_PDEV_PARAM_TX_MPDU_AGGR_ARRAY_LEN */
+ A_UINT32 tx_mpdu_aggr_array_len;
+ /** Array length of tx_succ_mcs[], which is a histogram of encoding rate.
+ * The array indicates number of acked PPDUs sent at a specific rate */
+ A_UINT32 tx_succ_mcs_array_len;
+ /** Array length of tx_fail_mcs[], which is a histogram of encoding rate.
+ * The array indicates number of unacked PPDUs sent at a specific rate */
+ A_UINT32 tx_fail_mcs_array_len;
+ /** tx_ppdu_delay[] is a histogram of delays on the MAC layer.
+ * The array counts numbers of PPDUs encountering different TX time delays.
+ * TX delay here means time interval between the time a PPDU is queued
+ * to the MAC HW for transmission and the time the lower layers of
+ * tx FW return a tx status.
+ *
+ * The bin size tx_ppdu_delay_bin_size_ms specifies how many
+ * milliseconds each bin of the tx_ppdu_delay histogram represents.
+ * By default the bin size is 10 ms.
+ * tx_ppdu_delay[0] -> delays between 0-9 ms
+ * tx_ppdu_delay[1] -> delays between 10-19 ms
+ * ...
+ * tx_ppdu_delay[9] -> delays between 90-99 ms
+ * tx_ppdu_delay[10] -> delays >= 100 ms
+ * Bin size can be set per WMI_PDEV_PARAM_TX_PPDU_DELAY_BIN_SIZE_MS.
+ */
+ A_UINT32 tx_ppdu_delay_bin_size_ms;
+ /** Array length of tx_ppdu_delay[]. It can be set per WMI_PDEV_PARAM_TX_PPDU_DELAY_ARRAY_LEN */
+ A_UINT32 tx_ppdu_delay_array_len;
+ /** number of per peer ac RX stats structures (wmi_peer_ac_rx_stats), 0 to max peers*/
+ A_UINT32 num_peer_ac_rx_stats;
+ /** Array length of rx_mpdu_aggr[], which is a histogram of MPDU aggregation size (1 to 7 and 8+).
+ * It can be set per WMI_PDEV_PARAM_RX_MPDU_AGGR_ARRAY_LEN */
+ A_UINT32 rx_mpdu_aggr_array_len;
+ /** Array size of rx_mcs[], which is a histogram of encoding rate.
+ * The array indicates number of PPDUs received at a specific rate */
+ A_UINT32 rx_mcs_array_len;
+
+ /**
+ * This TLV is followed by TLVs below:
+ * wmi_chan_cca_stats chan_cca_stats[]; Array length is specified by num_chan_cca_stats
+ * wmi_peer_signal_stats peer_signal_stats[]; Array length is specified by num_peer_signal_stats
+ * wmi_peer_ac_tx_stats peer_ac_tx_stats[]; Array length is specified by num_peer_ac_tx_stats
+ * wmi_tx_stats tx_stats[][]; Array length is num_peer_ac_tx_stats * WLAN_MAX_AC, array index is (peer_index * WLAN_MAX_AC + ac_index)
+ * A_UINT32 tx_mpdu_aggr[][][]; Array length is num_peer_ac_tx_stats * WLAN_MAX_AC * tx_mpdu_aggr_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * tx_mpdu_aggr_array_len + A-MPDU aggregation index
+ * A_UINT32 tx_succ_mcs[][][]; Array length is num_peer_ac_tx_stats * WLAN_MAX_AC * tx_succ_mcs_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * tx_succ_mcs_array_len + MCS index
+ * A_UINT32 tx_fail_mcs[][][]; Array length is num_peer_ac_tx_stats * WLAN_MAX_AC * tx_fail_mcs_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * tx_fail_mcs_array_len + MCS index
+ * A_UINT32 tx_ppdu_delay[][][]; Array length is num_peer_ac_tx_stats * WLAN_MAX_AC * tx_ppdu_delay_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * tx_ppdu_delay_array_len + tx delay index
+ * wmi_peer_ac_rx_stats peer_ac_rx_stats[]; Array length is specified by num_peer_ac_rx_stats
+ * wmi_rx_stats rx_stats[][]; Array length is num_peer_ac_rx_stats * WLAN_MAX_AC, array index is (peer_index * WLAN_MAX_AC + ac_index)
+ * A_UINT32 rx_mpdu_aggr[][][]; Array length is num_peer_ac_rx_stats * WLAN_MAX_AC * rx_mpdu_aggr_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * rx_mpdu_aggr_array_len + A-MPDU aggregation index
+ * A_UINT32 rx_mcs[][][]; Array length is (num_peer_ac_rx_stats * WLAN_MAX_AC) * rx_mcs_array_len,
+ * array index is (peer_index * WLAN_MAX_AC + ac_index) * rx_mcs_array_len + MCS index
+ **/
+} wmi_report_stats_event_fixed_param;
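/*
 * Illustrative sketch, not part of this patch: computing flat indices into
 * the variable-length arrays that follow wmi_report_stats_event_fixed_param,
 * per the layout comment above. WLAN_MAX_AC (4 access categories) is assumed
 * to be defined elsewhere in the WMI headers.
 */
static A_UINT32 example_tx_succ_mcs_index(
    const wmi_report_stats_event_fixed_param *ev,
    A_UINT32 peer_idx, A_UINT32 ac, A_UINT32 mcs)
{
    /* tx_succ_mcs[][][]: (peer * WLAN_MAX_AC + ac) * array_len + mcs */
    return (peer_idx * WLAN_MAX_AC + ac) * ev->tx_succ_mcs_array_len + mcs;
}

static A_UINT32 example_tx_delay_bin(
    const wmi_report_stats_event_fixed_param *ev, A_UINT32 delay_ms)
{
    /* map a TX delay in milliseconds to its tx_ppdu_delay[] bin;
     * the final bin is open-ended per the description above */
    A_UINT32 bin = delay_ms / ev->tx_ppdu_delay_bin_size_ms;

    return (bin < ev->tx_ppdu_delay_array_len) ?
        bin : ev->tx_ppdu_delay_array_len - 1;
}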
+
+typedef struct {
+ /** TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_peer_stats_info */
+ A_UINT32 tlv_header;
+ /** peer MAC address */
+ wmi_mac_addr peer_macaddr;
+ /** bytes (size of MPDUs) transmitted to this peer */
+ struct {
+ /* lower 32 bits of the tx_bytes value */
+ A_UINT32 low_32;
+ /* upper 32 bits of the tx_bytes value */
+ A_UINT32 high_32;
+ } tx_bytes;
+ /** packets (MSDUs) transmitted to this peer */
+ struct {
+ /* lower 32 bits of the tx_packets value */
+ A_UINT32 low_32;
+ /* upper 32 bits of the tx_packets value */
+ A_UINT32 high_32;
+ } tx_packets;
+ /** bytes (size of MPDUs) received from this peer */
+ struct {
+ /* lower 32 bits of the rx_bytes value */
+ A_UINT32 low_32;
+ /* upper 32 bits of the rx_bytes value */
+ A_UINT32 high_32;
+ } rx_bytes;
+ /** packets (MSDUs) received from this peer */
+ struct {
+ /* lower 32 bits of the rx_packets value */
+ A_UINT32 low_32;
+ /* upper 32 bits of the rx_packets value */
+ A_UINT32 high_32;
+ } rx_packets;
+ /** cumulative retry counts (MPDUs) */
+ A_UINT32 tx_retries;
+ /** number of failed transmissions (MPDUs) (retries exceeded, no ACK) */
+ A_UINT32 tx_failed;
+ /** rate information; this is the output of WMI_ASSEMBLE_RATECODE_V1
+ * (in the format 0x1000RRRR)
+ * The rate-code is a 4-byte field which, for a given
+ * rate, nss and preamble, is laid out as follows:
+ *
+ * b'31-b'29 unused / reserved
+ * b'28 indicate the version of rate-code (1 = RATECODE_V1)
+ * b'27-b'11 unused / reserved
+ * b'10-b'8 indicate the preamble (0 OFDM, 1 CCK, 2 HT, 3 VHT)
+ * b'7-b'5 indicate the NSS (0 - 1x1, 1 - 2x2, 2 - 3x3, 3 - 4x4)
+ * b'4-b'0 indicate the rate, which is indicated as follows:
+ * OFDM : 0: OFDM 48 Mbps
+ * 1: OFDM 24 Mbps
+ * 2: OFDM 12 Mbps
+ * 3: OFDM 6 Mbps
+ * 4: OFDM 54 Mbps
+ * 5: OFDM 36 Mbps
+ * 6: OFDM 18 Mbps
+ * 7: OFDM 9 Mbps
+ * CCK (pream == 1)
+ * 0: CCK 11 Mbps Long
+ * 1: CCK 5.5 Mbps Long
+ * 2: CCK 2 Mbps Long
+ * 3: CCK 1 Mbps Long
+ * 4: CCK 11 Mbps Short
+ * 5: CCK 5.5 Mbps Short
+ * 6: CCK 2 Mbps Short
+ * HT/VHT (pream == 2/3)
+ * 0..7: MCS0..MCS7 (HT)
+ * 0..9: MCS0..MCS9 (11AC VHT)
+ * 0..11: MCS0..MCS11 (11AX VHT)
+ */
+ /** rate-code of the last transmission */
+ A_UINT32 last_tx_rate_code;
+ /** rate-code of the last received PPDU */
+ A_UINT32 last_rx_rate_code;
+ /** bitrate of the last transmission, in units of kbps */
+ A_UINT32 last_tx_bitrate_kbps;
+ /** bitrate of the last received PPDU, in units of kbps */
+ A_UINT32 last_rx_bitrate_kbps;
+ /** combined RSSI of the last received PPDU, in unit of dBm */
+ A_INT32 peer_rssi;
+} wmi_peer_stats_info;
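/*
 * Illustrative sketch, not part of this patch: recombining the split 32-bit
 * counters of wmi_peer_stats_info into a 64-bit host value. A_UINT64 is
 * assumed to be available from the athdefs headers.
 */
static A_UINT64 example_peer_tx_bytes(const wmi_peer_stats_info *info)
{
    return ((A_UINT64)info->tx_bytes.high_32 << 32) | info->tx_bytes.low_32;
}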
+
+typedef struct {
+ /** TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_peer_stats_info_event_fixed_param */
+ A_UINT32 tlv_header;
+ /** VDEV to which the peers belong */
+ A_UINT32 vdev_id;
+ /** number of peers in peer_stats_info[] */
+ A_UINT32 num_peers;
+ /** flag to indicate if there are more peers which will
+ * be sent in a following separate peer_stats_info event */
+ A_UINT32 more_data;
+ /* This TLV is followed by another TLV of array of structs
+ * wmi_peer_stats_info peer_stats_info[];
+ */
+} wmi_peer_stats_info_event_fixed_param;
+
/**
* PDEV statistics
* @todo
@@ -4748,6 +5647,16 @@
/* NAN Data Interface */
#define WMI_VDEV_TYPE_NDI 0x7
+/*
+ * Param values to be sent for WMI_VDEV_PARAM_SGI command
+ * which are used in 11ax systems
+ */
+#define WMI_SGI_LEGACY 0x1 /* for HT and VHT */
+#define WMI_SGI_HE_400_NS 0x2 /* for HE 400 nsec */
+#define WMI_SGI_HE_800_NS 0x4 /* for HE 800 nsec */
+#define WMI_SGI_HE_1600_NS 0x8 /* for HE 1600 nsec */
+#define WMI_SGI_HE_3200_NS 0x10 /* for HE 3200 nsec */
+
/** values for vdev_subtype */
#define WMI_UNIFIED_VDEV_SUBTYPE_P2P_DEVICE 0x1
#define WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT 0x2
@@ -4778,6 +5687,163 @@
*/
#define WMI_UNIFIED_VDEV_START_BCN_TX_RATE_PRESENT (1<<2)
+#define WMI_HEOPS_COLOR_GET(he_ops) WMI_GET_BITS(he_ops, 0, 6)
+#define WMI_HEOPS_COLOR_SET(he_ops, value) WMI_SET_BITS(he_ops, 0, 6, value)
+
+#define WMI_HEOPS_DEFPE_GET(he_ops) WMI_GET_BITS(he_ops, 6, 3)
+#define WMI_HEOPS_DEFPE_SET(he_ops, value) WMI_SET_BITS(he_ops, 6, 3, value)
+
+#define WMI_HEOPS_TWT_GET(he_ops) WMI_GET_BITS(he_ops, 9, 1)
+#define WMI_HEOPS_TWT_SET(he_ops, value) WMI_SET_BITS(he_ops, 9, 1, value)
+
+#define WMI_HEOPS_RTSTHLD_GET(he_ops) WMI_GET_BITS(he_ops, 10, 10)
+#define WMI_HEOPS_RTSTHLD_SET(he_ops, value) WMI_SET_BITS(he_ops, 10, 10, value)
+
+#define WMI_HEOPS_PDMIN_GET(he_ops) WMI_GET_BITS(he_ops, 20, 5)
+#define WMI_HEOPS_PDMIN_SET(he_ops, value) WMI_SET_BITS(he_ops, 20, 5, value)
+
+#define WMI_HEOPS_PDMAX_GET(he_ops) WMI_GET_BITS(he_ops, 25, 5)
+#define WMI_HEOPS_PDMAX_SET(he_ops, value) WMI_SET_BITS(he_ops, 25, 5, value)
+
+#define WMI_MAX_HECAP_PHY_SIZE (3)
+#define WMI_HECAP_PHY_COD_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 0, 1)
+#define WMI_HECAP_PHY_COD_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 0, 1, value)
+
+#define WMI_HECAP_PHY_TXLDPC_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 1, 1)
+#define WMI_HECAP_PHY_TXLDPC_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 1, 1, value)
+
+#define WMI_HECAP_PHY_RXLDPC_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 2, 1)
+#define WMI_HECAP_PHY_RXLDPC_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 2, 1, value)
+
+#define WMI_HECAP_PHY_DCM_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 3, 3)
+#define WMI_HECAP_PHY_DCM_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 3, 3, value)
+
+#define WMI_HECAP_PHY_OLTF_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 6, 1)
+#define WMI_HECAP_PHY_OLTF_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 6, 1, value)
+
+#define WMI_HECAP_PHY_CBW_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 7, 3)
+#define WMI_HECAP_PHY_CBW_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 7, 3, value)
+
+#define WMI_HECAP_PHY_TXSTBC_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 10, 1)
+#define WMI_HECAP_PHY_TXSTBC_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 10, 1, value)
+
+#define WMI_HECAP_PHY_RXSTBC_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 11, 1)
+#define WMI_HECAP_PHY_RXSTBC_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 11, 1, value)
+
+#define WMI_HECAP_PHY_DLOFMAMUMIMO_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 12, 1)
+#define WMI_HECAP_PHY_DLOFDMAMUMIO_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 12, 1, value)
+
+#define WMI_HECAP_PHY_UL_MU_MIMO_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 13, 1)
+#define WMI_HECAP_PHY_UL_MU_MIMO_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 13, 1, value)
+
+#define WMI_HECAP_PHY_ULOFDMA_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 14, 1)
+#define WMI_HECAP_PHY_ULOFDMA_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 14, 1, value)
+
+#define WMI_HECAP_PHY_TXDOPPLER_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 15, 1)
+#define WMI_HECAP_PHY_TXDOPPLER_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 15, 1, value)
+
+#define WMI_HECAP_PHY_RXDOPPLER_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 16, 1)
+#define WMI_HECAP_PHY_RXDOPPLER_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 16, 1, value)
+
+#define WMI_HECAP_PHY_CBMODE_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 17, 8)
+#define WMI_HECAP_PHY_CBMODE_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 17, 8, value)
+
+#define WMI_HECAP_PHY_PADDING_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[0], 25, 2)
+#define WMI_HECAP_PHY_PADDING_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[0], 25, 2, value)
+
+#define WMI_HECAP_PHY_32GI_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[1], 0, 26)
+#define WMI_HECAP_PHY_32GI_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[1], 0, 26, value)
+
+#define WMI_HECAP_PHY_SUBFMR_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[1], 26, 1)
+#define WMI_HECAP_PHY_SUBFMR_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[1], 26, 1, value)
+
+#define WMI_HECAP_PHY_SUBFME_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[1], 27, 1)
+#define WMI_HECAP_PHY_SUBFME_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[1], 27, 1, value)
+
+#define WMI_HECAP_PHY_SUBFMESTS_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[1], 28, 3)
+#define WMI_HECAP_PHY_SUBFMESTS_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[1], 28, 3, value)
+
+#define WMI_HECAP_PHY_NOSUNDIMENS_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[2], 0, 3)
+#define WMI_HECAP_PHY_NOSUNDIMENS_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[2], 0, 3, value)
+
+#define WMI_HECAP_PHY_MUBFMR_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[2], 3, 1)
+#define WMI_HECAP_PHY_MUBFMR_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[2], 3, 1, value)
+
+#define WMI_HECAP_PHY_40MHZNSS_GET(he_cap_phy) WMI_GET_BITS(he_cap_phy[2], 4, 18)
+#define WMI_HECAP_PHY_40MHZNSS_SET(he_cap_phy, value) WMI_SET_BITS(he_cap_phy[2], 4, 18, value)
+
+#define WMI_HECAP_MAC_MTID_GET(he_cap) WMI_GET_BITS(he_cap, 0, 3)
+#define WMI_HECAP_MAC_MTID_SET(he_cap, value) WMI_SET_BITS(he_cap, 0, 3, value)
+
+#define WMI_HECAP_MAC_AACK_GET(he_cap) WMI_GET_BITS(he_cap, 3, 1)
+#define WMI_HECAP_MAC_AACK_SET(he_cap, value) WMI_SET_BITS(he_cap, 3, 1, value)
+
+#define WMI_HECAP_MAC_MINFRAGSZ_GET(he_cap) WMI_GET_BITS(he_cap, 4, 2)
+#define WMI_HECAP_MAC_MINFRAGSZ_SET(he_cap, value) WMI_SET_BITS(he_cap, 4, 2, value)
+
+#define WMI_HECAP_MAC_HEFRAG_GET(he_cap) WMI_GET_BITS(he_cap, 6, 2)
+#define WMI_HECAP_MAC_HEFRAG_SET(he_cap, value) WMI_SET_BITS(he_cap, 6, 2, value)
+
+#define WMI_HECAP_MAC_MURTS_GET(he_cap) WMI_GET_BITS(he_cap, 8, 1)
+#define WMI_HECAP_MAC_MURTS_SET(he_cap, value) WMI_SET_BITS(he_cap, 8, 1, value)
+
+#define WMI_HECAP_MAC_OMI_GET(he_cap) WMI_GET_BITS(he_cap, 9, 1)
+#define WMI_HECAP_MAC_OMI_SET(he_cap, value) WMI_SET_BITS(he_cap, 9, 1, value)
+
+#define WMI_HECAP_MAC_HECTRL_GET(he_cap) WMI_GET_BITS(he_cap, 10, 1)
+#define WMI_HECAP_MAC_HECTRL_SET(he_cap, value) WMI_SET_BITS(he_cap, 10, 1, value)
+
+#define WMI_HECAP_MAC_MBAHECTRL_GET(he_cap) WMI_GET_BITS(he_cap, 11, 1)
+#define WMI_HECAP_MAC_MBAHECTRL_SET(he_cap, value) WMI_SET_BITS(he_cap, 11, 1, value)
+
+#define WMI_HECAP_MAC_ULMURSP_GET(he_cap) WMI_GET_BITS(he_cap, 12, 1)
+#define WMI_HECAP_MAC_ULMURSP_SET(he_cap, value) WMI_SET_BITS(he_cap, 12, 1, value)
+
+#define WMI_HECAP_MAC_HELKAD_GET(he_cap) WMI_GET_BITS(he_cap, 13, 2)
+#define WMI_HECAP_MAC_HELKAD_SET(he_cap, value) WMI_SET_BITS(he_cap, 13, 2, value)
+
+#define WMI_HECAP_MAC_BSR_GET(he_cap) WMI_GET_BITS(he_cap, 15, 1)
+#define WMI_HECAP_MAC_BSR_SET(he_cap, value) WMI_SET_BITS(he_cap, 15, 1, value)
+
+#define WMI_HECAP_MAC_TWTREQ_GET(he_cap) WMI_GET_BITS(he_cap, 16, 1)
+#define WMI_HECAP_MAC_TWTREQ_SET(he_cap, value) WMI_SET_BITS(he_cap, 16, 1, value)
+
+#define WMI_HECAP_MAC_TWTRSP_GET(he_cap) WMI_GET_BITS(he_cap, 17, 1)
+#define WMI_HECAP_MAC_TWTRSP_SET(he_cap, value) WMI_SET_BITS(he_cap, 17, 1, value)
+
+#define WMI_HECAP_MAC_BCSTTWT_GET(he_cap) WMI_GET_BITS(he_cap, 18, 1)
+#define WMI_HECAP_MAC_BCSTTWT_SET(he_cap, value) WMI_SET_BITS(he_cap, 18, 1, value)
+
+#define WMI_HECAP_MAC_MBSS_GET(he_cap) WMI_GET_BITS(he_cap, 19, 1)
+#define WMI_HECAP_MAC_MBSS_SET(he_cap, value) WMI_SET_BITS(he_cap, 19, 1, value)
+
+#define WMI_HECAP_MAC_TRIGPADDUR_GET(he_cap) WMI_GET_BITS(he_cap, 20, 2)
+#define WMI_HECAP_MAC_TRIGPADDUR_SET(he_cap, value) WMI_SET_BITS(he_cap, 20, 2, value)
+
+#define WMI_HECAP_MAC_MAXFRAGMSDU_GET(he_cap) WMI_GET_BITS(he_cap, 22, 3)
+#define WMI_HECAP_MAC_MAXFRAGMSDU_SET(he_cap, value) WMI_SET_BITS(he_cap, 22, 3, value)
+
+#define WMI_HECAP_MAC_32BITBA_GET(he_cap) WMI_GET_BITS(he_cap, 25, 1)
+#define WMI_HECAP_MAC_32BITBA_SET(he_cap, value) WMI_SET_BITS(he_cap, 25, 1, value)
+
+#define WMI_HECAP_MAC_MUCASCADE_GET(he_cap) WMI_GET_BITS(he_cap, 26, 1)
+#define WMI_HECAP_MAC_MUCASCADE_SET(he_cap, value) WMI_SET_BITS(he_cap, 26, 1, value)
+
+#define WMI_HECAP_MAC_ACKMTIDAMPDU_GET(he_cap) WMI_GET_BITS(he_cap, 27, 1)
+#define WMI_HECAP_MAC_ACKMTIDAMPDU_SET(he_cap, value) WMI_SET_BITS(he_cap, 27, 1, value)
+
+#define WMI_HECAP_MAC_GROUPMSTABA_GET(he_cap) WMI_GET_BITS(he_cap, 28, 1)
+#define WMI_HECAP_MAC_GROUPMSTABA_SET(he_cap, value) WMI_SET_BITS(he_cap, 28, 1, value)
+
+#define WMI_HECAP_MAC_OFDMARA_GET(he_cap) WMI_GET_BITS(he_cap, 29, 1)
+#define WMI_HECAP_MAC_OFDMARA_SET(he_cap, value) WMI_SET_BITS(he_cap, 29, 1, value)
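/*
 * Illustrative sketch, not part of this patch: composing an HE MAC caps word
 * and an HE ops word with the accessor macros above. WMI_SET_BITS is assumed
 * to be the generic in-place bit-field helper used throughout this header,
 * and the chosen values are arbitrary examples.
 */
static void example_fill_he_words(A_UINT32 *he_cap_mac, A_UINT32 *he_ops)
{
    *he_cap_mac = 0;
    WMI_HECAP_MAC_TWTREQ_SET(*he_cap_mac, 1);   /* TWT requester supported */
    WMI_HECAP_MAC_OMI_SET(*he_cap_mac, 1);      /* OM control supported */

    *he_ops = 0;
    WMI_HEOPS_COLOR_SET(*he_ops, 5);            /* BSS color 5 */
    WMI_HEOPS_RTSTHLD_SET(*he_ops, 512);        /* 10-bit RTS threshold field */
}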
+
+#define WMI_GET_HW_RATECODE_PREAM_V1(_rcode) (((_rcode) >> 8) & 0x7)
+#define WMI_GET_HW_RATECODE_NSS_V1(_rcode) (((_rcode) >> 5) & 0x7)
+#define WMI_GET_HW_RATECODE_RATE_V1(_rcode) (((_rcode) >> 0) & 0x1F)
+#define WMI_ASSEMBLE_RATECODE_V1(_rate, _nss, _pream) \
+ (((1) << 28) | ((_pream) << 8) | ((_nss) << 5) | (_rate))
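/*
 * Illustrative sketch, not part of this patch: building and decoding a V1
 * rate-code (0x1000RRRR format, as described for last_tx_rate_code and
 * WMI_VDEV_PARAM_FIXED_RATE) with the macros above. The preamble value 3
 * corresponds to WMI_RATE_PREAMBLE_VHT per the enum defined further below.
 */
static void example_ratecode_v1(void)
{
    /* MCS 7, NSS index 1 (2x2), VHT preamble */
    A_UINT32 rc = WMI_ASSEMBLE_RATECODE_V1(7, 1, 3 /* VHT */);

    A_UINT32 pream = WMI_GET_HW_RATECODE_PREAM_V1(rc); /* 3 = VHT  */
    A_UINT32 nss   = WMI_GET_HW_RATECODE_NSS_V1(rc);   /* 1 = 2x2  */
    A_UINT32 rate  = WMI_GET_HW_RATECODE_RATE_V1(rc);  /* 7 = MCS7 */

    (void)pream; (void)nss; (void)rate;
}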
+
typedef struct {
A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_vdev_start_request_cmd_fixed_param */
/** unique id identifying the VDEV, generated by the caller */
@@ -4811,7 +5877,9 @@
/** This field will be invalid unless the Dual Band Simultaneous (DBS) feature is enabled. */
/** the DBS policy manager indicates the preferred number of receive streams. */
A_UINT32 preferred_rx_streams;
-
+ A_UINT32 he_ops; /* refer to WMI_HEOPS_xxx macros */
+ A_UINT32 cac_duration_ms; /* in milliseconds */
+ A_UINT32 regdomain;
/* The TLVs follows this structure:
* wmi_channel chan; //WMI channel
* wmi_p2p_noa_descriptor noa_descriptors[]; //actual p2p NOA descriptor from scan entry
@@ -4924,6 +5992,7 @@
WMI_RATE_PREAMBLE_CCK,
WMI_RATE_PREAMBLE_HT,
WMI_RATE_PREAMBLE_VHT,
+ WMI_RATE_PREAMBLE_HE,
} WMI_RATE_PREAMBLE;
/** Value to disable fixed rate setting */
@@ -4983,7 +6052,18 @@
WMI_VDEV_PARAM_MGMT_RATE,
/** Protection Mode */
WMI_VDEV_PARAM_PROTECTION_MODE,
- /** Fixed rate setting */
+ /** Fixed rate setting
+ * The top nibble is used to select which format to use for encoding
+ * the rate specification: 0xVXXXXXXX
+ * If V == 0b0000: format is same as before: 0x000000RR
+ * If V == 0b0001: format is: 0x1000RRRR.
+ * This will be the output of WMI_ASSEMBLE_RATECODE_V1
+ * The host shall use the new V1 format (and set V = 0x1) if the target
+ * indicates 802.11ax support via the WMI_SERVICE_11AX flag, or if the
+ * system is configured with Nss > 4 (either at compile time within the
+ * host driver, or through WMI_SERVICE_READY PHY capabilities provided
+ * by the target).
+ */
WMI_VDEV_PARAM_FIXED_RATE,
/** Short GI Enable/Disable */
WMI_VDEV_PARAM_SGI,
@@ -5283,6 +6363,25 @@
*/
WMI_VDEV_PARAM_AGG_SW_RETRY_TH,
+ /** disable dynamic bw RTS **/
+ WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+
+ /**
+ * Per ssid (vdev) based ATF strict/fair scheduling policy
+ * Param values are WMI_ATF_SSID_FAIR_SCHED or
+ * WMI_ATF_SSID_STRICT_SCHED
+ */
+ WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+
+ /** Enable or disable Dual carrier modulation
+ * valid values: 0-Disable DCM, 1-Enable DCM.
+ */
+ WMI_VDEV_PARAM_HE_DCM,
+ /** Enable or disable Extended range
+ * valid values: 0-Disable ER, 1-Enable ER.
+ */
+ WMI_VDEV_PARAM_HE_RANGE_EXT,
+
/*
* === ADD NEW VDEV PARAM TYPES ABOVE THIS LINE ===
* The below vdev param types are used for prototyping, and are
@@ -5291,24 +6390,20 @@
WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
/* 11AX SPECIFIC defines */
WMI_VDEV_PARAM_BSS_COLOR,
- /* In case of AP this will enable / disable MU-MIMO mode */
- WMI_VDEV_PARAM_SET_UL_MU_MIMO,
/*
- * set fragmentation level of the vdev's peers.
- * Values can be WMI_HE_FRAG_SUPPORT_LEVEL0..WMI_HE_FRAG_SUPPORT_LEVEL3
- */
- WMI_VDEV_PARAM_SET_FRAG_LEVEL,
- /*
- * control different features of HEControl:
- * Bit 0:- 1/0-> Enable/Disable transmssion of UL scheduling.
- * Bit 1:- 1/0-> Enable / disable honoring of ROMI from a peer.
- * Applicable in AP mode only.
- */
- WMI_VDEV_PARAM_SET_HECONTROL,
- /*
- * enable / disable trigger access for a AP vdev's peers.
+ * Enable / disable trigger access for an AP vdev's peers.
* For a STA mode vdev this will enable/disable triggered access
* and enable/disable Multi User mode of operation.
+ * 0 - Disable MU OFDMA and MU MIMO
+ * 1 - Disable DL OFDMA
+ * 2 - Disable DL MUMIMO
+ * 3 - Disable UL OFDMA
+ * 4 - Disable UL MUMIMO
+ * 5 - Enable MU OFDMA and MU MIMO
+ * 6 - Enable DL OFDMA
+ * 7 - Enable DL MUMIMO
+ * 8 - Enable UL OFDMA
+ * 9 - Enable UL MUMIMO
*/
WMI_VDEV_PARAM_SET_HEMU_MODE,
/*
@@ -6008,13 +7103,17 @@
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_tim_info */
- /** TIM bitmap len (in bytes)*/
+ /** TIM bitmap len (in bytes) */
A_UINT32 tim_len;
/** TIM Partial Virtual Bitmap */
A_UINT32 tim_mcast;
A_UINT32 tim_bitmap[WMI_TIM_BITMAP_ARRAY_SIZE];
A_UINT32 tim_changed;
A_UINT32 tim_num_ps_pending;
+ /** Use the vdev_id only if vdev_id_valid is set */
+ A_UINT32 vdev_id_valid;
+ /** unique id identifying the VDEV */
+ A_UINT32 vdev_id;
} wmi_tim_info;
typedef struct {
@@ -6044,6 +7143,10 @@
*/
A_UINT32 noa_attributes;
wmi_p2p_noa_descriptor noa_descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+ /** Use the vdev_id only if vdev_id_valid is set */
+ A_UINT32 vdev_id_valid;
+ /** unique id identifying the VDEV */
+ A_UINT32 vdev_id;
}wmi_p2p_noa_info;
#define WMI_UNIFIED_NOA_ATTR_MODIFIED 0x1
@@ -6108,6 +7211,11 @@
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_host_swba_event_fixed_param */
/** bitmap identifying the VDEVs, generated by the caller */
A_UINT32 vdev_map;
+ /** how many vdev's info is included in this message
+ * If this field is zero, then the number of vdevs is specified by
+ * the number of bits set in the vdev_map bitmap.
+ */
+ A_UINT32 num_vdevs;
/* This TLV is followed by tim_info and p2p_noa_info for each vdev in vdevmap :
* wmi_tim_info tim_info[];
* wmi_p2p_noa_info p2p_noa_info[];
@@ -6128,6 +7236,28 @@
*/
} wmi_tbtt_offset_event_fixed_param;
+ typedef struct {
+ /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_tbtt_offset_info */
+ A_UINT32 tlv_header;
+ /** unique id identifying the VDEV */
+ A_UINT32 vdev_id;
+ /** tbttoffset in TUs */
+ A_UINT32 tbttoffset;
+ } wmi_tbtt_offset_info;
+
+ /** Use this event if number of vdevs > 32 */
+ typedef struct {
+ /*
+ * TLV tag and len;
+ * tag equals WMITLV_TAG_STRUC_wmi_tbtt_offset_ext_event_fixed_param
+ */
+ A_UINT32 tlv_header;
+ A_UINT32 num_vdevs;
+ /*
+ * The TLVs for tbttoffset will follow this TLV.
+ * Of size num_vdevs * wmi_tbtt_offset_info
+ */
+ } wmi_tbtt_offset_ext_event_fixed_param;
/* Peer Specific commands and events */
@@ -6204,6 +7334,23 @@
wmi_mac_addr peer_macaddr;
} wmi_peer_delete_cmd_fixed_param;
+typedef struct {
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_peer_set_rx_blocksize_cmd_fixed_param
+ */
+ A_UINT32 tlv_header;
+ /** unique id identifying the VDEV, generated by the caller */
+ A_UINT32 vdev_id;
+ /** peer MAC address */
+ wmi_mac_addr peer_macaddr;
+ /**
+ * maximum block ack window size to use during a rx block ack negotiation,
+ * i.e. the maximum number of MPDUs per A-MPDU that will be received
+ */
+ A_UINT32 rx_block_ack_win_limit;
+} wmi_peer_set_rx_blocksize_cmd_fixed_param;
+
typedef struct {
A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_peer_flush_tids_cmd_fixed_param */
/** unique id identifying the VDEV, generated by the caller */
@@ -6430,7 +7577,18 @@
#define WMI_PEER_PHYMODE 0xD
/** Use FIXED Pwr */
#define WMI_PEER_USE_FIXED_PWR 0xE
-/** Set peer fixed rate */
+/** Set peer fixed rate
+ * The top nibble is used to select which format to use for encoding
+ * the rate specification: 0xVXXXXXXX
+ * If V == 0b0000: format is same as before: 0x000000RR
+ * If V == 0b0001: format is: 0x1000RRRR.
+ * This will be the output of WMI_ASSEMBLE_RATECODE_V1
+ * The host shall use the new V1 format (and set V = 0x1) if the target
+ * indicates 802.11ax support via the WMI_SERVICE_11AX flag, or if the
+ * system is configured with Nss > 4 (either at compile time within the
+ * host driver, or through WMI_SERVICE_READY PHY capabilities provided
+ * by the target).
+ */
#define WMI_PEER_PARAM_FIXED_RATE 0xF
/** Whitelist peer TIDs */
#define WMI_PEER_SET_MU_WHITELIST 0x10
@@ -6438,6 +7596,14 @@
#define WMI_PEER_SET_MAX_TX_RATE 0x11
/** Set peer minimal tx rate (MCS) in adaptive rate ctrl */
#define WMI_PEER_SET_MIN_TX_RATE 0x12
+/**
+ * default ring routing for peer data packets,
+ * param_value = bit 0 for hash based routing enabled or not
+ * (value 1 is enabled, value 0 is disabled)
+ * bits 1:5 carry the ring id (i.e. a ring id value
+ * selected from 0 to 31)
+ */
+#define WMI_PEER_SET_DEFAULT_ROUTING 0x13
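As a sketch only (assuming, per the comment above, that bit 0 carries the hash-routing enable and bits 1..5 the ring id; the example_ name is hypothetical), the param_value for WMI_PEER_SET_DEFAULT_ROUTING could be composed with the generic bit helpers:

static A_UINT32 example_default_routing_value(A_UINT32 ring_id, A_UINT32 hash_enable)
{
    A_UINT32 param_value = 0;
    WMI_SET_BITS(param_value, 0, 1, hash_enable ? 1 : 0); /* bit 0: hash-based routing */
    WMI_SET_BITS(param_value, 1, 5, ring_id & 0x1f);      /* bits 1..5: ring id (0..31) */
    return param_value;
}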
/** mimo ps values for the parameter WMI_PEER_MIMO_PS_STATE */
#define WMI_PEER_MIMO_PS_NONE 0x0
@@ -6481,26 +7647,42 @@
#define WMI_PEER_MAX_MIN_TX_RATE_SET(value32, tx_mode) WMI_SET_BITS(value32, 16, 16, tx_mode)
/* CCK max/min tx Rate description
- * tx_rate = 0: 1Mbps,
- * tx_rate = 1: 2Mbps
- * tx_rate = 2: 5.5Mbps
- * tx_rate = 3: 11Mbps
- * tx_rate = else : invalid.
+ * tx_rate = 0: 1 Mbps
+ * tx_rate = 1: 2 Mbps
+ * tx_rate = 2: 5.5 Mbps
+ * tx_rate = 3: 11 Mbps
+ * tx_rate = else: invalid
*/
-#define WMI_MAX_CCK_TX_RATE 0x03
+enum {
+ WMI_MAX_CCK_TX_RATE_1M, /* up to 1M CCK Rate available */
+ WMI_MAX_CCK_TX_RATE_2M, /* up to 2M CCK Rate available */
+ WMI_MAX_CCK_TX_RATE_5_5M, /* up to 5.5M CCK Rate available */
+ WMI_MAX_CCK_TX_RATE_11M, /* up to 11M CCK Rate available */
+ WMI_MAX_CCK_TX_RATE = WMI_MAX_CCK_TX_RATE_11M,
+};
/* OFDM max/min tx Rate description
- * tx_rate = 0: 6Mbps,
- * tx_rate = 1: 9Mbps
- * tx_rate = 2: 12Mbps
- * tx_rate = 3: 18Mbps
- * tx_rate = 4: 24Mbps
- * tx_rate = 5: 32Mbps
- * tx_rate = 6: 48Mbps
- * tx_rate = 7: 54Mbps
- * tx_rate = else : invalid.
+ * tx_rate = 0: 6 Mbps
+ * tx_rate = 1: 9 Mbps
+ * tx_rate = 2: 12 Mbps
+ * tx_rate = 3: 18 Mbps
+ * tx_rate = 4: 24 Mbps
+ * tx_rate = 5: 36 Mbps
+ * tx_rate = 6: 48 Mbps
+ * tx_rate = 7: 54 Mbps
+ * tx_rate = else: invalid
*/
-#define WMI_MAX_OFDM_TX_RATE 0x07
+enum {
+ WMI_MAX_OFDM_TX_RATE_6M, /* up to 6M OFDM Rate available */
+ WMI_MAX_OFDM_TX_RATE_9M, /* up to 9M OFDM Rate available */
+ WMI_MAX_OFDM_TX_RATE_12M, /* up to 12M OFDM Rate available */
+ WMI_MAX_OFDM_TX_RATE_18M, /* up to 18M OFDM Rate available */
+ WMI_MAX_OFDM_TX_RATE_24M, /* up to 24M OFDM Rate available */
+ WMI_MAX_OFDM_TX_RATE_36M, /* up to 36M OFDM Rate available */
+ WMI_MAX_OFDM_TX_RATE_48M, /* up to 48M OFDM Rate available */
+ WMI_MAX_OFDM_TX_RATE_54M, /* up to 54M OFDM Rate available */
+ WMI_MAX_OFDM_TX_RATE = WMI_MAX_OFDM_TX_RATE_54M,
+};
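A hedged sketch (the example_ name is hypothetical, and it assumes the rate index sits in the low 16 bits while the tx mode goes into the high 16 bits, as WMI_PEER_MAX_MIN_TX_RATE_SET above suggests) of building a max-rate value from these enums:

static A_UINT32 example_peer_max_tx_rate(void)
{
    A_UINT32 value32 = WMI_MAX_CCK_TX_RATE_11M;                   /* rate index: 11 Mbps CCK */
    WMI_PEER_MAX_MIN_TX_RATE_SET(value32, WMI_RATE_PREAMBLE_CCK); /* tx mode in bits 16..31 */
    return value32; /* candidate param value for WMI_PEER_SET_MAX_TX_RATE */
}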
/* HT max/min tx rate description
* tx_rate = 0~7 : MCS Rate 0~7
@@ -6549,6 +7731,7 @@
#define WMI_PEER_QOS 0x00000002 /* QoS enabled */
#define WMI_PEER_NEED_PTK_4_WAY 0x00000004 /* Needs PTK 4 way handshake for authorization */
#define WMI_PEER_NEED_GTK_2_WAY 0x00000010 /* Needs GTK 2 way handshake after 4-way handshake */
+#define WMI_PEER_HE 0x00000400 /* HE Enabled */
#define WMI_PEER_APSD 0x00000800 /* U-APSD power save enabled */
#define WMI_PEER_HT 0x00001000 /* HT enabled */
#define WMI_PEER_40MHZ 0x00002000 /* 40MHz enabld */
@@ -6649,8 +7832,10 @@
wmi_ppe_threshold peer_ppet;
A_UINT32 peer_he_cap_info; /* protocol-defined HE / 11ax capability flags */
A_UINT32 peer_he_ops; /* HE operation contains BSS color */
+ A_UINT32 peer_he_cap_phy[WMI_MAX_HECAP_PHY_SIZE];
+ A_UINT32 peer_he_mcs; /* HE MCS/NSS set */
- /* Following this struc are the TLV's:
+ /* Following this struct are the TLV's:
* A_UINT8 peer_legacy_rates[];
* A_UINT8 peer_ht_rates[];
* wmi_vht_rate_set peer_vht_rates; //VHT capabilties of the peer
@@ -6671,6 +7856,9 @@
A_UINT32 vdev_id;
} wmi_peer_add_wds_entry_cmd_fixed_param;
+#define WMI_CHAN_INFO_START_RESP 0
+#define WMI_CHAN_INFO_END_RESP 1
+
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_peer_remove_wds_entry_cmd_fixed_param */
/** wds MAC addr */
@@ -6731,6 +7919,10 @@
A_UINT32 my_bss_rx_cycle_count;
/** b-mode data rx time (units are microseconds) */
A_UINT32 rx_11b_mode_data_duration;
+ /** tx frame count */
+ A_UINT32 tx_frame_cnt;
+ /** mac clock */
+ A_UINT32 mac_clk_mhz;
} wmi_chan_info_event_fixed_param;
/**
@@ -7395,7 +8587,9 @@
#define WMI_ROAM_INVOKE_FLAG_ADD_CH_TO_CACHE 0
/* indicate to host of failure if WMI_ROAM_INVOKE_CMDID. */
#define WMI_ROAM_INVOKE_FLAG_REPORT_FAILURE 1
-/* from bit 2 to bit 31 are reserved */
+/* during host-invoked roaming, don't send null data frame to AP */
+#define WMI_ROAM_INVOKE_FLAG_NO_NULL_FRAME_TO_AP 2
+/* from bit 3 to bit 31 are reserved */
#define WMI_SET_ROAM_INVOKE_ADD_CH_TO_CACHE(flag) do { \
(flag) |= (1 << WMI_SET_ROAM_INVOKE_ADD_CH_TO_CACHE); \
@@ -7749,6 +8943,21 @@
A_UINT32 reason;/* refer to p2p_lo_stopped_reason_e */
} wmi_p2p_lo_stopped_event_fixed_param;
+typedef enum {
+ WMI_MNT_FILTER_CONFIG_MANAGER,
+ WMI_MNT_FILTER_CONFIG_CONTROL,
+ WMI_MNT_FILTER_CONFIG_DATA,
+ WMI_MNT_FILTER_CONFIG_ALL,
+ WMI_MNT_FILTER_CONFIG_UNKNOWN,
+} WMI_MNT_FILTER_CONFIG_TYPE;
+
+typedef struct {
+ A_UINT32 tlv_header;
+ A_UINT32 vdev_id;
+ A_UINT32 clear_or_set;
+ A_UINT32 configure_type; /* see WMI_MNT_FILTER_CONFIG_TYPE */
+} wmi_mnt_filter_cmd_fixed_param;
+
typedef struct {
A_UINT32 time32; //upper 32 bits of time stamp
A_UINT32 time0; //lower 32 bits of time stamp
@@ -7950,6 +9159,7 @@
WOW_REASON_TDLS_CONN_TRACKER_EVENT,
WOW_REASON_CRITICAL_LOG,
WOW_REASON_P2P_LISTEN_OFFLOAD,
+ WOW_REASON_NAN_EVENT_WAKE_HOST,
WOW_REASON_DEBUG_TEST = 0xFF,
} WOW_WAKE_REASON_TYPE;
@@ -7967,6 +9177,10 @@
* to request it.
*/
WMI_WOW_FLAG_SEND_PM_PME = 0x00000002,
+ /* Flag to indicate unit test */
+ WMI_WOW_FLAG_UNIT_TEST_ENABLE = 0x00000004,
+ /* Force HTC wakeup */
+ WMI_WOW_FLAG_DO_HTC_WAKEUP = 0x00000008,
};
typedef struct {
@@ -8315,6 +9529,7 @@
A_UINT32 swol_indoor_pattern; /* wakeup pattern */
A_UINT32 swol_indoor_exception; /* wakeup when exception happens */
A_UINT32 swol_indoor_exception_app;
+ A_UINT32 swol_assist_enable; /* whether to enable IoT mode */
} wmi_extwow_set_app_type1_params_cmd_fixed_param;
typedef struct {
@@ -8682,16 +9897,20 @@
#define WMI_NLO_MAX_SSIDS 16
#define WMI_NLO_MAX_CHAN 48
-#define WMI_NLO_CONFIG_STOP (0x1 << 0)
-#define WMI_NLO_CONFIG_START (0x1 << 1)
-#define WMI_NLO_CONFIG_RESET (0x1 << 2)
-#define WMI_NLO_CONFIG_SLOW_SCAN (0x1 << 4)
-#define WMI_NLO_CONFIG_FAST_SCAN (0x1 << 5)
-#define WMI_NLO_CONFIG_SSID_HIDE_EN (0x1 << 6)
+#define WMI_NLO_CONFIG_STOP (0x1 << 0)
+#define WMI_NLO_CONFIG_START (0x1 << 1)
+#define WMI_NLO_CONFIG_RESET (0x1 << 2)
+#define WMI_NLO_CONFIG_SLOW_SCAN (0x1 << 4)
+#define WMI_NLO_CONFIG_FAST_SCAN (0x1 << 5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN (0x1 << 6)
/* This bit is used to indicate if EPNO or supplicant PNO is enabled. Only one of them can be enabled at a given time */
-#define WMI_NLO_CONFIG_ENLO (0x1 << 7)
-#define WMI_NLO_CONFIG_SCAN_PASSIVE (0x1 << 8)
-#define WMI_NLO_CONFIG_ENLO_RESET (0x1 << 9)
+#define WMI_NLO_CONFIG_ENLO (0x1 << 7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE (0x1 << 8)
+#define WMI_NLO_CONFIG_ENLO_RESET (0x1 << 9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ (0x1 << 10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ (0x1 << 11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ (0x1 << 12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG (0x1 << 13)
/* Whether directed scan needs to be performed (for hidden SSIDs) */
#define WMI_ENLO_FLAG_DIRECTED_SCAN 1
@@ -8794,6 +10013,16 @@
A_UINT32 band5GHz_bonus; /* 5GHz RSSI score bonus (applied to all 5GHz networks) */
} enlo_candidate_score_params;
+typedef struct connected_nlo_rssi_params_t {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_connected_nlo_rssi_params */
+ /* Relative rssi threshold (in dB) by which new BSS should have better rssi than
+ * the current connected BSS.
+ */
+ A_INT32 relative_rssi;
+ /* The amount of rssi preference (in dB) that can be given to a 5G BSS over 2.4G BSS. */
+ A_INT32 relative_rssi_5g_pref;
+} connected_nlo_rssi_params;
+
typedef struct wmi_nlo_config {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_nlo_config_cmd_fixed_param */
A_UINT32 flags;
@@ -8810,11 +10039,21 @@
A_UINT32 no_of_ssids;
A_UINT32 num_of_channels;
A_UINT32 delay_start_time; /* NLO scan start delay time in milliseconds */
+ /** MAC Address to use in Probe Req as SA **/
+ wmi_mac_addr mac_addr;
+ /** Mask on which MAC has to be randomized **/
+ wmi_mac_addr mac_mask;
+ /** IE bitmap to use in Probe Req **/
+ A_UINT32 ie_bitmap[WMI_IE_BITMAP_SIZE];
+ /** Number of vendor OUIs. In the TLV vendor_oui[] **/
+ A_UINT32 num_vendor_oui;
/* The TLVs will follow.
* nlo_configured_parameters nlo_list[];
* A_UINT32 channel_list[];
* nlo_channel_prediction_cfg ch_prediction_cfg;
* enlo_candidate_score_params candidate_score_params;
+ * wmi_vendor_oui vendor_oui[];
+ * connected_nlo_rssi_params cnlo_rssi_params;
*/
} wmi_nlo_config_cmd_fixed_param;
@@ -9194,8 +10433,12 @@
A_UINT32 type; /*0:unused 1: ASSERT, 2: not respond detect command,3: simulate ep-full(),4:...*/
A_UINT32 delay_time_ms; /*0xffffffff means the simulate will delay for random time (0 ~0xffffffff ms)*/
}WMI_FORCE_FW_HANG_CMD_fixed_param;
-#define WMI_MCAST_FILTER_SET 1
-#define WMI_MCAST_FILTER_DELETE 2
+
+typedef enum {
+ WMI_MCAST_FILTER_SET = 1,
+ WMI_MCAST_FILTER_DELETE
+} WMI_SET_SINGLE_MCAST_FILTER_OP;
+
typedef struct {
A_UINT32 tlv_header;
A_UINT32 vdev_id;
@@ -9204,6 +10447,28 @@
wmi_mac_addr mcastbdcastaddr;
} WMI_SET_MCASTBCAST_FILTER_CMD_fixed_param;
+typedef enum {
+ WMI_MULTIPLE_MCAST_FILTER_CLEAR = 1, /* clear all previous mc list */
+ /* clear all previous mc list, and set new list */
+ WMI_MULTIPLE_MCAST_FILTER_SET,
+ WMI_MULTIPLE_MCAST_FILTER_DELETE, /* delete one/multiple mc list */
+ WMI_MULTIPLE_MCAST_FILTER_ADD /* add one/multiple mc list */
+} WMI_MULTIPLE_MCAST_FILTER_OP;
+
+typedef struct {
+ A_UINT32 tlv_header;
+ A_UINT32 vdev_id;
+ A_UINT32 operation; /* refer WMI_MULTIPLE_MCAST_FILTER_OP */
+ /* number of elements in the subsequent mcast addr list */
+ A_UINT32 num_mcastaddrs;
+ /**
+ * TLV (tag length value) parameters follow the
+ * structure. The TLV's are:
+ * wmi_mac_addr mcastaddr_list[num_mcastaddrs];
+ */
+} WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param;
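For illustration only (the example_ name is hypothetical), the message buffer for this command would typically be sized as the fixed param plus the array TLV header plus the MAC address array, mirroring the host driver's usual TLV layout:

static A_UINT32 example_mcast_filter_buf_len(A_UINT32 num_mcastaddrs)
{
    return sizeof(WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param) +
           WMI_TLV_HDR_SIZE +                     /* header of the mcastaddr_list[] TLV */
           num_mcastaddrs * sizeof(wmi_mac_addr); /* the address array itself */
}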
+
+
/* WMI_DBGLOG_TIME_STAMP_SYNC_CMDID */
typedef enum {
WMI_TIME_STAMP_SYNC_MODE_MS, /* millisecond units */
@@ -9450,6 +10715,74 @@
A_UINT32 pdev_id;
} wmi_dfs_phyerr_filter_dis_cmd_fixed_param;
+typedef struct {
+ A_UINT32 tlv_header;
+ A_UINT32 pdev_id;
+} wmi_pdev_dfs_phyerr_offload_enable_cmd_fixed_param;
+
+typedef struct {
+ A_UINT32 tlv_header;
+ A_UINT32 pdev_id;
+} wmi_pdev_dfs_phyerr_offload_disable_cmd_fixed_param;
+
+typedef enum {
+ QUICK_OCAC = 0,
+ EXTENSIVE_OCAC,
+} WMI_ADFS_OCAC_MODE;
+
+typedef struct {
+ A_UINT32 tlv_header;
+ A_UINT32 vdev_id;
+ A_UINT32 ocac_mode; /* WMI_ADFS_OCAC_MODE */
+ A_UINT32 min_duration_ms; /* in milliseconds */
+ A_UINT32 max_duration_ms; /* in milliseconds */
+ A_UINT32 chan_freq; /* in MHz */
+ A_UINT32 chan_width; /* in MHz */
+ A_UINT32 center_freq; /* in MHz */
+} wmi_vdev_adfs_ch_cfg_cmd_fixed_param;
+
+typedef struct {
+ A_UINT32 tlv_header;
+ A_UINT32 vdev_id;
+} wmi_vdev_adfs_ocac_abort_cmd_fixed_param;
+
+typedef enum {
+ IN_SERVICE_MODE = 0,
+ OCAC_MODE,
+} WMI_DFS_RADAR_DETECTION_MODE;
+
+typedef struct {
+ A_UINT32 tlv_header;
+ A_UINT32 pdev_id;
+ /* In-service mode or O-CAC mode */
+ A_UINT32 detection_mode; /* WMI_DFS_RADAR_DETECTION_MODE */
+ A_UINT32 chan_freq; /* in MHz */
+ A_UINT32 chan_width; /* in MHz */
+ A_UINT32 detector_id;
+ A_UINT32 segment_id;
+ A_UINT32 timestamp;
+ A_UINT32 is_chirp;
+} wmi_pdev_dfs_radar_detection_event_fixed_param;
+
+typedef enum {
+ OCAC_COMPLETE = 0,
+ OCAC_ABORT,
+} WMI_VDEV_OCAC_COMPLETE_STATUS;
+
+typedef struct {
+ A_UINT32 tlv_header;
+ A_UINT32 vdev_id;
+ A_UINT32 chan_freq; /* in MHz */
+ A_UINT32 chan_width; /* in MHz */
+ A_UINT32 center_freq; /* in MHz */
+ A_UINT32 status; /* WMI_VDEV_OCAC_COMPLETE_STATUS */
+} wmi_vdev_adfs_ocac_complete_event_fixed_param;
+
+typedef struct {
+ A_UINT32 tlv_header;
+ A_UINT32 vdev_id;
+} wmi_vdev_dfs_cac_complete_event_fixed_param;
+
/** TDLS COMMANDS */
/* WMI_TDLS_SET_STATE_CMDID */
@@ -9739,6 +11072,16 @@
WMI_TDLS_ENTER_BT_BUSY_MODE,
/** BT exited busy mode, TDLS connection tracker needs to handle this */
WMI_TDLS_EXIT_BT_BUSY_MODE,
+ /*
+ * TDLS module received a scan start event, TDLS connection tracker
+ * needs to handle this
+ */
+ WMI_TDLS_SCAN_STARTED_EVENT,
+ /*
+ * TDLS module received a scan complete event, TDLS connection tracker
+ * needs to handle this
+ */
+ WMI_TDLS_SCAN_COMPLETED_EVENT,
};
/* WMI_TDLS_PEER_EVENTID */
@@ -10371,6 +11714,69 @@
*/
} wmi_peer_info_event_fixed_param;
+/**
+ * WMI_PEER_ANTDIV_INFO_REQ_CMDID
+ * Request FW to provide peer info
+ */
+typedef struct {
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_peer_antdiv_info_req_cmd_fixed_param
+ */
+ A_UINT32 tlv_header;
+ /**
+ * In order to get the peer antdiv info for a single peer, host shall
+ * issue the peer_mac_address of that peer. For getting the
+ * info for all peers, the host shall issue 0xFFFFFFFF as the mac
+ * address. The firmware will return the peer info for all the
+ * peers on the specified vdev_id
+ */
+ wmi_mac_addr peer_mac_address;
+ /** vdev id */
+ A_UINT32 vdev_id;
+} wmi_peer_antdiv_info_req_cmd_fixed_param;
+
+/** FW response with the peer antdiv info */
+typedef struct {
+ /** TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_peer_antdiv_info_event_fixed_param
+ */
+ A_UINT32 tlv_header;
+ /** number of peers in peer_info */
+ A_UINT32 num_peers;
+ /** VDEV to which the peer belongs */
+ A_UINT32 vdev_id;
+ /**
+ * This TLV is followed by another TLV of array of structs
+ * wmi_peer_antdiv_info peer_antdiv_info[];
+ */
+} wmi_peer_antdiv_info_event_fixed_param;
+
+typedef struct {
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_peer_antdiv_info
+ */
+ A_UINT32 tlv_header;
+ /** mac addr of the peer */
+ wmi_mac_addr peer_mac_address;
+ /**
+ * per chain rssi of the peer, for up to 8 chains.
+ * Each chain's entry reports the RSSI for different bandwidths:
+ * bits 7:0 -> primary 20 MHz
+ * bits 15:8 -> secondary 20 MHz of 40 MHz channel (if applicable)
+ * bits 23:16 -> secondary 40 MHz of 80 MHz channel (if applicable)
+ * bits 31:24 -> secondary 80 MHz of 160 MHz channel (if applicable)
+ * Each of these 8-bit RSSI reports is in dB units, with respect to
+ * the noise floor.
+ * 0x80 means invalid.
+ * All unused bytes within used chain_rssi indices shall be set
+ * to 0x80.
+ * All unused chain_rssi indices shall be set to 0x80808080.
+ */
+ A_INT32 chain_rssi[8];
+} wmi_peer_antdiv_info;
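A small decoding sketch (the example_ name is hypothetical) that unpacks one chain_rssi word into the per-bandwidth RSSI bytes described above; 0x80 marks an invalid entry:

static void example_decode_chain_rssi(const wmi_peer_antdiv_info *info, int chain)
{
    A_UINT32 word = (A_UINT32) info->chain_rssi[chain];
    A_UINT8 rssi_pri20 = (word >>  0) & 0xff; /* primary 20 MHz */
    A_UINT8 rssi_sec20 = (word >>  8) & 0xff; /* secondary 20 MHz of 40 MHz */
    A_UINT8 rssi_sec40 = (word >> 16) & 0xff; /* secondary 40 MHz of 80 MHz */
    A_UINT8 rssi_sec80 = (word >> 24) & 0xff; /* secondary 80 MHz of 160 MHz */
    /* each byte is in dB relative to the noise floor; 0x80 means invalid */
    (void)rssi_pri20; (void)rssi_sec20; (void)rssi_sec40; (void)rssi_sec80;
}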
+
/** FW response when tx failure count has reached threshold
* for a peer */
typedef struct {
@@ -10879,6 +12285,17 @@
#define wmi_ndp_rsp_code wmi_ndp_rsp_code_PROTOTYPE
/**
+* NDP Channel configuration type
+*/
+typedef enum {
+ WMI_NDP_CHANNEL_NOT_REQUESTED = 0, /* Channel will not be configured */
+ WMI_NDP_REQUEST_CHANNEL_SETUP = 1, /* Channel will be provided and is optional/hint */
+ WMI_NDP_FORCE_CHANNEL_SETUP = 2 /* NDP must start on the provided channel */
+} wmi_ndp_channel_cfg_PROTOTYPE;
+
+#define wmi_ndp_channel_cfg wmi_ndp_channel_cfg_PROTOTYPE
+
+/**
* NDP Initiator requesting a data session
*/
typedef struct {
@@ -10896,12 +12313,19 @@
A_UINT32 ndp_cfg_len;
/** Actual number of bytes in TLV ndp_app_info */
A_UINT32 ndp_app_info_len;
+ /** NDP channel configuration type defined in wmi_ndp_channel_cfg */
+ A_UINT32 ndp_channel_cfg;
+ /** NAN Cipher Suite Shared Key */
+ A_UINT32 nan_csid;
+ /** Actual number of bytes in TLV nan_pmk */
+ A_UINT32 nan_pmk_len;
/**
* TLV (tag length value ) parameters follow the ndp_initiator_req
* structure. The TLV's are:
* wmi_channel channel;
* A_UINT8 ndp_cfg[];
* A_UINT8 ndp_app_info[];
+ * A_UINT8 nan_pmk[];
*/
} wmi_ndp_initiator_req_fixed_param_PROTOTYPE;
@@ -10929,11 +12353,16 @@
A_UINT32 ndp_cfg_len;
/** Number of bytes in TLV ndp_app_info */
A_UINT32 ndp_app_info_len;
+ /** NAN Cipher Suite Shared Key */
+ A_UINT32 nan_csid;
+ /** Actual number of bytes in TLV nan_pmk */
+ A_UINT32 nan_pmk_len;
/**
* TLV (tag length value ) parameters follow the ndp_responder_req
* structure. The TLV's are:
* A_UINT8 ndp_cfg[];
* A_UINT8 ndp_app_info[];
+ * A_UINT8 nan_pmk[];
*/
} wmi_ndp_responder_req_fixed_param_PROTOTYPE;
@@ -11006,6 +12435,8 @@
A_UINT32 max_ndp_sessions;
/** Max number of peer's per ndi */
A_UINT32 max_peers_per_ndi;
+ /** which combination of bands is supported - see NAN_DATA_SUPPORTED_BAND enums */
+ A_UINT32 nan_data_supported_bands;
} wmi_ndi_cap_rsp_event_fixed_param_PROTOTYPE;
#define wmi_ndi_cap_rsp_event_fixed_param wmi_ndi_cap_rsp_event_fixed_param_PROTOTYPE
@@ -11062,6 +12493,8 @@
A_UINT32 ndp_instance_id;
/** NDI mac address of the peer */
wmi_mac_addr peer_ndi_mac_addr;
+ /** Host can create peer if this entry is TRUE */
+ A_UINT32 create_peer;
} wmi_ndp_responder_rsp_event_fixed_param_PROTOTYPE;
#define wmi_ndp_responder_rsp_event_fixed_param wmi_ndp_responder_rsp_event_fixed_param_PROTOTYPE
@@ -11167,11 +12600,16 @@
A_UINT32 ndp_cfg_len;
/** Number of bytes in TLV wmi_ndp_app_info */
A_UINT32 ndp_app_info_len;
+ /** Peer NAN Cipher Suite Shared Key */
+ A_UINT32 nan_csid;
+ /** Actual number of bytes in TLV nan_scid */
+ A_UINT32 nan_scid_len;
/**
* TLV (tag length value ) parameters follow the ndp_indication
* structure. The TLV's are:
* A_UINT8 ndp_cfg[];
* A_UINT8 ndp_app_info[];
+ * A_UINT8 nan_scid[];
*/
} wmi_ndp_indication_event_fixed_param_PROTOTYPE;
@@ -11293,6 +12731,115 @@
WMI_MODEM_STATE_ON
};
+/**
+ * This command is sent from WLAN host driver to firmware to
+ * notify the updated Specific Absorption Rate (SAR) limits.
+ * Because SAR is a critical regulation for FCC compliance, OEMs require
+ * methods to set limits on WLAN/WWAN TX power.
+ * The host receives instructions on what limits to set per
+ * band/chain/modulation; it then interprets them and sends the limits
+ * to FW using this WMI message.
+ * Since it is possible to have too many commands to fit into one message,
+ * FW will keep receiving the messages, until it finds one with
+ * commit_limits = 1, at which point it will apply all the received
+ * specifications.
+*/
+
+typedef struct {
+ /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_sar_limits_cmd_param */
+ A_UINT32 tlv_header;
+
+ /** when set to one of WMI_SAR_FEATURE_ON_SET_0 through SET_4, enable the
+ * SAR feature with the limits from the corresponding BDF set (or use
+ * WMI-supplied limits for WMI_SAR_FEATURE_ON_USER_DEFINED);
+ * if set to WMI_SAR_FEATURE_OFF, disable the feature;
+ * if set to WMI_SAR_FEATURE_NO_CHANGE, do not alter the state of the feature;
+ */
+
+ A_UINT32 sar_enable;
+
+ /** number of items in sar_limits[] */
+ A_UINT32 num_limit_rows;
+ /** once this message is received with commit_limits set to 1, FW will
+ * calculate the power limits and send a set_power command to apply them.
+ * Otherwise FW just updates the locally stored values until a future msg
+ * with commit_limits=1 arrives.
+ */
+
+ A_UINT32 commit_limits;
+
+ /**
+ * TLV (tag length value) parameters follow the sar_limit_cmd_row
+ * structure. The TLV's are:
+ * wmi_sar_limit_cmd_row sar_limits[];
+ */
+} wmi_sar_limits_cmd_fixed_param;
+
+enum wmi_sar_feature_state_flags {
+ WMI_SAR_FEATURE_OFF = 0,
+ WMI_SAR_FEATURE_ON_SET_0,
+ WMI_SAR_FEATURE_ON_SET_1,
+ WMI_SAR_FEATURE_ON_SET_2,
+ WMI_SAR_FEATURE_ON_SET_3,
+ WMI_SAR_FEATURE_ON_SET_4,
+ WMI_SAR_FEATURE_NO_CHANGE,
+ WMI_SAR_FEATURE_ON_USER_DEFINED,
+};
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_sar_limit_cmd_row */
+
+ /** Current values: WMI_SAR_2G_ID, WMI_SAR_5G_ID. Can be extended by adding
+ * new band_id values.
+ */
+ A_UINT32 band_id;
+
+ A_UINT32 chain_id;
+
+ /** Current values: WMI_SAR_MOD_CCK, WMI_SAR_MOD_OFDM */
+ A_UINT32 mod_id;
+
+ /** actual power limit value, in steps of 0.5 dBm */
+ A_UINT32 limit_value;
+
+ /** in case the OEM doesn't care about one of the qualifiers from above,
+ * the bit for that qualifier within the validity_bitmap can be set to 0
+ * so that limit is applied to all possible cases of this qualifier
+ * (i.e. if a qualifier's validity_bitmap flag is 0, the qualifier is
+ * treated as a wildcard).
+ * Current masks:
+ * WMI_SAR_BAND_ID_VALID_MASK
+ * WMI_SAR_CHAIN_ID_VALID_MASK
+ * WMI_SAR_MOD_ID_VALID_MASK
+ * Example: if !WMI_IS_SAR_MOD_ID_VALID(bitmap),
+ * it means apply same limit_value to both WMI_SAR_MOD_CCK and
+ * WMI_SAR_MOD_OFDM cases.
+ */
+
+ A_UINT32 validity_bitmap;
+} wmi_sar_limit_cmd_row;
+
+enum wmi_sar_band_id_flags {
+ WMI_SAR_2G_ID = 0,
+ WMI_SAR_5G_ID
+};
+
+enum wmi_sar_mod_id_flags {
+ WMI_SAR_MOD_CCK = 0,
+ WMI_SAR_MOD_OFDM
+};
+
+#define WMI_SAR_BAND_ID_VALID_MASK (0x1)
+#define WMI_SAR_CHAIN_ID_VALID_MASK (0x2)
+#define WMI_SAR_MOD_ID_VALID_MASK (0x4)
+
+#define WMI_SET_SAR_BAND_ID_VALID(bitmap) ((bitmap) |= WMI_SAR_BAND_ID_VALID_MASK)
+#define WMI_SET_SAR_CHAIN_ID_VALID(bitmap) ((bitmap) |= WMI_SAR_CHAIN_ID_VALID_MASK)
+#define WMI_SET_SAR_MOD_ID_VALID(bitmap) ((bitmap) |= WMI_SAR_MOD_ID_VALID_MASK)
+
+#define WMI_IS_SAR_BAND_ID_VALID(bitmap) ((bitmap) & WMI_SAR_BAND_ID_VALID_MASK)
+#define WMI_IS_SAR_CHAIN_ID_VALID(bitmap) ((bitmap) & WMI_SAR_CHAIN_ID_VALID_MASK)
+#define WMI_IS_SAR_MOD_ID_VALID(bitmap) ((bitmap) & WMI_SAR_MOD_ID_VALID_MASK)
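To illustrate the wildcard semantics (a sketch only; the example_ name and the limit value are hypothetical), a row that applies one 2.4 GHz limit to chain 0 for both CCK and OFDM would leave the mod_id validity bit clear:

static void example_fill_sar_row(wmi_sar_limit_cmd_row *row)
{
    /* WMITLV_SET_HDR() for row->tlv_header omitted for brevity */
    row->band_id         = WMI_SAR_2G_ID;
    row->chain_id        = 0;
    row->mod_id          = 0;    /* ignored: mod_id is left as a wildcard below */
    row->limit_value     = 24;   /* 12.0 dBm, in 0.5 dBm steps */
    row->validity_bitmap = 0;
    WMI_SET_SAR_BAND_ID_VALID(row->validity_bitmap);
    WMI_SET_SAR_CHAIN_ID_VALID(row->validity_bitmap);
    /* WMI_SAR_MOD_ID_VALID_MASK intentionally not set -> applies to CCK and OFDM */
}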
+
#define WMI_ROAM_AUTH_STATUS_CONNECTED 0x1 /** connected, but not authenticated */
#define WMI_ROAM_AUTH_STATUS_AUTHENTICATED 0x2 /** connected and authenticated */
@@ -12378,6 +13925,50 @@
A_UINT32 pdev_id;
} wmi_pdev_temperature_event_fixed_param;
+typedef enum {
+ ANTDIV_HW_CFG_STATUS,
+ ANTDIV_SW_CFG_STATUS,
+ ANTDIV_MAX_STATUS_TYPE_NUM
+} ANTDIV_STATUS_TYPE;
+
+typedef struct {
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_pdev_get_antdiv_status_cmd_fixed_param
+ */
+ A_UINT32 tlv_header;
+ /* Status event ID - see ANTDIV_STATUS_TYPE */
+ A_UINT32 status_event_id;
+ /**
+ * pdev_id for identifying the MAC
+ * See macros starting with WMI_PDEV_ID_ for values.
+ */
+ A_UINT32 pdev_id;
+} wmi_pdev_get_antdiv_status_cmd_fixed_param;
+
+typedef struct {
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_pdev_antdiv_status_event_fixed_param
+ */
+ A_UINT32 tlv_header;
+ /* ANT DIV feature enabled or not */
+ A_UINT32 support;
+ A_UINT32 chain_num; /* how many chain supported */
+ /* how many ANT supported, 32 max */
+ A_UINT32 ant_num;
+ /**
+ * Each entry is for a tx/rx chain, and contains a bitmap identifying
+ * the antennas attached to that tx/rx chain.
+ */
+ A_UINT32 selectable_ant_mask[8];
+ /**
+ * pdev_id for identifying the MAC
+ * See macros starting with WMI_PDEV_ID_ for values.
+ */
+ A_UINT32 pdev_id;
+} wmi_pdev_antdiv_status_event_fixed_param;
+
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_set_dhcp_server_offload_cmd_fixed_param */
A_UINT32 vdev_id;
@@ -12832,6 +14423,11 @@
A_UINT32 channel_count;
A_UINT32 schedule_size;
A_UINT32 flags;
+ /**
+ * Max duration of continuing multichannel operation without
+ * receiving a TA frame (units = seconds)
+ */
+ A_UINT32 ta_max_duration;
/** This is followed by a TLV array of wmi_channel. */
/** This is followed by a TLV array of wmi_ocb_channel. */
@@ -13457,6 +15053,22 @@
A_UINT32 qtimer_high;
} wmi_vdev_tsf_report_event_fixed_param;
+/**
+ * ie_id values:
+ * 0 to 255 are used for individual IEEE802.11 Information Element types
+ */
+#define WMI_SET_VDEV_IE_ID_SCAN_SET_DEFAULT_IE 256
+
+/* source values: */
+#define WMI_SET_VDEV_IE_SOURCE_HOST 0x0
+
+/* band values: */
+typedef enum {
+ WMI_SET_VDEV_IE_BAND_ALL = 0,
+ WMI_SET_VDEV_IE_BAND_2_4GHZ,
+ WMI_SET_VDEV_IE_BAND_5GHZ,
+} wmi_set_vdev_ie_band;
+
typedef struct {
/** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_vdev_set_ie_cmd_fixed_param */
A_UINT32 tlv_header;
@@ -13465,11 +15077,86 @@
/** unique id to identify the ie_data as defined by ieee 802.11 spec */
A_UINT32 ie_id; /** ie_len corresponds to num of bytes in ie_data[] */
A_UINT32 ie_len;
+ /** source of this command */
+ A_UINT32 ie_source; /* see WMI_SET_VDEV_IE_SOURCE_ defs */
+ /** band for this IE - see wmi_set_vdev_ie_band enum */
+ A_UINT32 band;
/**
* Following this structure is the TLV byte stream of ie data of length ie_buf_len:
* A_UINT8 ie_data[]; */
} wmi_vdev_set_ie_cmd_fixed_param;
+/* DISA feature related data structures */
+#define MAX_MAC_HEADER_LEN 32
+typedef enum {
+ WMI_ENCRYPT_DECRYPT_FLAG_INVALID,
+ WMI_ENCRYPT = 1,
+ WMI_DECRYPT = 2,
+} WMI_ENCRYPT_DECRYPT_FLAG;
+
+typedef struct {
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param
+ */
+ A_UINT32 tlv_header;
+ /** unique id identifying the VDEV, generated by the caller */
+ A_UINT32 vdev_id;
+ A_UINT32 key_flag; /* WMI_ENCRYPT_DECRYPT_FLAG */
+ A_UINT32 key_idx;
+ A_UINT32 key_cipher;
+ A_UINT32 key_len; /* units = bytes */
+ A_UINT32 key_txmic_len; /* units = bytes */
+ A_UINT32 key_rxmic_len; /* units = bytes */
+ /** Key: This array needs to be provided in little-endian order */
+ A_UINT8 key_data[WMI_MAX_KEY_LEN];
+ /**
+ * Packet number: This array needs to be provided in little-endian
+ * order.
+ * If the PN is less than 8 bytes, the PN data shall be placed into this
+ * pn[] array starting at byte 0, leaving the MSBs empty.
+ */
+ A_UINT8 pn[8];
+ /**
+ * 802.11 MAC header to be typecast to struct ieee80211_qosframe_addr4
+ * This array needs to be provided in little-endian order.
+ */
+ A_UINT8 mac_hdr[MAX_MAC_HEADER_LEN];
+ A_UINT32 data_len; /** Payload length, units = bytes */
+ /**
+ * Following this struct are this TLV:
+ * A_UINT8 data[]; <-- actual data to be encrypted,
+ * needs to be provided in little-endian order
+ */
+} wmi_vdev_encrypt_decrypt_data_req_cmd_fixed_param;
+
+/**
+ * This event is generated in response to
+ * WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID from HOST.
+ * On receiving WMI command WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID from
+ * HOST with DISA test vectors, a DISA frame will be prepared and submitted to HW,
+ * then on receiving the tx completion for the DISA frame this WMI event
+ * will be delivered to HOST with the encrypted frame.
+ */
+typedef struct {
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_vdev_encrypt_decrypt_data_resp_event_fixed_param
+ */
+ A_UINT32 tlv_header;
+ /* VDEV identifier */
+ A_UINT32 vdev_id;
+ A_INT32 status; /* 0: success, -1: Failure, */
+ /* 802.11 header length + encrypted payload length (units = bytes) */
+ A_UINT32 data_length;
+ /**
+ * Following this struct is this TLV:
+ * A_UINT8 enc80211_frame[]; <-- Encrypted 802.11 frame;
+ * 802.11 header + encrypted payload,
+ * provided in little-endian order
+ */
+} wmi_vdev_encrypt_decrypt_data_resp_event_fixed_param;
+
/* DEPRECATED - use wmi_pdev_set_pcl_cmd_fixed_param instead */
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_soc_set_pcl_cmd_fixed_param */
@@ -13484,6 +15171,15 @@
**/
} wmi_soc_set_pcl_cmd_fixed_param;
+/* Values for channel_weight */
+typedef enum {
+ WMI_PCL_WEIGHT_DISALLOW = 0,
+ WMI_PCL_WEIGHT_LOW = 1,
+ WMI_PCL_WEIGHT_MEDIUM = 2,
+ WMI_PCL_WEIGHT_HIGH = 3,
+ WMI_PCL_WEIGHT_VERY_HIGH = 4,
+} wmi_pcl_chan_weight;
+
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_pdev_set_pcl_cmd_fixed_param */
/** Set Preferred Channel List **/
@@ -13512,6 +15208,19 @@
} wmi_soc_set_hw_mode_cmd_fixed_param;
typedef struct {
+ /* TLV tag and len tag equals WMITLV_TAG_STRUC_wmi_pdev_band_to_mac */
+ A_UINT32 tlv_header;
+ /** pdev_id for identifying the MAC
+ * See macros starting with WMI_PDEV_ID_ for values.
+ */
+ A_UINT32 pdev_id;
+ /* start frequency in MHz */
+ A_UINT32 start_freq;
+ /* end frequency in MHz */
+ A_UINT32 end_freq;
+} wmi_pdev_band_to_mac;
+
+typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_pdev_set_hw_mode_cmd_fixed_param */
/** Set Hardware Mode **/
@@ -13522,6 +15231,13 @@
/* Hardware Mode Index */
A_UINT32 hw_mode_index;
+ /* Number of band to mac TLVs */
+
+ A_UINT32 num_band_to_mac;
+
+ /* Followed by TLVs of type
+ * num_band_to_mac * wmi_pdev_band_to_mac.
+ */
} wmi_pdev_set_hw_mode_cmd_fixed_param;
/* DEPRECATED - use wmi_pdev_set_dual_mac_config_cmd_fixed_param instead */
@@ -13731,6 +15447,55 @@
*/
} wmi_pdev_hw_mode_transition_event_fixed_param;
+/**
+ * This command is sent from WLAN host driver to firmware for
+ * plugging in reorder queue desc to lithium hw.
+ *
+ * Example: plug-in queue desc for TID 5
+ * host->target: WMI_PEER_REORDER_QUEUE_SETUP_CMDID,
+ * (vdev_id = PEER vdev id,
+ * peer_macaddr = PEER mac addr,
+ * tid = 5,
+ * queue_ptr_lo = queue desc addr lower 32 bits,
+ * queue_ptr_hi = queue desc addr higher 32 bits,
+ * queue_no = 16-bit number assigned by host for queue,
+ * stored in bits 15:0 of queue_no field)
+ */
+typedef struct {
+ /* TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_peer_reorder_queue_setup_cmd_fixed_param
+ */
+ A_UINT32 tlv_header;
+ A_UINT32 vdev_id;
+ wmi_mac_addr peer_macaddr; /* peer mac address */
+ A_UINT32 tid; /* 0 to 15 = QoS TIDs, 16 = non-qos TID */
+ A_UINT32 queue_ptr_lo; /* lower 32 bits of queue desc address */
+ A_UINT32 queue_ptr_hi; /* upper 32 bits of queue desc address */
+ A_UINT32 queue_no; /* 16-bit number assigned by host for queue,
+ stored in bits 15:0 of queue_no field */
+} wmi_peer_reorder_queue_setup_cmd_fixed_param;
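Following the TID-5 example in the comment above, a sketch (the example_ name is hypothetical; TLV header setup is omitted) of filling the fixed param:

static void example_reorder_queue_setup(wmi_peer_reorder_queue_setup_cmd_fixed_param *cmd,
                                        A_UINT32 vdev_id, const wmi_mac_addr *peer,
                                        A_UINT32 paddr_lo, A_UINT32 paddr_hi,
                                        A_UINT32 host_queue_no)
{
    cmd->vdev_id      = vdev_id;
    cmd->peer_macaddr = *peer;
    cmd->tid          = 5;                      /* QoS TID 5 */
    cmd->queue_ptr_lo = paddr_lo;               /* reorder queue desc addr, low 32 bits */
    cmd->queue_ptr_hi = paddr_hi;               /* high 32 bits */
    cmd->queue_no     = host_queue_no & 0xffff; /* host-assigned id, bits 15:0 */
}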
+
+/**
+ * This command is sent from WLAN host driver to firmware for
+ * removing one or more reorder queue desc to lithium hw.
+ *
+ * Example: remove queue desc for all TIDs
+ * host->target: WMI_PEER_REORDER_REMOVE_CMDID,
+ * (vdev_id = PEER vdev id,
+ * peer_macaddr = PEER mac addr,
+ * tid_mask = 0x1FFFF)
+ */
+typedef struct {
+ /* TLV tag and len;
+ * tag equals WMITLV_TAG_STRUC_wmi_peer_reorder_queue_remove_cmd_fixed_param
+ */
+ A_UINT32 tlv_header;
+ A_UINT32 vdev_id;
+ wmi_mac_addr peer_macaddr; /* peer mac address */
+ A_UINT32 tid_mask; /* bits 0 to 15 = QoS TIDs, bit 16 = non-qos TID */
+} wmi_peer_reorder_queue_remove_cmd_fixed_param;
+
+
/* DEPRECATED - use wmi_pdev_set_mac_config_response_event_fixed_param instead */
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_soc_set_dual_mac_config_response_event_fixed_param */
@@ -13798,7 +15563,7 @@
* 1 - Allow to connect to MBO AP only
* Bit 1-31 : reserved.
*/
-#define WMI_ROAM_MBO_FLAG_MBO_ONLY_MODE (1<<0)
+#define WMI_ROAM_MBO_FLAG_MBO_ONLY_MODE (1<<0) /* DEPRECATED */
typedef struct {
/* TLV tag and len; tag equals
@@ -13810,7 +15575,7 @@
A_UINT32 enable;
/** MBO flags, refer to definition of MBO flags*/
A_UINT32 flags;
-} wmi_roam_set_mbo_fixed_param;
+} wmi_roam_set_mbo_fixed_param; /* DEPRECATED */
typedef struct {
/* TLV tag and len; tag equals
@@ -14153,10 +15918,6 @@
A_UINT32 toeplitz_hash_ipv6_40;
} wmi_lro_info_cmd_fixed_param;
-/*
- * This structure is used to set the pattern for WOW host wakeup pin pulse
- * pattern confirguration.
- */
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_transfer_data_to_flash_cmd_fixed_param */
A_UINT32 offset; /* flash offset to write, starting from 0 */
@@ -14169,6 +15930,25 @@
A_UINT32 status;
} wmi_transfer_data_to_flash_complete_event_fixed_param;
+typedef struct {
+ /* TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_read_data_from_flash_cmd_fixed_param
+ */
+ A_UINT32 tlv_header;
+ A_UINT32 offset; /* flash offset to read, starting from 0 */
+ A_UINT32 length; /* data length to read, unit: byte */
+} wmi_read_data_from_flash_cmd_fixed_param;
+
+typedef struct {
+ /* TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_read_data_from_flash_event_fixed_param
+ */
+ A_UINT32 tlv_header;
+ A_UINT32 status; /* Return status. 0 for success, non-zero otherwise */
+ A_UINT32 offset; /* flash offset reading from, starting from 0 */
+ A_UINT32 length; /* length of data being reported, unit: byte */
+} wmi_read_data_from_flash_event_fixed_param;
+
typedef enum {
ENHANCED_MCAST_FILTER_DISABLED,
ENHANCED_MCAST_FILTER_ENABLED
@@ -14481,6 +16261,9 @@
#define WMI_ATF_DENOMINATION 1000 /* Expressed in 1 part in 1000 (permille) */
+#define WMI_ATF_SSID_FAIR_SCHED 0 /** Fair ATF scheduling for vdev */
+#define WMI_ATF_SSID_STRICT_SCHED 1 /** Strict ATF scheduling for vdev */
+
typedef struct {
/** TLV tag and len; tag equals
* WMITLV_TAG_STRUC_wmi_atf_peer_info */
@@ -14708,6 +16491,16 @@
} wmi_pdev_get_tpc_cmd_fixed_param;
typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals
+ WMITLV_TAG_STRUC_wmi_pdev_get_chip_power_stats_cmd_fixed_param */
+ /**
+ * pdev_id for identifying the MAC. See macros
+ * starting with WMI_PDEV_ID_ for values.
+ */
+ A_UINT32 pdev_id;
+} wmi_pdev_get_chip_power_stats_cmd_fixed_param;
+
+typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_pdev_tpc_event_fixed_param */
A_UINT32 reserved0; /* for future need */
/*
@@ -14750,17 +16543,73 @@
A_UINT32 cck_level;
} wmi_ani_cck_event_fixed_param;
+typedef enum wmi_power_debug_reg_fmt_type {
+ /* WMI_POWER_DEBUG_REG_FMT_TYPE_ROME -> Dumps following 12 Registers
+ * SOC_SYSTEM_SLEEP
+ * WLAN_SYSTEM_SLEEP
+ * RTC_SYNC_FORCE_WAKE
+ * MAC_DMA_ISR
+ * MAC_DMA_TXRX_ISR
+ * MAC_DMA_ISR_S1
+ * MAC_DMA_ISR_S2
+ * MAC_DMA_ISR_S3
+ * MAC_DMA_ISR_S4
+ * MAC_DMA_ISR_S5
+ * MAC_DMA_ISR_S6
+ * MAC_DMA_ISR_S7
+ */
+ WMI_POWER_DEBUG_REG_FMT_TYPE_ROME,
+ WMI_POWER_DEBUG_REG_FMT_TYPE_MAX = 0xf,
+} WMI_POWER_DEBUG_REG_FMT_TYPE;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals
+ WMITLV_TAG_STRUC_wmi_chip_power_stats_event_fixed_param */
+ A_UINT32 cumulative_sleep_time_ms; /* maximum range is 35 hours, due to conversion from internal 0.03215 ms units to ms */
+ A_UINT32 cumulative_total_on_time_ms; /* maximum range is 35 hours, due to conversion from internal 0.03215 ms units to ms */
+ A_UINT32 deep_sleep_enter_counter; /* count of number of times chip entered deep sleep */
+ A_UINT32 last_deep_sleep_enter_tstamp_ms; /* Last Timestamp when Chip went to deep sleep */
+ A_UINT32 debug_register_fmt; /* WMI_POWER_DEBUG_REG_FMT_TYPE enum, describes debug registers being dumped as part of the event */
+ A_UINT32 num_debug_register; /* number of debug registers being sent to host */
+ /*
+ * Following this structure is the TLV:
+ * A_UINT32 debug_registers[num_debug_registers];
+ */
+} wmi_pdev_chip_power_stats_event_fixed_param;
+
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_ani_ofdm_event_fixed_param */
A_UINT32 ofdm_level;
} wmi_ani_ofdm_event_fixed_param;
typedef enum wmi_coex_config_type {
- WMI_COEX_CONFIG_PAGE_P2P_TDM = 1, /* config interval (arg1 BT, arg2 WLAN) for P2P + PAGE */
- WMI_COEX_CONFIG_PAGE_STA_TDM = 2, /* config interval (arg1 BT, arg2 WLAN) for STA + PAGE */
- WMI_COEX_CONFIG_PAGE_SAP_TDM = 3, /* config interval (arg1 BT, arg2 WLAN) for SAP + PAGE */
- WMI_COEX_CONFIG_DURING_WLAN_CONN = 4, /* config during WLAN connection */
- WMI_COEX_CONFIG_BTC_ENABLE = 5, /* config to enable/disable BTC */
+ /* config interval (arg1 BT, arg2 WLAN) for P2P + PAGE */
+ WMI_COEX_CONFIG_PAGE_P2P_TDM = 1,
+ /* config interval (arg1 BT, arg2 WLAN) for STA + PAGE */
+ WMI_COEX_CONFIG_PAGE_STA_TDM = 2,
+ /* config interval (arg1 BT, arg2 WLAN) for SAP + PAGE */
+ WMI_COEX_CONFIG_PAGE_SAP_TDM = 3,
+ /* config during WLAN connection */
+ WMI_COEX_CONFIG_DURING_WLAN_CONN = 4,
+ /* config to enable/disable BTC */
+ WMI_COEX_CONFIG_BTC_ENABLE = 5,
+ /* config of COEX debug setting */
+ WMI_COEX_CONFIG_COEX_DBG = 6,
+ /* config interval (ms units) (arg1 BT, arg2 WLAN) for P2P + STA + PAGE */
+ WMI_COEX_CONFIG_PAGE_P2P_STA_TDM = 7,
+ /* config interval (ms units) (arg1 BT, arg2 WLAN) for P2P + INQUIRY */
+ WMI_COEX_CONFIG_INQUIRY_P2P_TDM = 8,
+ /* config interval (ms units) (arg1 BT, arg2 WLAN) for STA + INQUIRY */
+ WMI_COEX_CONFIG_INQUIRY_STA_TDM = 9,
+ /* config interval (ms units) (arg1 BT, arg2 WLAN) for SAP + INQUIRY */
+ WMI_COEX_CONFIG_INQUIRY_SAP_TDM = 10,
+ /*
+ * config interval (ms units) (arg1 BT, arg2 WLAN) for P2P + STA +
+ * INQUIRY
+ */
+ WMI_COEX_CONFIG_INQUIRY_P2P_STA_TDM = 11,
+ /* config wlan total tx power when bt coex (arg1 is wlan_tx_power_limit, in 0.5 dBm units) */
+ WMI_COEX_CONFIG_TX_POWER = 12,
} WMI_COEX_CONFIG_TYPE;
typedef struct {
@@ -14816,11 +16665,147 @@
**/
} wmi_pdev_wal_power_debug_cmd_fixed_param;
+typedef struct {
+ /**
+ * TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_pdev_set_reorder_timeout_val_cmd_fixed_param
+ */
+ A_UINT32 tlv_header;
+ /**
+ * @brief rx_timeout_pri - what rx reorder timeout (ms) to use for the AC
+ * @details
+ * Each WMM access category (voice, video, best-effort, background)
+ * will have its own timeout value to dictate how long to wait for
+ * missing rx MPDUs to arrive before releasing subsequent MPDUs that
+ * have already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * access category.
+ * The array elements are indexed by the WMI_AC enum to identify
+ * which array element corresponds to which AC / traffic type.
+ */
+ A_UINT32 rx_timeout_pri[WMI_AC_MAX];
+} wmi_pdev_set_reorder_timeout_val_cmd_fixed_param;
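A sketch (the example_ name is hypothetical) of filling the per-AC timeouts; the indices follow the WMI_AC enum, whose ordering is not shown in this hunk, so a uniform value is used here:

static void example_set_reorder_timeouts(wmi_pdev_set_reorder_timeout_val_cmd_fixed_param *cmd)
{
    A_UINT32 ac;
    for (ac = 0; ac < WMI_AC_MAX; ac++)
        cmd->rx_timeout_pri[ac] = 40; /* ms; real per-AC tuning would differ */
}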
+
+/**
+ * wlan stats shall be understood as per-period values.
+ * Generally, they are reported periodically based on the period specified by host.
+ * But if the variation of one stat compared to the
+ * previous notification exceeds a threshold,
+ * FW will report the wlan stats immediately.
+ * The values of the stats become the new reference to compute variations.
+ * This threshold can be a global setting or per category.
+ * Host can enable/disable the mechanism for any stats per bitmap.
+ * TX/RX thresholds (percentage values) are shared across ACs,
+ * and TX/RX stats comparisons are processed per AC of each peer.
+ * For example, if bit 0 (stand for tx_mpdus) of tx_thresh_bitmap is set to 1,
+ * and the detailed tx_mpdus threshold value is set to 10%,
+ * suppose tx_mpdus value of BE of peer 0 is 100 in first period,
+ * and it reaches 110 during the second period,
+ * FW will generate and send out a wlan stats event immediately.
+ */
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_pdev_set_stats_threshold_cmd_fixed_param */
+ /** Indicate if threshold mechanism is enabled or disabled.
+ * It is disabled by default.
+ * Host can enable and disable it dynamically.
+ */
+ A_UINT32 enable_thresh;
+ /** use_thresh_bitmap equal to 0 means gbl_thresh is used.
+ * When use_thresh_bitmap equals 1, ignore gbl_thresh and use the thresholds indicated by the stats bitmaps.
+ */
+ A_UINT32 use_thresh_bitmap;
+ /** global threshold, valid when use_thresh_bitmap equals 0.
+ * It takes effect for all counters.
+ * If use_thresh_bitmap == 0 && gbl_thresh == 0, disable threshold mechanism.
+ */
+ A_UINT32 gbl_thresh;
+ /** Enable/disable bitmap for threshold mechanism of CCA stats */
+ A_UINT32 cca_thresh_enable_bitmap;
+ /** Enable/disable bitmap for threshold mechanism of signal stats */
+ A_UINT32 signal_thresh_enable_bitmap;
+ /** Enable/disable bitmap for threshold mechanism of TX stats */
+ A_UINT32 tx_thresh_enable_bitmap;
+ /** Enable/disable bitmap for threshold mechanism of RX stats */
+ A_UINT32 rx_thresh_enable_bitmap;
+ /* This TLV is followed by TLVs below:
+ * wmi_chan_cca_stats_thresh cca_thresh;
+ * wmi_peer_signal_stats_thresh signal_thresh;
+ * wmi_tx_stats_thresh tx_thresh;
+ * wmi_rx_stats_thresh rx_thresh;
+ */
+} wmi_pdev_set_stats_threshold_cmd_fixed_param;
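As a hedged sketch of the per-stat threshold selection (the example_ name is hypothetical; the detailed percentage values live in the wmi_tx_stats_thresh TLV that follows and are not shown):

static void example_enable_tx_thresholds(wmi_pdev_set_stats_threshold_cmd_fixed_param *cmd)
{
    cmd->enable_thresh               = 1;   /* turn the threshold mechanism on */
    cmd->use_thresh_bitmap           = 1;   /* per-stat bitmaps, gbl_thresh ignored */
    cmd->gbl_thresh                  = 0;
    cmd->cca_thresh_enable_bitmap    = 0;
    cmd->signal_thresh_enable_bitmap = 0;
    cmd->tx_thresh_enable_bitmap     = 0x1; /* e.g. bit 0 = tx_mpdus, per the comment above */
    cmd->rx_thresh_enable_bitmap     = 0;
}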
+
+typedef enum {
+ WMI_REQUEST_WLAN_TX_STAT = 0x01,
+ WMI_REQUEST_WLAN_RX_STAT = 0x02,
+ WMI_REQUEST_WLAN_CCA_STAT = 0x04,
+ WMI_REQUEST_WLAN_SIGNAL_STAT = 0x08,
+} wmi_wlan_stats_id;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_request_wlan_stats_cmd_fixed_param */
+ wmi_wlan_stats_id stats_id;
+} wmi_request_wlan_stats_cmd_fixed_param;
+
+typedef enum {
+ WMI_REQUEST_ONE_PEER_STATS_INFO = 0x01, /* request stats of one specified peer */
+ WMI_REQUEST_VDEV_ALL_PEER_STATS_INFO = 0x02, /* request stats of all peers belonging to the specified VDEV */
+} wmi_peer_stats_info_request_type;
+
+/** It is required to issue WMI_PDEV_PARAM_PEER_STATS_INFO_ENABLE
+* (with a non-zero value) before issuing the first REQUEST_PEER_STATS_INFO.
+*/
+typedef struct {
+ /** TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_request_peer_stats_info_cmd_fixed_param */
+ A_UINT32 tlv_header;
+ /** request_type to indicate if only stats of
+ * one peer or all peers of the VDEV are requested,
+ * see wmi_peer_stats_info_request_type.
+ */
+ A_UINT32 request_type;
+ /** VDEV identifier */
+ A_UINT32 vdev_id;
+ /** this peer_macaddr is only used if request_type == ONE_PEER_STATS_INFO */
+ wmi_mac_addr peer_macaddr;
+ /** flag to indicate if FW needs to reset requested peers stats */
+ A_UINT32 reset_after_request;
+} wmi_request_peer_stats_info_cmd_fixed_param;
+
typedef enum {
WLAN_2G_CAPABILITY = 0x1,
WLAN_5G_CAPABILITY = 0x2,
} WLAN_BAND_CAPABILITY;
+typedef enum wmi_hw_mode_config_type {
+ /* Only one PHY is active. */
+ WMI_HW_MODE_SINGLE = 0,
+ /**
+ * Both PHYs are active in different bands, one in 2G
+ * and another in 5G.
+ */
+ WMI_HW_MODE_DBS = 1,
+ /**
+ * Both PHYs are in passive mode (only rx) in same band;
+ * no tx allowed.
+ */
+ WMI_HW_MODE_SBS_PASSIVE = 2,
+ /**
+ * Both PHYs are active in the same band.
+ * Support for both PHYs within one band is planned for 5G only
+ * (as indicated in WMI_MAC_PHY_CAPABILITIES),
+ * but could be extended to other bands in the future.
+ * The separation of the band between the two PHYs needs to be
+ * communicated separately.
+ */
+ WMI_HW_MODE_SBS = 3,
+ /**
+ * 3 PHYs, with 2 on the same band doing SBS
+ * as in WMI_HW_MODE_SBS, and 3rd on the other band
+ */
+ WMI_HW_MODE_DBS_SBS = 4,
+} WMI_HW_MODE_CONFIG_TYPE;
+
#define WMI_SUPPORT_11B_GET(flags) WMI_GET_BITS(flags, 0, 1)
#define WMI_SUPPORT_11B_SET(flags, value) WMI_SET_BITS(flags, 0, 1, value)
@@ -14839,6 +16824,9 @@
#define WMI_SUPPORT_11AX_GET(flags) WMI_GET_BITS(flags, 5, 1)
#define WMI_SUPPORT_11AX_SET(flags, value) WMI_SET_BITS(flags, 5, 1, value)
+#define WMI_MAX_MUBFEE_GET(flags) WMI_GET_BITS(flags, 28, 4)
+#define WMI_MAX_MUBFEE_SET(flags, value) WMI_SET_BITS(flags, 28, 4, value)
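A minimal sketch (the example_ name is hypothetical) of composing the supported_flags word with the accessor macros above:

static A_UINT32 example_supported_flags(void)
{
    A_UINT32 flags = 0;
    WMI_SUPPORT_11AX_SET(flags, 1); /* advertise 11ax support */
    WMI_MAX_MUBFEE_SET(flags, 4);   /* up to 4 MU beamformees on this MAC */
    return flags;                   /* matches the supported_flags union below */
}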
+
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_WMI_MAC_PHY_CAPABILITIES */
/* hw_mode_id - identify a particular set of HW characteristics, as specified
@@ -14851,7 +16839,7 @@
A_UINT32 pdev_id;
/* phy id. Starts with 0 */
A_UINT32 phy_id;
- /* supported modulations */
+ /* supported modulations and number of MU beamformees */
union {
struct {
A_UINT32 supports_11b:1,
@@ -14859,7 +16847,12 @@
supports_11a:1,
supports_11n:1,
supports_11ac:1,
- supports_11ax:1;
+ supports_11ax:1,
+
+ unused:22,
+
+ /* max MU beamformees supported per MAC */
+ max_mubfee:4;
};
A_UINT32 supported_flags;
};
@@ -14910,6 +16903,11 @@
A_UINT32 tx_chain_mask_5G;
/* Valid Receive chain mask */
A_UINT32 rx_chain_mask_5G;
+ /* HE capability phy field of 802.11ax, WMI_HE_CAP defines */
+ A_UINT32 he_cap_phy_info_2G[WMI_MAX_HECAP_PHY_SIZE];
+ A_UINT32 he_cap_phy_info_5G[WMI_MAX_HECAP_PHY_SIZE];
+ wmi_ppe_threshold he_ppet2G;
+ wmi_ppe_threshold he_ppet5G;
} WMI_MAC_PHY_CAPABILITIES;
typedef struct {
@@ -14917,12 +16915,21 @@
/* hw_mode_id - identify a particular set of HW characteristics,
* as specified by the subsequent fields */
A_UINT32 hw_mode_id;
- /* BIT0 represents phy_id 0, BIT1 represent phy_id 1 and so on */
+ /**
+ * BIT0 represents phy_id 0, BIT1 represent phy_id 1 and so on.
+ * Number of bits set in phy_id_map represents number of
+ * WMI_MAC_PHY_CAPABILITIES TLV's, one for each active PHY for current HW
+ * mode identified by hw_mode_id. For example, for DBS/SBS mode there will
+ * be 2 WMI_MAC_PHY_CAPABILITIES TLVs and for single MAC modes it will be
+ * 1 WMI_MAC_PHY_CAPABILITIES TLV.
+ */
A_UINT32 phy_id_map;
- /* number of bits set in phy_id_map represents number of WMI_MAC_PHY_CAPABILITIES TLV's
- * one for each active PHY for current HW mode identified by hw_mode_id. For example for
- * DBS/SBS mode there will be 2 WMI_MAC_PHY_CAPABILITIES TLVs and for single MAC modes it
- * will be 1 WMI_MAC_PHY_CAPABILITIES TLVs */
+ /**
+ * hw_mode_config_type
+ * Identify a particular type of HW mode such as SBS, DBS etc.
+ * Refer to WMI_HW_MODE_CONFIG_TYPE values.
+ */
+ A_UINT32 hw_mode_config_type;
} WMI_HW_MODE_CAPABILITIES;
typedef struct {
@@ -14975,13 +16982,90 @@
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_scan_adaptive_dwell_config_fixed_param */
/* globally enable/disable adaptive dwell */
A_UINT32 enable;
-/**
- * followed by TLV (tag length value) parameters array
- * The TLV's are:
- * wmi_scan_adaptive_dwell_parameters_tlv param[]; (0 or 1 elements)
- */
+ /**
+ * pdev_id for identifying the MAC. See macros starting with
+ * WMI_PDEV_ID_ for values. In non-DBDC case host should set it to 0
+ */
+ A_UINT32 pdev_id;
+ /**
+ * followed by TLV (tag length value) parameters array
+ * The TLV's are:
+ * wmi_scan_adaptive_dwell_parameters_tlv param[]; (0 or 1 elements)
+ */
} wmi_scan_adaptive_dwell_config_fixed_param;
+typedef struct {
+ /** TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_coex_get_antenna_isolation_cmd_fixed_param */
+ A_UINT32 tlv_header;
+ /* Currently there are no parameters for this message. */
+} wmi_coex_get_antenna_isolation_cmd_fixed_param;
+
+typedef struct {
+ /** TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_coex_report_isolation_event_fixed_param */
+ A_UINT32 tlv_header;
+ /** Antenna isolation value in dB units; a non-zero value is valid, while 0 means the isolation measurement failed or the corresponding chain is not active.
+ * Currently the HW descriptor only supports 4 chains at most.
+ * Further isolation_chainX elements can be added in the future
+ * for additional chains, if needed.
+ */
+ A_UINT32 isolation_chain0:8, /* [7:0], isolation value for chain 0 */
+ isolation_chain1:8, /* [15:8], isolation value for chain 1 */
+ isolation_chain2:8, /* [23:16], isolation value for chain 2 */
+ isolation_chain3:8; /* [31:24], isolation value for chain 3 */
+} wmi_coex_report_isolation_event_fixed_param;
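For illustration (the example_ name is hypothetical), the packed per-chain isolation values are read directly from the bit-fields; 0 means the measurement failed or the chain is inactive:

static void example_read_isolation(const wmi_coex_report_isolation_event_fixed_param *ev)
{
    A_UINT32 iso[4];
    iso[0] = ev->isolation_chain0; /* dB, chain 0 */
    iso[1] = ev->isolation_chain1; /* dB, chain 1 */
    iso[2] = ev->isolation_chain2; /* dB, chain 2 */
    iso[3] = ev->isolation_chain3; /* dB, chain 3 */
    (void)iso;
}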
+
+typedef enum {
+ WMI_RCPI_MEASUREMENT_TYPE_AVG_MGMT = 1,
+ WMI_RCPI_MEASUREMENT_TYPE_AVG_DATA = 2,
+ WMI_RCPI_MEASUREMENT_TYPE_LAST_MGMT = 3,
+ WMI_RCPI_MEASUREMENT_TYPE_LAST_DATA = 4,
+} wmi_rcpi_measurement_type;
+
+typedef struct {
+ /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_request_rcpi_cmd_fixed_param */
+ A_UINT32 tlv_header;
+ /* VDEV identifier */
+ A_UINT32 vdev_id;
+ /* peer MAC address */
+ wmi_mac_addr peer_macaddr;
+ /* measurement type - defined in enum wmi_rcpi_measurement_type */
+ A_UINT32 measurement_type;
+} wmi_request_rcpi_cmd_fixed_param;
+
+typedef struct {
+ /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_update_rcpi_event_fixed_param */
+ A_UINT32 tlv_header;
+ /* VDEV identifier */
+ A_UINT32 vdev_id;
+ /* peer MAC address */
+ wmi_mac_addr peer_macaddr;
+ /* measurement type - defined in enum wmi_rcpi_measurement_type */
+ A_UINT32 measurement_type;
+ /* Measured RCPI in dBm of the peer requested by host */
+ A_INT32 rcpi;
+ /** status
+ * 0 - Requested peer RCPI available
+ * 1 - Requested peer RCPI not available
+ */
+ A_UINT32 status;
+} wmi_update_rcpi_event_fixed_param;
+
+/* Definition of mask for various package id */
+#define WMI_PKGID_MASK_AUTO 0x00000080
+
+typedef struct {
+ /** TLV tag and len; tag equals*/
+ A_UINT32 tlv_header;
+ /**
+ * The value field is filled with WMI_PKGID_MASK values.
+ * Currently, the only flag used within values is
+ * WMI_PKGID_MASK_AUTO, where bit7=1 for automotive systems.
+ */
+ A_UINT32 value;
+} wmi_pkgid_event_fixed_param;
+
/* ADD NEW DEFS HERE */
/*****************************************************************************
diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_version.h b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_version.h
index f2e272c..7154910 100644
--- a/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_version.h
+++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/COMMON/wmi_version.h
@@ -36,7 +36,7 @@
#define __WMI_VER_MINOR_ 0
/** WMI revision number has to be incremented when there is a
* change that may or may not break compatibility. */
-#define __WMI_REVISION_ 260
+#define __WMI_REVISION_ 326
/** The Version Namespace should not be normally changed. Only
* host and firmware of the same WMI namespace will work
diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.c b/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.c
index 47992c8..272f152 100644
--- a/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.c
+++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.c
@@ -6976,6 +6976,9 @@
len += vos_scnprintf(buf + len, *size - len,
"\n wow_ipv6_mcast_na_stats %d",
wma_handle->wow_ipv6_mcast_na_stats);
+ len += vos_scnprintf(buf + len, *size - len,
+ "\n wow_oem_response_wake_up_count %d",
+ wma_handle->wow_oem_response_wake_up_count);
*size -= len;
*buf_ptr += len;
@@ -8813,6 +8816,8 @@
u_int8_t SSID_num;
int i;
int len = sizeof(*cmd);
+ wmi_vendor_oui *voui = NULL;
+ struct vendor_oui *pvoui = NULL;
tpAniSirGlobal pMac = (tpAniSirGlobal )vos_get_context(VOS_MODULE_ID_PE,
wma_handle->vos_context);
@@ -8837,6 +8842,10 @@
if (scan_req->uIEFieldLen)
len += roundup(scan_req->uIEFieldLen, sizeof(u_int32_t));
+ len += WMI_TLV_HDR_SIZE; /* Length of TLV for array of wmi_vendor_oui */
+ if (scan_req->num_vendor_oui)
+ len += scan_req->num_vendor_oui * sizeof(wmi_vendor_oui);
+
/* Allocate the memory */
*buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
if (!*buf) {
@@ -8927,6 +8936,15 @@
*/
cmd->burst_duration = 0;
+ /* mac randomization attributes */
+ if (scan_req->enable_scan_randomization) {
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ WMI_CHAR_ARRAY_TO_MAC_ADDR(scan_req->mac_addr, &cmd->mac_addr);
+ WMI_CHAR_ARRAY_TO_MAC_ADDR(scan_req->mac_addr_mask,
+ &cmd->mac_mask);
+ }
+
if (!scan_req->p2pScanType) {
WMA_LOGD("Normal Scan request");
cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
@@ -8938,6 +8956,15 @@
cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ if (scan_req->ie_whitelist) {
+ cmd->scan_ctrl_flags |=
+ WMI_SCAN_ENABLE_IE_WHITELIST_IN_PROBE_REQ;
+ for (i = 0; i < PROBE_REQ_BITMAP_LEN; i++)
+ cmd->ie_bitmap[i] =
+ scan_req->probe_req_ie_bitmap[i];
+ }
+
+ cmd->num_vendor_oui = scan_req->num_vendor_oui;
/*
* Decide burst_duration and dwell_time_active based on
* what type of devices are active.
@@ -9137,6 +9164,29 @@
}
buf_ptr += WMI_TLV_HDR_SIZE + ie_len_with_pad;
+ /* mac randomization */
+ WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
+ scan_req->num_vendor_oui *
+ sizeof(wmi_vendor_oui));
+
+ buf_ptr += WMI_TLV_HDR_SIZE;
+
+ if (cmd->num_vendor_oui != 0) {
+ voui = (wmi_vendor_oui *)buf_ptr;
+ pvoui = (struct vendor_oui *)((u_int8_t *)scan_req +
+ (scan_req->oui_field_offset));
+ for (i = 0; i < cmd->num_vendor_oui; i++) {
+ WMITLV_SET_HDR(&voui[i].tlv_header,
+ WMITLV_TAG_STRUC_wmi_vendor_oui,
+ WMITLV_GET_STRUCT_TLVLEN(
+ wmi_vendor_oui));
+ voui[i].oui_type_subtype = pvoui[i].oui_type |
+ (pvoui[i].oui_subtype << 24);
+ }
+ buf_ptr += cmd->num_vendor_oui *
+ sizeof(wmi_vendor_oui);
+ }
+
*buf_len = len;
return VOS_STATUS_SUCCESS;
error:
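Both the scan path above and the PNO/probe-OUI paths later in this file pack the 3-byte OUI and the 1-byte subtype into a single oui_type_subtype word. A standalone sketch of that packing and its inverse, assuming oui_type already carries the OUI in its low 24 bits as struct vendor_oui does here:

    /* Illustration only: mirrors oui_type | (oui_subtype << 24) used above. */
    #include <stdint.h>

    static inline uint32_t example_pack_oui(uint32_t oui_type, uint32_t oui_subtype)
    {
            return oui_type | (oui_subtype << 24);
    }

    static inline void example_unpack_oui(uint32_t packed, uint32_t *oui_type,
                                          uint32_t *oui_subtype)
    {
            *oui_type = packed & 0x00ffffff;
            *oui_subtype = packed >> 24;
    }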
@@ -19419,17 +19469,24 @@
u_int8_t *buf_ptr;
u_int8_t i;
int ret;
+ wmi_vendor_oui *voui = NULL;
+ struct vendor_oui *pvoui = NULL;
WMA_LOGD("PNO Start");
len = sizeof(*cmd) +
WMI_TLV_HDR_SIZE + /* TLV place holder for array of structures nlo_configured_parameters(nlo_list) */
- WMI_TLV_HDR_SIZE; /* TLV place holder for array of uint32 channel_list */
+ WMI_TLV_HDR_SIZE + /* TLV place holder for array of uint32 channel_list */
+ WMI_TLV_HDR_SIZE + /* TLV of nlo_channel_prediction_cfg */
+ WMI_TLV_HDR_SIZE; /* array of wmi_vendor_oui */
len += sizeof(u_int32_t) * MIN(pno->aNetworks[0].ucChannelCount,
WMI_NLO_MAX_CHAN);
len += sizeof(nlo_configured_parameters) *
MIN(pno->ucNetworksCount, WMI_NLO_MAX_SSIDS);
+ /* Add the fixed length of enlo_candidate_score_params */
+ len += sizeof(enlo_candidate_score_params);
+ len += sizeof(wmi_vendor_oui) * pno->num_vendor_oui;
buf = wmi_buf_alloc(wma->wmi_handle, len);
if (!buf) {
@@ -19461,6 +19518,25 @@
cmd->fast_scan_period, cmd->slow_scan_period);
WMA_LOGD("fast_scan_max_cycles: %d", cmd->fast_scan_max_cycles);
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ WMI_CHAR_ARRAY_TO_MAC_ADDR(pno->mac_addr, &cmd->mac_addr);
+ WMI_CHAR_ARRAY_TO_MAC_ADDR(pno->mac_addr_mask, &cmd->mac_mask);
+ }
+
+ if (pno->ie_whitelist)
+ cmd->flags |= WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ;
+
+ WMA_LOGI("pno flags = %x", cmd->flags);
+
+ cmd->num_vendor_oui = pno->num_vendor_oui;
+
+ if (pno->ie_whitelist) {
+ for (i = 0; i < PROBE_REQ_BITMAP_LEN; i++)
+ cmd->ie_bitmap[i] = pno->probe_req_ie_bitmap[i];
+ }
+
buf_ptr += sizeof(wmi_nlo_config_cmd_fixed_param);
cmd->no_of_ssids = MIN(pno->ucNetworksCount, WMI_NLO_MAX_SSIDS);
@@ -19521,6 +19597,37 @@
}
buf_ptr += cmd->num_of_channels * sizeof(u_int32_t);
+ /*
+ * PNO start itself does not use nlo_channel_prediction_cfg, but an empty
+ * TLV header must still be written so that the wmi_vendor_oui array that
+ * follows lands at the offset the firmware parser expects.
+ */
+ WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, 0);
+ buf_ptr += WMI_TLV_HDR_SIZE; /* zero no.of nlo_channel_prediction_cfg */
+ WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_STRUC_enlo_candidate_score_param,
+ WMITLV_GET_STRUCT_TLVLEN(enlo_candidate_score_params));
+ buf_ptr += sizeof(enlo_candidate_score_params);
+
+ /* ie white list */
+ WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
+ pno->num_vendor_oui *
+ sizeof(wmi_vendor_oui));
+
+ buf_ptr += WMI_TLV_HDR_SIZE;
+
+ if (cmd->num_vendor_oui != 0) {
+ voui = (wmi_vendor_oui *)buf_ptr;
+ pvoui = (struct vendor_oui *)((uint8_t *)pno + sizeof(*pno));
+ for (i = 0; i < cmd->num_vendor_oui; i++) {
+ WMITLV_SET_HDR(&voui[i].tlv_header,
+ WMITLV_TAG_STRUC_wmi_vendor_oui,
+ WMITLV_GET_STRUCT_TLVLEN(
+ wmi_vendor_oui));
+ voui[i].oui_type_subtype = pvoui[i].oui_type |
+ (pvoui[i].oui_subtype << 24);
+ }
+ buf_ptr += cmd->num_vendor_oui * sizeof(wmi_vendor_oui);
+ }
+
/* TODO: Discrete firmware doesn't have command/option to configure
* App IE which comes from wpa_supplicant as of part PNO start request.
*/
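As the comment in the hunk above notes, TLVs that PNO start does not use still need empty headers so that later arrays land at the offsets the firmware expects, and the allocation must cover them as well. A hedged sketch of the corresponding length accounting, with placeholder parameter names and the WMI headers assumed to be included:

    /* Illustrative only: total buffer length for the nlo_config command,
     * matching the packing order used above (counts are assumed to be
     * already clamped to the WMI maxima). */
    #include <stddef.h>
    #include <stdint.h>

    static size_t example_nlo_cmd_len(uint32_t n_ssids, uint32_t n_channels,
                                      uint32_t n_vendor_oui)
    {
            return sizeof(wmi_nlo_config_cmd_fixed_param)
                 + WMI_TLV_HDR_SIZE + n_ssids * sizeof(nlo_configured_parameters)
                 + WMI_TLV_HDR_SIZE + n_channels * sizeof(uint32_t)
                 + WMI_TLV_HDR_SIZE                     /* empty channel_prediction_cfg array */
                 + sizeof(enlo_candidate_score_params)  /* tlv_header embedded in the struct */
                 + WMI_TLV_HDR_SIZE + n_vendor_oui * sizeof(wmi_vendor_oui);
    }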
@@ -19980,6 +20087,8 @@
return "REASSOC_RES_RECV";
case WOW_REASON_ACTION_FRAME_RECV:
return "ACTION_FRAME_RECV";
+ case WOW_REASON_OEM_RESPONSE_EVENT:
+ return "WOW_REASON_OEM_RESPONSE_EVENT";
}
return "unknown";
}
@@ -20481,7 +20590,7 @@
*/
static void wma_wow_wake_up_stats_display(tp_wma_handle wma)
{
- WMA_LOGA("uc %d bc %d v4_mc %d v6_mc %d ra %d ns %d na %d pno_match %d pno_complete %d gscan %d low_rssi %d rssi_breach %d icmp %d icmpv6 %d",
+ WMA_LOGA("uc %d bc %d v4_mc %d v6_mc %d ra %d ns %d na %d pno_match %d pno_complete %d gscan %d low_rssi %d rssi_breach %d icmp %d icmpv6 %d oem %d",
wma->wow_ucast_wake_up_count,
wma->wow_bcast_wake_up_count,
wma->wow_ipv4_mcast_wake_up_count,
@@ -20495,7 +20604,8 @@
wma->wow_low_rssi_wake_up_count,
wma->wow_rssi_breach_wake_up_count,
wma->wow_icmpv4_count,
- wma->wow_icmpv6_count);
+ wma->wow_icmpv6_count,
+ wma->wow_oem_response_wake_up_count);
return;
}
@@ -20620,6 +20730,10 @@
wma->wow_rssi_breach_wake_up_count++;
break;
+ case WOW_REASON_OEM_RESPONSE_EVENT:
+ wma->wow_oem_response_wake_up_count++;
+ break;
+
default:
WMA_LOGE("Unknown wake up reason");
break;
@@ -21416,6 +21530,26 @@
}
break;
#endif
+
+ case WOW_REASON_OEM_RESPONSE_EVENT:
+ wma_wow_wake_up_stats(wma, NULL, 0,
+ WOW_REASON_OEM_RESPONSE_EVENT);
+ if (param_buf->wow_packet_buffer) {
+ WMA_LOGD(FL("Host woken up by OEM Response event"));
+ wow_buf_pkt_len =
+ *(uint32_t *)param_buf->wow_packet_buffer;
+ vos_trace_hex_dump(VOS_MODULE_ID_WDA,
+ VOS_TRACE_LEVEL_DEBUG,
+ param_buf->wow_packet_buffer,
+ wow_buf_pkt_len);
+ wma_oem_data_response_handler(handle,
+ (uint8_t*)(param_buf->wow_packet_buffer),
+ wow_buf_pkt_len);
+ } else {
+ WMA_LOGD(FL("No wow_packet_buffer for OEM response"));
+ }
+ break;
+
default:
break;
}
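The OEM-response wake case reads a 32-bit length from the front of wow_packet_buffer before hex-dumping and forwarding it; the exact framing is firmware-defined. A minimal sketch of that assumed layout, using memcpy to avoid the unaligned dereference (names are placeholders):

    /* Illustration only: wow_packet_buffer is assumed to start with a 32-bit
     * length word followed by the OEM payload. */
    #include <stdint.h>
    #include <string.h>

    static uint32_t example_wow_pkt_len(const void *wow_packet_buffer)
    {
            uint32_t len;

            memcpy(&len, wow_packet_buffer, sizeof(len));
            return len;
    }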
@@ -21505,6 +21639,8 @@
return "WOW_NAN_DATA_EVENT";
case WOW_TDLS_CONN_TRACKER_EVENT:
return "WOW_TDLS_CONN_TRACKER_EVENT";
+ case WOW_OEM_RESPONSE_EVENT:
+ return "WOW_OEM_RESPONSE_EVENT";
default:
return "UNSPECIFIED_EVENT";
}
@@ -26716,7 +26852,6 @@
cmd->scan_ctrl_flags = WMI_SCAN_ADD_BCAST_PROBE_REQ |
WMI_SCAN_ADD_CCK_RATES |
WMI_SCAN_ADD_OFDM_RATES |
- WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ |
WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
cmd->scan_priority = WMI_SCAN_PRIORITY_HIGH;
cmd->num_ssids = 0;
@@ -27946,13 +28081,17 @@
uint32_t len;
u_int8_t *buf_ptr;
u_int32_t *oui_buf;
+ uint32_t i = 0;
+ wmi_vendor_oui *voui = NULL;
+ struct vendor_oui *pvoui = NULL;
if (!wma || !wma->wmi_handle) {
WMA_LOGE("%s: WMA is closed, can not issue cmd",
__func__);
return VOS_STATUS_E_INVAL;
}
- len = sizeof(*cmd);
+ len = sizeof(*cmd) + WMI_TLV_HDR_SIZE +
+ psetoui->num_vendor_oui * sizeof(wmi_vendor_oui);
wmi_buf = wmi_buf_alloc(wma->wmi_handle, len);
if (!wmi_buf) {
WMA_LOGE("%s: wmi_buf_alloc failed", __func__);
@@ -27972,6 +28111,44 @@
WMA_LOGD("%s: wma:oui received from hdd %08x", __func__,
cmd->prob_req_oui);
+ cmd->vdev_id = psetoui->vdev_id;
+ cmd->flags = WMI_SCAN_PROBE_OUI_SPOOFED_MAC_IN_PROBE_REQ;
+ if (psetoui->enb_probe_req_sno_randomization)
+ cmd->flags |= WMI_SCAN_PROBE_OUI_RANDOM_SEQ_NO_IN_PROBE_REQ;
+
+ if (psetoui->ie_whitelist)
+ cmd->flags |=
+ WMI_SCAN_PROBE_OUI_ENABLE_IE_WHITELIST_IN_PROBE_REQ;
+
+ WMA_LOGI(FL("vdev_id = %d, flags = %x"), cmd->vdev_id, cmd->flags);
+
+ cmd->num_vendor_oui = psetoui->num_vendor_oui;
+
+ if (psetoui->ie_whitelist) {
+ for (i = 0; i < PROBE_REQ_BITMAP_LEN; i++)
+ cmd->ie_bitmap[i] = psetoui->probe_req_ie_bitmap[i];
+ }
+
+ buf_ptr += sizeof(*cmd);
+ WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
+ psetoui->num_vendor_oui *
+ sizeof(wmi_vendor_oui));
+
+ buf_ptr += WMI_TLV_HDR_SIZE;
+ if (cmd->num_vendor_oui != 0) {
+ voui = (wmi_vendor_oui *)buf_ptr;
+ pvoui = (struct vendor_oui *)((u_int8_t *)psetoui +
+ sizeof(*psetoui));
+ for (i = 0; i < cmd->num_vendor_oui; i++) {
+ WMITLV_SET_HDR(&voui[i].tlv_header,
+ WMITLV_TAG_STRUC_wmi_vendor_oui,
+ WMITLV_GET_STRUCT_TLVLEN(
+ wmi_vendor_oui));
+ voui[i].oui_type_subtype = pvoui[i].oui_type |
+ (pvoui[i].oui_subtype << 24);
+ }
+ }
+
if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
WMI_SCAN_PROB_REQ_OUI_CMDID)) {
WMA_LOGE("%s: failed to send command", __func__);
diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.h b/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.h
index c7e6d2e..cd3a386 100644
--- a/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.h
+++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/WMA/wma.h
@@ -886,6 +886,7 @@
uint32_t wow_ipv6_mcast_na_stats;
uint32_t wow_icmpv4_count;
uint32_t wow_icmpv6_count;
+ uint32_t wow_oem_response_wake_up_count;
uint32_t wow_wakeup_enable_mask;
uint32_t wow_wakeup_disable_mask;
uint16_t max_mgmt_tx_fail_count;
diff --git a/drivers/staging/qcacld-2.0/CORE/SERVICES/WMI/wmi_unified.c b/drivers/staging/qcacld-2.0/CORE/SERVICES/WMI/wmi_unified.c
index 11107d6..fa2ef69 100644
--- a/drivers/staging/qcacld-2.0/CORE/SERVICES/WMI/wmi_unified.c
+++ b/drivers/staging/qcacld-2.0/CORE/SERVICES/WMI/wmi_unified.c
@@ -704,6 +704,28 @@
CASE_RETURN_STRING(WMI_DBGLOG_TIME_STAMP_SYNC_CMDID);
CASE_RETURN_STRING(WMI_P2P_LISTEN_OFFLOAD_START_CMDID);
CASE_RETURN_STRING(WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID);
+ CASE_RETURN_STRING(WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
+ CASE_RETURN_STRING(WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
+ CASE_RETURN_STRING(WMI_SET_MULTIPLE_MCAST_FILTER_CMDID);
+ CASE_RETURN_STRING(WMI_READ_DATA_FROM_FLASH_CMDID);
+ CASE_RETURN_STRING(WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID);
+ CASE_RETURN_STRING(WMI_PEER_SET_RX_BLOCKSIZE_CMDID);
+ CASE_RETURN_STRING(WMI_PDEV_SET_WAKEUP_CONFIG_CMDID);
+ CASE_RETURN_STRING(WMI_PDEV_GET_ANTDIV_STATUS_CMDID);
+ CASE_RETURN_STRING(WMI_PEER_ANTDIV_INFO_REQ_CMDID);
+ CASE_RETURN_STRING(WMI_MNT_FILTER_CMDID);
+ CASE_RETURN_STRING(WMI_PDEV_GET_CHIP_POWER_STATS_CMDID);
+ CASE_RETURN_STRING(WMI_COEX_GET_ANTENNA_ISOLATION_CMDID);
+ CASE_RETURN_STRING(WMI_PDEV_SET_STATS_THRESHOLD_CMDID);
+ CASE_RETURN_STRING(WMI_REQUEST_WLAN_STATS_CMDID);
+ CASE_RETURN_STRING(WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID);
+ CASE_RETURN_STRING(WMI_SAR_LIMITS_CMDID);
+ CASE_RETURN_STRING(WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
+ CASE_RETURN_STRING(WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID);
+ CASE_RETURN_STRING(WMI_VDEV_ADFS_CH_CFG_CMDID);
+ CASE_RETURN_STRING(WMI_VDEV_ADFS_OCAC_ABORT_CMDID);
+ CASE_RETURN_STRING(WMI_REQUEST_RCPI_CMDID);
+ CASE_RETURN_STRING(WMI_REQUEST_PEER_STATS_INFO_CMDID);
}
return "Invalid WMI cmd";
}
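The command-name table extended above relies on the CASE_RETURN_STRING stringification idiom. A generic form of that pattern, not necessarily the exact in-tree definition, is sketched below:

    /* Generic case-to-string pattern; the real macro may differ in casts. */
    enum example_cmd { EXAMPLE_CMDID_FIRST = 1 };

    #define EXAMPLE_CASE_RETURN_STRING(x) case x: return #x

    static const char *example_cmd_name(enum example_cmd id)
    {
            switch (id) {
            EXAMPLE_CASE_RETURN_STRING(EXAMPLE_CMDID_FIRST);
            default:
                    return "Invalid WMI cmd";
            }
    }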
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/inc/btcApi.h b/drivers/staging/qcacld-2.0/CORE/SME/inc/btcApi.h
deleted file mode 100644
index 9a06f7c..0000000
--- a/drivers/staging/qcacld-2.0/CORE/SME/inc/btcApi.h
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/******************************************************************************
-*
-* Name: btcApi.h
-*
-* Description: BTC Events Layer API definitions.
-*
-
-*
-******************************************************************************/
-
-#ifndef __BTC_API_H__
-#define __BTC_API_H__
-
-#include "vos_types.h"
-#include "vos_timer.h"
-#include "vos_nvitem.h"
-
-#define BT_INVALID_CONN_HANDLE (0xFFFF) /**< Invalid connection handle */
-
-/* ACL and Sync connection attempt results */
-#define BT_CONN_STATUS_FAIL (0) /**< Connection failed */
-#define BT_CONN_STATUS_SUCCESS (1) /**< Connection successful */
-#define BT_CONN_STATUS_MAX (2) /**< This and beyond are invalid values */
-
-/** ACL and Sync link types
- These must match the Bluetooth Spec!
-*/
-#define BT_SCO (0) /**< SCO Link */
-#define BT_ACL (1) /**< ACL Link */
-#define BT_eSCO (2) /**< eSCO Link */
-#define BT_LINK_TYPE_MAX (3) /**< This value and higher are invalid */
-
-/** ACL link modes
- These must match the Bluetooth Spec!
-*/
-#define BT_ACL_ACTIVE (0) /**< Active mode */
-#define BT_ACL_HOLD (1) /**< Hold mode */
-#define BT_ACL_SNIFF (2) /**< Sniff mode */
-#define BT_ACL_PARK (3) /**< Park mode */
-#define BT_ACL_MODE_MAX (4) /**< This value and higher are invalid */
-
-/**
- * A2DP BTC max no of BT sub intervals
- *
- * **/
-#define BTC_MAX_NUM_ACL_BT_SUB_INTS (7)
-
-/** BTC Executions Modes allowed to be set by user
-*/
-#define BTC_SMART_COEXISTENCE (0) /** BTC Mapping Layer decides whats best */
-#define BTC_WLAN_ONLY (1) /** WLAN takes all mode */
-#define BTC_PTA_ONLY (2) /** Allow only 3 wire protocol in H/W */
-#define BTC_SMART_MAX_WLAN (3) /** BTC Mapping Layer decides whats best, WLAN weighted */
-#define BTC_SMART_MAX_BT (4) /** BTC Mapping Layer decides whats best, BT weighted */
-#define BTC_SMART_BT_A2DP (5) /** BTC Mapping Layer decides whats best, balanced + BT A2DP weight */
-#define BT_EXEC_MODE_MAX (6) /** This and beyond are invalid values */
-
-/** Bitmaps used for maintaining various BT events that requires
- enough time to complete such that it might require disbling of
- heartbeat monitoring to avoid WLAN link loss with the AP
-*/
-#define BT_INQUIRY_STARTED (1<<0)
-#define BT_PAGE_STARTED (1<<1)
-#define BT_CREATE_ACL_CONNECTION_STARTED (1<<2)
-#define BT_CREATE_SYNC_CONNECTION_STARTED (1<<3)
-
-/** Maximum time duration in milliseconds between a specific BT start event and its
- respective stop event, before it can be declared timed out on receiving the stop event.
-*/
-#define BT_MAX_EVENT_DONE_TIMEOUT 45000
-
-
-/*
- To suppurt multiple SCO connections for BT+UAPSD work
-*/
-#define BT_MAX_SCO_SUPPORT 3
-#define BT_MAX_ACL_SUPPORT 3
-#define BT_MAX_DISCONN_SUPPORT (BT_MAX_SCO_SUPPORT+BT_MAX_ACL_SUPPORT)
-#define BT_MAX_NUM_EVENT_ACL_DEFERRED 4 //We may need to defer these many BT events for ACL
-#define BT_MAX_NUM_EVENT_SCO_DEFERRED 4 //We may need to defer these many BT events for SYNC
-
-/*
- * Number of mws coex configurations
- */
-#define MWS_COEX_MAX_CONFIG 6
-
-/** Enumeration of all the different kinds of BT events
-*/
-typedef enum eSmeBtEventType
-{
- BT_EVENT_DEVICE_SWITCHED_ON,
- BT_EVENT_DEVICE_SWITCHED_OFF,
- BT_EVENT_INQUIRY_STARTED,
- BT_EVENT_INQUIRY_STOPPED,
- BT_EVENT_INQUIRY_SCAN_STARTED,
- BT_EVENT_INQUIRY_SCAN_STOPPED,
- BT_EVENT_PAGE_STARTED,
- BT_EVENT_PAGE_STOPPED,
- BT_EVENT_PAGE_SCAN_STARTED,
- BT_EVENT_PAGE_SCAN_STOPPED,
- BT_EVENT_CREATE_ACL_CONNECTION,
- BT_EVENT_ACL_CONNECTION_COMPLETE,
- BT_EVENT_CREATE_SYNC_CONNECTION,
- BT_EVENT_SYNC_CONNECTION_COMPLETE,
- BT_EVENT_SYNC_CONNECTION_UPDATED,
- BT_EVENT_DISCONNECTION_COMPLETE,
- BT_EVENT_MODE_CHANGED,
- BT_EVENT_A2DP_STREAM_START,
- BT_EVENT_A2DP_STREAM_STOP,
- BT_EVENT_TYPE_MAX, //This and beyond are invalid values
-} tSmeBtEventType;
-
-/**Data structure that specifies the needed event parameters for
- BT_EVENT_CREATE_ACL_CONNECTION and BT_EVENT_ACL_CONNECTION_COMPLETE
-*/
-typedef struct sSmeBtAclConnectionParam
-{
- v_U8_t bdAddr[6];
- v_U16_t connectionHandle;
- v_U8_t status;
-} tSmeBtAclConnectionParam, *tpSmeBtAclConnectionParam;
-
-/** Data structure that specifies the needed event parameters for
- BT_EVENT_CREATE_SYNC_CONNECTION, BT_EVENT_SYNC_CONNECTION_COMPLETE
- and BT_EVENT_SYNC_CONNECTION_UPDATED
-*/
-typedef struct sSmeBtSyncConnectionParam
-{
- v_U8_t bdAddr[6];
- v_U16_t connectionHandle;
- v_U8_t status;
- v_U8_t linkType;
- v_U8_t scoInterval; //units in number of 625us slots
- v_U8_t scoWindow; //units in number of 625us slots
- v_U8_t retransmisisonWindow; //units in number of 625us slots
-} tSmeBtSyncConnectionParam, *tpSmeBtSyncConnectionParam;
-
-typedef struct sSmeBtSyncUpdateHist
-{
- tSmeBtSyncConnectionParam btSyncConnection;
- v_BOOL_t fValid;
-} tSmeBtSyncUpdateHist, *tpSmeBtSyncUpdateHist;
-
-/**Data structure that specifies the needed event parameters for
- BT_EVENT_MODE_CHANGED
-*/
-typedef struct sSmeBtAclModeChangeParam
-{
- v_U16_t connectionHandle;
- v_U8_t mode;
-} tSmeBtAclModeChangeParam, *tpSmeBtAclModeChangeParam;
-
-/*Data structure that specifies the needed event parameters for
- BT_EVENT_DISCONNECTION_COMPLETE
-*/
-typedef struct sSmeBtDisconnectParam
-{
- v_U16_t connectionHandle;
-} tSmeBtDisconnectParam, *tpSmeBtDisconnectParam;
-
-/*Data structure that specifies the needed event parameters for
- BT_EVENT_A2DP_STREAM_START
- BT_EVENT_A2DP_STREAM_STOP
-*/
-typedef struct sSmeBtA2DPParam
-{
- v_U8_t bdAddr[6];
-} tSmeBtA2DPParam, *tpSmeBtA2DPParam;
-
-
-/** Generic Bluetooth Event structure for BTC
-*/
-typedef struct sSmeBtcBtEvent
-{
- tSmeBtEventType btEventType;
- union
- {
- v_U8_t bdAddr[6]; /**< For events with only a BT Addr in event_data */
- tSmeBtAclConnectionParam btAclConnection;
- tSmeBtSyncConnectionParam btSyncConnection;
- tSmeBtDisconnectParam btDisconnect;
- tSmeBtAclModeChangeParam btAclModeChange;
- }uEventParam;
-} tSmeBtEvent, *tpSmeBtEvent;
-
-
-/** Data structure that specifies the BTC Configuration parameters
-*/
-typedef struct sSmeBtcConfig
-{
- v_U8_t btcExecutionMode;
- v_U32_t mwsCoexConfig[MWS_COEX_MAX_CONFIG];
-} tSmeBtcConfig, *tpSmeBtcConfig;
-
-
-typedef struct sSmeBtAclModeChangeEventHist
-{
- tSmeBtAclModeChangeParam btAclModeChange;
- v_BOOL_t fValid;
-} tSmeBtAclModeChangeEventHist, *tpSmeBtAclModeChangeEventHist;
-
-typedef struct sSmeBtAclEventHist
-{
- //At most, cached events are COMPLETION, DISCONNECT, CREATION, COMPLETION
- tSmeBtEventType btEventType[BT_MAX_NUM_EVENT_ACL_DEFERRED];
- tSmeBtAclConnectionParam btAclConnection[BT_MAX_NUM_EVENT_ACL_DEFERRED];
- //bNextEventIdx == 0 meaning no event cached here
- tANI_U8 bNextEventIdx;
-} tSmeBtAclEventHist, *tpSmeBtAclEventHist;
-
-typedef struct sSmeBtSyncEventHist
-{
- //At most, cached events are COMPLETION, DISCONNECT, CREATION, COMPLETION
- tSmeBtEventType btEventType[BT_MAX_NUM_EVENT_SCO_DEFERRED];
- tSmeBtSyncConnectionParam btSyncConnection[BT_MAX_NUM_EVENT_SCO_DEFERRED];
- //bNextEventIdx == 0 meaning no event cached here
- tANI_U8 bNextEventIdx;
-} tSmeBtSyncEventHist, *tpSmeBtSyncEventHist;
-
-typedef struct sSmeBtDisconnectEventHist
-{
- tSmeBtDisconnectParam btDisconnect;
- v_BOOL_t fValid;
-} tSmeBtDisconnectEventHist, *tpSmeBtDisconnectEventHist;
-
-
-/*
- Data structure for the history of BT events
-*/
-typedef struct sSmeBtcEventHist
-{
- tSmeBtSyncEventHist btSyncConnectionEvent[BT_MAX_SCO_SUPPORT];
- tSmeBtAclEventHist btAclConnectionEvent[BT_MAX_ACL_SUPPORT];
- tSmeBtAclModeChangeEventHist btAclModeChangeEvent[BT_MAX_ACL_SUPPORT];
- tSmeBtDisconnectEventHist btDisconnectEvent[BT_MAX_DISCONN_SUPPORT];
- tSmeBtSyncUpdateHist btSyncUpdateEvent[BT_MAX_SCO_SUPPORT];
- int nInquiryEvent; //>0 for # of outstanding inquiriy starts
- //<0 for # of outstanding inquiry stops
- //0 == no inquiry event
- int nPageEvent; //>0 for # of outstanding page starts
- //<0 for # of outstanding page stops
- //0 == no page event
- v_BOOL_t fA2DPStarted;
- v_BOOL_t fA2DPStopped;
-} tSmeBtcEventHist, *tpSmeBtcEventHist;
-
-typedef struct sSmeBtcEventReplay
-{
- tSmeBtcEventHist btcEventHist;
- v_BOOL_t fBTSwitchOn;
- v_BOOL_t fBTSwitchOff;
- //This is not directly tied to BT event so leave it alone when processing BT events
- v_BOOL_t fRestoreHBMonitor;
-} tSmeBtcEventReplay, *tpSmeBtcEventReplay;
-
-typedef struct sSmeBtcInfo
-{
- tSmeBtcConfig btcConfig;
- v_BOOL_t btcReady;
- v_U8_t btcEventState;
- v_U8_t btcHBActive; /* Is HB currently active */
- v_U8_t btcHBCount; /* default HB count */
- vos_timer_t restoreHBTimer; /* Timer to restore heart beat */
- tSmeBtcEventReplay btcEventReplay;
- v_BOOL_t fReplayBTEvents;
- v_BOOL_t btcUapsdOk; /* Indicate whether BTC is ok with UAPSD */
- v_BOOL_t fA2DPTrafStop;/*flag to check A2DP_STOP event has come before MODE_CHANGED*/
- v_U16_t btcScoHandles[BT_MAX_SCO_SUPPORT]; /* Handles for SCO, if any*/
- v_BOOL_t fA2DPUp; /*remember whether A2DP is in session*/
- v_BOOL_t btcScanCompromise;
- v_U8_t btcBssfordisableaggr[VOS_MAC_ADDRESS_LEN];
-} tSmeBtcInfo, *tpSmeBtcInfo;
-
-
-/** Routine definitions
-*/
-
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
-VOS_STATUS btcOpen (tHalHandle hHal);
-VOS_STATUS btcClose (tHalHandle hHal);
-VOS_STATUS btcReady (tHalHandle hHal);
-VOS_STATUS btcSendCfgMsg(tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig);
-VOS_STATUS btcSignalBTEvent (tHalHandle hHal, tpSmeBtEvent pBtEvent);
-VOS_STATUS btcSetConfig (tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig);
-VOS_STATUS btcGetConfig (tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig);
-/*
- Caller can check whether BTC's current event allows UAPSD. This doesn't affect
- BMPS.
- return: VOS_TRUE -- BTC is ready for UAPSD
- VOS_FALSE -- certain BT event is active, cannot enter UAPSD
-*/
-v_BOOL_t btcIsReadyForUapsd( tHalHandle hHal );
-eHalStatus btcHandleCoexInd(tHalHandle hHal, void* pMsg);
-#endif /* End of WLAN_MDM_CODE_REDUCTION_OPT */
-
-#endif
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/inc/csrApi.h b/drivers/staging/qcacld-2.0/CORE/SME/inc/csrApi.h
index 935f11a..60f7fa1 100644
--- a/drivers/staging/qcacld-2.0/CORE/SME/inc/csrApi.h
+++ b/drivers/staging/qcacld-2.0/CORE/SME/inc/csrApi.h
@@ -310,6 +310,14 @@
eCsrRequestType requestType; //11d scan or full scan
tANI_BOOLEAN p2pSearch;
tANI_BOOLEAN skipDfsChnlInP2pSearch;
+
+ uint32_t enable_scan_randomization;
+ uint8_t mac_addr[VOS_MAC_ADDR_SIZE];
+ uint8_t mac_addr_mask[VOS_MAC_ADDR_SIZE];
+ bool ie_whitelist;
+ uint32_t probe_req_ie_bitmap[PROBE_REQ_BITMAP_LEN];
+ uint32_t num_vendor_oui;
+ struct vendor_oui *voui;
}tCsrScanRequest;
typedef struct tagCsrBGScanRequest
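A hypothetical caller-side sketch of how an upper layer could fill the scan-randomization fields added to tCsrScanRequest above; the helper name is a placeholder, and csrApi.h is assumed to be included for the type and VOS_MAC_ADDR_SIZE:

    /* Hypothetical example, not part of this patch: request a randomized
     * scan with no IE whitelist and no vendor OUIs. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    static void example_fill_scan_randomization(tCsrScanRequest *req,
                                                const uint8_t *addr,
                                                const uint8_t *mask)
    {
            req->enable_scan_randomization = 1;
            memcpy(req->mac_addr, addr, VOS_MAC_ADDR_SIZE);
            memcpy(req->mac_addr_mask, mask, VOS_MAC_ADDR_SIZE);
            req->ie_whitelist = false;   /* no whitelisted IEs in this example */
            req->num_vendor_oui = 0;
            req->voui = NULL;
    }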
@@ -1187,8 +1195,6 @@
tANI_U32 nInitialDwellTime; //in units of milliseconds
bool initial_scan_no_dfs_chnl;
- tANI_U32 nActiveMinChnTimeBtc; //in units of milliseconds
- tANI_U32 nActiveMaxChnTimeBtc; //in units of milliseconds
tANI_U32 disableAggWithBtc;
#ifdef WLAN_AP_STA_CONCURRENCY
tANI_U32 nPassiveMinChnTimeConc; //in units of milliseconds
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/inc/csrInternal.h b/drivers/staging/qcacld-2.0/CORE/SME/inc/csrInternal.h
index d70c6ac..7d83366 100644
--- a/drivers/staging/qcacld-2.0/CORE/SME/inc/csrInternal.h
+++ b/drivers/staging/qcacld-2.0/CORE/SME/inc/csrInternal.h
@@ -597,8 +597,6 @@
tANI_U32 nInitialDwellTime; //in units of milliseconds
bool initial_scan_no_dfs_chnl;
- tANI_U32 nActiveMinChnTimeBtc; //in units of milliseconds
- tANI_U32 nActiveMaxChnTimeBtc; //in units of milliseconds
tANI_U8 disableAggWithBtc;
#ifdef WLAN_AP_STA_CONCURRENCY
tANI_U32 nPassiveMinChnTimeConc; //in units of milliseconds
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/inc/oemDataApi.h b/drivers/staging/qcacld-2.0/CORE/SME/inc/oemDataApi.h
index 57c2cc4..5f5deea 100644
--- a/drivers/staging/qcacld-2.0/CORE/SME/inc/oemDataApi.h
+++ b/drivers/staging/qcacld-2.0/CORE/SME/inc/oemDataApi.h
@@ -42,14 +42,6 @@
#include "sirMacProtDef.h"
#include "csrLinkList.h"
-#ifndef OEM_DATA_REQ_SIZE
-#define OEM_DATA_REQ_SIZE 280
-#endif
-
-#ifndef OEM_DATA_RSP_SIZE
-#define OEM_DATA_RSP_SIZE 1724
-#endif
-
/* message subtype for internal purpose */
#define OEM_MESSAGE_SUBTYPE_INTERNAL 0xdeadbeef
#define OEM_MESSAGE_SUBTYPE_LEN 4
@@ -62,7 +54,7 @@
typedef struct tagOemDataReq
{
tANI_U8 sessionId;
- uint8_t data_len;
+ uint32_t data_len;
uint8_t *data;
} tOemDataReq, tOemDataReqConfig;
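Widening data_len from uint8_t to uint32_t matters because an 8-bit length silently truncates OEM payloads larger than 255 bytes, which is presumably the motivation for the change. A tiny self-contained illustration of the truncation:

    /* Illustration only: 300 does not fit in a u8 and wraps to 44. */
    #include <assert.h>
    #include <stdint.h>

    static void example_truncation_check(void)
    {
            uint8_t  old_len = (uint8_t)300; /* old field type: wraps to 44 */
            uint32_t new_len = 300;          /* new field type: preserved */

            assert(old_len == 44);
            assert(new_len == 300);
    }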
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/inc/sme_Api.h b/drivers/staging/qcacld-2.0/CORE/SME/inc/sme_Api.h
index 15b147a..453db8c 100644
--- a/drivers/staging/qcacld-2.0/CORE/SME/inc/sme_Api.h
+++ b/drivers/staging/qcacld-2.0/CORE/SME/inc/sme_Api.h
@@ -50,7 +50,6 @@
#include "vos_lock.h"
#include "halTypes.h"
#include "sirApi.h"
-#include "btcApi.h"
#include "vos_nvitem.h"
#include "p2p_Api.h"
#include "smeInternal.h"
@@ -1827,50 +1826,6 @@
tANI_U8 sessionId );
/* ---------------------------------------------------------------------------
- \fn sme_BtcSignalBtEvent
- \brief API to signal Bluetooth (BT) event to the WLAN driver. Based on the
- BT event type and the current operating mode of Libra (full power,
- BMPS, UAPSD etc), appropriate Bluetooth Coexistence (BTC) strategy
- would be employed.
- \param hHal - The handle returned by macOpen.
- \param pBtcBtEvent - Pointer to a caller allocated object of type tSmeBtEvent
- Caller owns the memory and is responsible for freeing it.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE BT Event not passed to HAL. This can happen
- if driver has not yet been initialized or if BTC
- Events Layer has been disabled.
- VOS_STATUS_SUCCESS BT Event passed to HAL
- ---------------------------------------------------------------------------*/
-VOS_STATUS sme_BtcSignalBtEvent (tHalHandle hHal, tpSmeBtEvent pBtcBtEvent);
-
-/* ---------------------------------------------------------------------------
- \fn sme_BtcSetConfig
- \brief API to change the current Bluetooth Coexistence (BTC) configuration
- This function should be invoked only after CFG download has completed.
- Calling it after sme_HDDReadyInd is recommended.
- \param hHal - The handle returned by macOpen.
- \param pSmeBtcConfig - Pointer to a caller allocated object of type
- tSmeBtcConfig. Caller owns the memory and is responsible
- for freeing it.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE Config not passed to HAL.
- VOS_STATUS_SUCCESS Config passed to HAL
- ---------------------------------------------------------------------------*/
-VOS_STATUS sme_BtcSetConfig (tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig);
-
-/* ---------------------------------------------------------------------------
- \fn sme_BtcGetConfig
- \brief API to retrieve the current Bluetooth Coexistence (BTC) configuration
- \param hHal - The handle returned by macOpen.
- \param pSmeBtcConfig - Pointer to a caller allocated object of type tSmeBtcConfig.
- Caller owns the memory and is responsible for freeing it.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE - failure
- VOS_STATUS_SUCCESS success
- ---------------------------------------------------------------------------*/
-VOS_STATUS sme_BtcGetConfig (tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig);
-
-/* ---------------------------------------------------------------------------
\fn sme_SetCfgPrivacy
\brief API to set configure privacy parameters
\param hHal - The handle returned by macOpen.
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/src/QoS/sme_Qos.c b/drivers/staging/qcacld-2.0/CORE/SME/src/QoS/sme_Qos.c
index 54a3ffae..dc57c5d 100644
--- a/drivers/staging/qcacld-2.0/CORE/SME/src/QoS/sme_Qos.c
+++ b/drivers/staging/qcacld-2.0/CORE/SME/src/QoS/sme_Qos.c
@@ -3386,14 +3386,7 @@
pTspec->suspendInterval = pTspec_Info->suspension_interval;
pTspec->svcStartTime = pTspec_Info->svc_start_time;
pTspec->tsinfo.traffic.direction = pTspec_Info->ts_info.direction;
-
- //Make sure UAPSD is allowed. BTC may want to disable UAPSD while keep QoS setup
- if (pTspec_Info->ts_info.psb && btcIsReadyForUapsd(pMac)) {
- pTspec->tsinfo.traffic.psb = pTspec_Info->ts_info.psb;
- } else {
- pTspec->tsinfo.traffic.psb = 0;
- pTspec_Info->ts_info.psb = 0;
- }
+ pTspec->tsinfo.traffic.psb = pTspec_Info->ts_info.psb;
pTspec->tsinfo.traffic.tsid = pTspec_Info->ts_info.tid;
pTspec->tsinfo.traffic.userPrio = pTspec_Info->ts_info.up;
pTspec->tsinfo.traffic.accessPolicy = SME_QOS_ACCESS_POLICY_EDCA;
@@ -3503,15 +3496,7 @@
ricIE.TSPEC.suspension_int = pTspec_Info->suspension_interval;
ricIE.TSPEC.service_start_time = pTspec_Info->svc_start_time;
ricIE.TSPEC.direction = pTspec_Info->ts_info.direction;
- //Make sure UAPSD is allowed. BTC may want to disable UAPSD while keep QoS setup
- if( pTspec_Info->ts_info.psb && btcIsReadyForUapsd(pMac) )
- {
- ricIE.TSPEC.psb = pTspec_Info->ts_info.psb;
- }
- else
- {
- ricIE.TSPEC.psb = 0;
- }
+ ricIE.TSPEC.psb = pTspec_Info->ts_info.psb;
ricIE.TSPEC.tsid = pTspec_Info->ts_info.tid;
ricIE.TSPEC.user_priority = pTspec_Info->ts_info.up;
ricIE.TSPEC.access_policy = SME_QOS_ACCESS_POLICY_EDCA;
@@ -3547,15 +3532,7 @@
ricIE.WMMTSPEC.suspension_int = pTspec_Info->suspension_interval;
ricIE.WMMTSPEC.service_start_time = pTspec_Info->svc_start_time;
ricIE.WMMTSPEC.direction = pTspec_Info->ts_info.direction;
- //Make sure UAPSD is allowed. BTC may want to disable UAPSD while keep QoS setup
- if( pTspec_Info->ts_info.psb && btcIsReadyForUapsd(pMac) )
- {
- ricIE.WMMTSPEC.psb = pTspec_Info->ts_info.psb;
- }
- else
- {
- ricIE.WMMTSPEC.psb = 0;
- }
+ ricIE.WMMTSPEC.psb = pTspec_Info->ts_info.psb;
ricIE.WMMTSPEC.tsid = pTspec_Info->ts_info.tid;
ricIE.WMMTSPEC.user_priority = pTspec_Info->ts_info.up;
ricIE.WMMTSPEC.access_policy = SME_QOS_ACCESS_POLICY_EDCA;
@@ -4126,9 +4103,7 @@
pMsg->req.tspec.svcStartTime = 0;
pMsg->req.tspec.tsinfo.traffic.direction = pTspec_Info->ts_info.direction;
//Make sure UAPSD is allowed. BTC may want to disable UAPSD while keep QoS setup
- if( pTspec_Info->ts_info.psb
- && btcIsReadyForUapsd(pMac)
- )
+ if( pTspec_Info->ts_info.psb)
{
pMsg->req.tspec.tsinfo.traffic.psb = pTspec_Info->ts_info.psb;
}
@@ -8371,14 +8346,7 @@
tCsrRoamModifyProfileFields modifyProfileFields;
//we need to do a reassoc on these AC
csrGetModifyProfileFields(pMac, sessionId, &modifyProfileFields);
- if( btcIsReadyForUapsd(pMac) )
- {
- modifyProfileFields.uapsd_mask = uapsd_mask;
- }
- else
- {
- modifyProfileFields.uapsd_mask = 0;
- }
+ modifyProfileFields.uapsd_mask = uapsd_mask;
//Do we need to inform HDD?
if(!HAL_STATUS_SUCCESS(sme_QosRequestReassoc(pMac, sessionId, &modifyProfileFields, VOS_TRUE)))
{
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/src/btc/btcApi.c b/drivers/staging/qcacld-2.0/CORE/SME/src/btc/btcApi.c
deleted file mode 100644
index 2615d71..0000000
--- a/drivers/staging/qcacld-2.0/CORE/SME/src/btc/btcApi.c
+++ /dev/null
@@ -1,2075 +0,0 @@
-/*
- * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/******************************************************************************
-*
-* Name: btcApi.c
-*
-* Description: Routines that make up the BTC API.
-*
-
-*
-******************************************************************************/
-#include "wlan_qct_wda.h"
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
-#include "aniGlobal.h"
-#include "smsDebug.h"
-#include "btcApi.h"
-#include "cfgApi.h"
-#include "pmc.h"
-#include "smeQosInternal.h"
-#include "sme_Trace.h"
-#ifdef FEATURE_WLAN_DIAG_SUPPORT
-#include "vos_diag_core_event.h"
-#include "vos_diag_core_log.h"
-#endif /* FEATURE_WLAN_DIAG_SUPPORT */
-static void btcLogEvent (tHalHandle hHal, tpSmeBtEvent pBtEvent);
-static void btcRestoreHeartBeatMonitoringHandle(void* hHal);
-static void btcUapsdCheck( tpAniSirGlobal pMac, tpSmeBtEvent pBtEvent );
-VOS_STATUS btcCheckHeartBeatMonitoring(tHalHandle hHal, tpSmeBtEvent pBtEvent);
-static void btcPowerStateCB( v_PVOID_t pContext, tPmcState pmcState );
-static void btcPowerOffloadStateCB(v_PVOID_t pContext, tANI_U32 sessionId,
- tPmcState pmcState );
-static VOS_STATUS btcDeferEvent( tpAniSirGlobal pMac, tpSmeBtEvent pEvent );
-static VOS_STATUS btcDeferDisconnEvent( tpAniSirGlobal pMac, tpSmeBtEvent pEvent );
-#ifdef FEATURE_WLAN_DIAG_SUPPORT
-static void btcDiagEventLog (tHalHandle hHal, tpSmeBtEvent pBtEvent);
-#endif /* FEATURE_WLAN_DIAG_SUPPORT */
-/* ---------------------------------------------------------------------------
- \fn btcOpen
- \brief API to init the BTC Events Layer
- \param hHal - The handle returned by macOpen.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE success
- VOS_STATUS_SUCCESS failure
- ---------------------------------------------------------------------------*/
-VOS_STATUS btcOpen (tHalHandle hHal)
-{
- tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
- VOS_STATUS vosStatus;
- int i;
-
- /* Initialize BTC configuration. */
- pMac->btc.btcConfig.btcExecutionMode = BTC_SMART_COEXISTENCE;
-
- pMac->btc.btcReady = VOS_FALSE;
- pMac->btc.btcEventState = 0;
- pMac->btc.btcHBActive = VOS_TRUE;
- pMac->btc.btcScanCompromise = VOS_FALSE;
-
- for (i = 0; i < MWS_COEX_MAX_CONFIG; i++)
- {
- pMac->btc.btcConfig.mwsCoexConfig[i] = 0;
- }
-
- vosStatus = vos_timer_init( &pMac->btc.restoreHBTimer,
- VOS_TIMER_TYPE_SW,
- btcRestoreHeartBeatMonitoringHandle,
- (void*) hHal);
- if (!VOS_IS_STATUS_SUCCESS(vosStatus)) {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "btcOpen: Fail to init timer");
- return VOS_STATUS_E_FAILURE;
- }
-
- if(!pMac->psOffloadEnabled)
- {
- if(!HAL_STATUS_SUCCESS(pmcRegisterDeviceStateUpdateInd(pMac,
- btcPowerStateCB, pMac)))
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
- "btcOpen: Fail to register PMC callback");
- return VOS_STATUS_E_FAILURE;
- }
- }
- else
- {
- tANI_U32 i;
- for(i = 0; i < CSR_ROAM_SESSION_MAX; i++)
- {
- if(!HAL_STATUS_SUCCESS(pmcOffloadRegisterDeviceStateUpdateInd(pMac,
- i, btcPowerOffloadStateCB, pMac)))
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
- "btcOpen: Fail to register PMC callback");
- return VOS_STATUS_E_FAILURE;
- }
- }
- }
- return VOS_STATUS_SUCCESS;
-}
-/* ---------------------------------------------------------------------------
- \fn btcClose
- \brief API to exit the BTC Events Layer
- \param hHal - The handle returned by macOpen.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE success
- VOS_STATUS_SUCCESS failure
- ---------------------------------------------------------------------------*/
-VOS_STATUS btcClose (tHalHandle hHal)
-{
- tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
- VOS_STATUS vosStatus;
- pMac->btc.btcReady = VOS_FALSE;
- pMac->btc.btcUapsdOk = VOS_FALSE;
- vos_timer_stop(&pMac->btc.restoreHBTimer);
- vosStatus = vos_timer_destroy(&pMac->btc.restoreHBTimer);
- if (!VOS_IS_STATUS_SUCCESS(vosStatus)) {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "btcClose: Fail to destroy timer");
- return VOS_STATUS_E_FAILURE;
- }
-
- if(!pMac->psOffloadEnabled)
- {
- if(!HAL_STATUS_SUCCESS(
- pmcDeregisterDeviceStateUpdateInd(pMac, btcPowerStateCB)))
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_FATAL,
- "%s: %d: cannot deregister pmcDeregisterDeviceStateUpdateInd()",
- __func__, __LINE__);
- }
- }
- else
- {
- tANI_U32 i;
- for(i = 0; i < CSR_ROAM_SESSION_MAX; i++)
- {
- if(!HAL_STATUS_SUCCESS(pmcOffloadDeregisterDeviceStateUpdateInd(pMac,
- i, btcPowerOffloadStateCB)))
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
- "btcOpen: Fail to deregister PMC callback");
- return VOS_STATUS_E_FAILURE;
- }
- }
- }
- return VOS_STATUS_SUCCESS;
-}
-
-/* ---------------------------------------------------------------------------
- \fn btcReady
- \brief fn to inform BTC that eWNI_SME_SYS_READY_IND has been sent to PE.
- This acts as a trigger to send a message to HAL to update the BTC
- related config to FW. Note that if HDD configures any power BTC
- related stuff before this API is invoked, BTC will buffer all the
- configuration.
- \param hHal - The handle returned by macOpen.
- \return VOS_STATUS
- ---------------------------------------------------------------------------*/
-VOS_STATUS btcReady (tHalHandle hHal)
-{
- tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
- v_U32_t cfgVal = 0;
- v_U8_t i;
- pMac->btc.btcReady = VOS_TRUE;
- pMac->btc.btcUapsdOk = VOS_TRUE;
- for(i=0; i < BT_MAX_SCO_SUPPORT; i++)
- {
- pMac->btc.btcScoHandles[i] = BT_INVALID_CONN_HANDLE;
- }
-
- // Read heartbeat threshold CFG and save it.
- ccmCfgGetInt(pMac, WNI_CFG_HEART_BEAT_THRESHOLD, &cfgVal);
- pMac->btc.btcHBCount = (v_U8_t)cfgVal;
- if (btcSendCfgMsg(hHal, &(pMac->btc.btcConfig)) != VOS_STATUS_SUCCESS)
- {
- return VOS_STATUS_E_FAILURE;
- }
- return VOS_STATUS_SUCCESS;
-}
-
-static VOS_STATUS btcSendBTEvent(tpAniSirGlobal pMac, tpSmeBtEvent pBtEvent)
-{
- vos_msg_t msg;
- tpSmeBtEvent ptrSmeBtEvent = NULL;
- switch(pBtEvent->btEventType)
- {
- case BT_EVENT_CREATE_SYNC_CONNECTION:
- case BT_EVENT_SYNC_CONNECTION_UPDATED:
- if(pBtEvent->uEventParam.btSyncConnection.linkType != BT_SCO &&
- pBtEvent->uEventParam.btSyncConnection.linkType != BT_eSCO)
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
- "Invalid link type %d for Sync Connection. BT event will be dropped ",
- __func__, pBtEvent->uEventParam.btSyncConnection.linkType);
- return VOS_STATUS_E_FAILURE;
- }
- break;
- case BT_EVENT_SYNC_CONNECTION_COMPLETE:
- if((pBtEvent->uEventParam.btSyncConnection.status == BT_CONN_STATUS_SUCCESS) &&
- ((pBtEvent->uEventParam.btSyncConnection.linkType != BT_SCO && pBtEvent->uEventParam.btSyncConnection.linkType != BT_eSCO) ||
- (pBtEvent->uEventParam.btSyncConnection.connectionHandle == BT_INVALID_CONN_HANDLE)))
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
- "Invalid connection handle %d or link type %d for Sync Connection. BT event will be dropped ",
- __func__,
- pBtEvent->uEventParam.btSyncConnection.connectionHandle,
- pBtEvent->uEventParam.btSyncConnection.linkType);
- return VOS_STATUS_E_FAILURE;
- }
- break;
- case BT_EVENT_MODE_CHANGED:
- if(pBtEvent->uEventParam.btAclModeChange.mode >= BT_ACL_MODE_MAX)
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
- "Invalid mode %d for ACL Connection. BT event will be dropped ",
- __func__,
- pBtEvent->uEventParam.btAclModeChange.mode);
- return VOS_STATUS_E_FAILURE;
- }
- break;
- case BT_EVENT_DEVICE_SWITCHED_OFF:
- pMac->btc.btcEventState = 0;
- break;
- default:
- break;
- }
- ptrSmeBtEvent = vos_mem_malloc(sizeof(tSmeBtEvent));
- if (NULL == ptrSmeBtEvent)
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
- "Not able to allocate memory for BT event", __func__);
- return VOS_STATUS_E_FAILURE;
- }
- btcLogEvent(pMac, pBtEvent);
-#ifdef FEATURE_WLAN_DIAG_SUPPORT
- btcDiagEventLog(pMac, pBtEvent);
-#endif
- vos_mem_copy(ptrSmeBtEvent, pBtEvent, sizeof(tSmeBtEvent));
- msg.type = WDA_SIGNAL_BT_EVENT;
- msg.reserved = 0;
- msg.bodyptr = ptrSmeBtEvent;
- MTRACE(vos_trace(VOS_MODULE_ID_SME, TRACE_CODE_SME_TX_WDA_MSG, NO_SESSION,
- msg.type));
- if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
- "Not able to post WDA_SIGNAL_BT_EVENT message to WDA", __func__);
- vos_mem_free( ptrSmeBtEvent );
- return VOS_STATUS_E_FAILURE;
- }
- // After successfully posting the message, check if heart beat
- // monitoring needs to be turned off
- (void)btcCheckHeartBeatMonitoring(pMac, pBtEvent);
- //Check whether BTC and UAPSD can co-exist
- btcUapsdCheck( pMac, pBtEvent );
- return VOS_STATUS_SUCCESS;
- }
-
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
-/* ---------------------------------------------------------------------------
- \fn btcSignalBTEvent
- \brief API to signal Bluetooth (BT) event to the WLAN driver. Based on the
- BT event type and the current operating mode of Libra (full power,
- BMPS, UAPSD etc), appropriate Bluetooth Coexistence (BTC) strategy
- would be employed.
- \param hHal - The handle returned by macOpen.
- \param pBtEvent - Pointer to a caller allocated object of type tSmeBtEvent.
- Caller owns the memory and is responsible for freeing it.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE – BT Event not passed to HAL. This can happen
- if driver has not yet been initialized or if BTC
- Events Layer has been disabled.
- VOS_STATUS_SUCCESS – BT Event passed to HAL
- ---------------------------------------------------------------------------*/
-VOS_STATUS btcSignalBTEvent (tHalHandle hHal, tpSmeBtEvent pBtEvent)
-{
- tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
- VOS_STATUS vosStatus;
- if( NULL == pBtEvent )
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
- "Null pointer for SME BT Event", __func__);
- return VOS_STATUS_E_FAILURE;
- }
- if(( BTC_WLAN_ONLY == pMac->btc.btcConfig.btcExecutionMode ) ||
- ( BTC_PTA_ONLY == pMac->btc.btcConfig.btcExecutionMode ))
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
- "BTC execution mode not set to BTC_SMART_COEXISTENCE. BT event will be dropped", __func__);
- return VOS_STATUS_E_FAILURE;
- }
- if( pBtEvent->btEventType < 0 || pBtEvent->btEventType >= BT_EVENT_TYPE_MAX )
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
- "Invalid BT event %d being passed. BT event will be dropped",
- __func__, pBtEvent->btEventType);
- return VOS_STATUS_E_FAILURE;
- }
- //Check PMC state to make sure whether we need to defer
- //If we already have deferred events, defer the new one as well, in case PMC is in transition state
- if( pMac->btc.fReplayBTEvents || !PMC_IS_CHIP_ACCESSIBLE(pmcGetPmcState( pMac )) )
- {
- //We need to defer the event
- vosStatus = btcDeferEvent(pMac, pBtEvent);
- if( VOS_IS_STATUS_SUCCESS(vosStatus) )
- {
- pMac->btc.fReplayBTEvents = VOS_TRUE;
- return VOS_STATUS_SUCCESS;
- }
- else
- {
- return vosStatus;
- }
- }
- btcSendBTEvent(pMac, pBtEvent);
- return VOS_STATUS_SUCCESS;
-}
-#endif
-/* ---------------------------------------------------------------------------
- \fn btcCheckHeartBeatMonitoring
- \brief API to check whether heartbeat monitoring is required to be disabled
- for specific BT start events which takes significant time to complete
- during which WLAN misses beacons. To avoid WLAN-MAC from disconnecting
- for the not enough beacons received we stop the heartbeat timer during
- this start BT event till the stop of that BT event.
- \param hHal - The handle returned by macOpen.
- \param pBtEvent - Pointer to a caller allocated object of type tSmeBtEvent.
- Caller owns the memory and is responsible for freeing it.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE Config not passed to HAL.
- VOS_STATUS_SUCCESS Config passed to HAL
- ---------------------------------------------------------------------------*/
-VOS_STATUS btcCheckHeartBeatMonitoring(tHalHandle hHal, tpSmeBtEvent pBtEvent)
-{
- tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
- VOS_STATUS vosStatus;
- switch(pBtEvent->btEventType)
- {
- // Start events which requires heartbeat monitoring be disabled.
- case BT_EVENT_INQUIRY_STARTED:
- pMac->btc.btcEventState |= BT_INQUIRY_STARTED;
- break;
- case BT_EVENT_PAGE_STARTED:
- pMac->btc.btcEventState |= BT_PAGE_STARTED;
- break;
- case BT_EVENT_CREATE_ACL_CONNECTION:
- pMac->btc.btcEventState |= BT_CREATE_ACL_CONNECTION_STARTED;
- break;
- case BT_EVENT_CREATE_SYNC_CONNECTION:
- pMac->btc.btcEventState |= BT_CREATE_SYNC_CONNECTION_STARTED;
- break;
- // Stop/done events which indicates heartbeat monitoring can be enabled
- case BT_EVENT_INQUIRY_STOPPED:
- pMac->btc.btcEventState &= ~(BT_INQUIRY_STARTED);
- break;
- case BT_EVENT_PAGE_STOPPED:
- pMac->btc.btcEventState &= ~(BT_PAGE_STARTED);
- break;
- case BT_EVENT_ACL_CONNECTION_COMPLETE:
- pMac->btc.btcEventState &= ~(BT_CREATE_ACL_CONNECTION_STARTED);
- break;
- case BT_EVENT_SYNC_CONNECTION_COMPLETE:
- pMac->btc.btcEventState &= ~(BT_CREATE_SYNC_CONNECTION_STARTED);
- break;
- default:
- // Ignore other events
- return VOS_STATUS_SUCCESS;
- }
- // Check if any of the BT start events are active
- if (pMac->btc.btcEventState) {
- if (pMac->btc.btcHBActive) {
- // set heartbeat threshold CFG to zero
- ccmCfgSetInt(pMac, WNI_CFG_HEART_BEAT_THRESHOLD, 0, NULL, eANI_BOOLEAN_FALSE);
- pMac->btc.btcHBActive = VOS_FALSE;
- }
- // Deactivate and active the restore HB timer
- vos_timer_stop( &pMac->btc.restoreHBTimer);
- vosStatus= vos_timer_start( &pMac->btc.restoreHBTimer, BT_MAX_EVENT_DONE_TIMEOUT );
- if (!VOS_IS_STATUS_SUCCESS(vosStatus)) {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "btcCheckHeartBeatMonitoring: Fail to start timer");
- return VOS_STATUS_E_FAILURE;
- }
- } else {
- // Restore CFG back to the original value only if it was disabled
- if (!pMac->btc.btcHBActive) {
- ccmCfgSetInt(pMac, WNI_CFG_HEART_BEAT_THRESHOLD, pMac->btc.btcHBCount, NULL, eANI_BOOLEAN_FALSE);
- pMac->btc.btcHBActive = VOS_TRUE;
- }
- // Deactivate the timer
- vosStatus = vos_timer_stop( &pMac->btc.restoreHBTimer);
- if (!VOS_IS_STATUS_SUCCESS(vosStatus)) {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "btcCheckHeartBeatMonitoring: Fail to stop timer");
- return VOS_STATUS_E_FAILURE;
- }
- }
- return VOS_STATUS_SUCCESS;
-}
-/* ---------------------------------------------------------------------------
- \fn btcRestoreHeartBeatMonitoringHandle
- \brief Timer handler to handle the timeout condition when a specific BT
- stop event does not come back, in which case to restore back the
- heartbeat timer.
- \param hHal - The handle returned by macOpen.
- \return VOID
- ---------------------------------------------------------------------------*/
-void btcRestoreHeartBeatMonitoringHandle(tHalHandle hHal)
-{
- tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
- if( !pMac->btc.btcHBActive )
- {
- tPmcState pmcState;
- //Check PMC state to make sure whether we need to defer
- pmcState = pmcGetPmcState( pMac );
- if( PMC_IS_CHIP_ACCESSIBLE(pmcState) )
- {
- // Restore CFG back to the original value
- ccmCfgSetInt(pMac, WNI_CFG_HEART_BEAT_THRESHOLD, pMac->btc.btcHBCount, NULL, eANI_BOOLEAN_FALSE);
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "BT event timeout, restoring back HeartBeat timer");
- }
- else
- {
- //defer it
- pMac->btc.btcEventReplay.fRestoreHBMonitor = VOS_TRUE;
- }
- }
-}
-
-
-/* ---------------------------------------------------------------------------
- \fn btcSetConfig
- \brief API to change the current Bluetooth Coexistence (BTC) configuration
- This function should be invoked only after CFG download has completed.
- \param hHal - The handle returned by macOpen.
- \param pSmeBtcConfig - Pointer to a caller allocated object of type
- tSmeBtcConfig. Caller owns the memory and is responsible
- for freeing it.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE Config not passed to HAL.
- VOS_STATUS_SUCCESS Config passed to HAL
- ---------------------------------------------------------------------------*/
-VOS_STATUS btcSetConfig (tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig)
-{
- tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
- //Save a copy in the global BTC config
- vos_mem_copy(&(pMac->btc.btcConfig), pSmeBtcConfig, sizeof(tSmeBtcConfig));
- //Send the config down only if SME_HddReady has been invoked. If not ready,
- //BTC config will plumbed down when btcReady is eventually invoked.
- if(pMac->btc.btcReady)
- {
- if(VOS_STATUS_SUCCESS != btcSendCfgMsg(hHal, pSmeBtcConfig))
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_FATAL,
- "Failure to send BTC config down");
- return VOS_STATUS_E_FAILURE;
- }
- }
- return VOS_STATUS_SUCCESS;
-}
-/* ---------------------------------------------------------------------------
- \fn btcPostBtcCfgMsg
- \brief Private API to post BTC config message to HAL
- \param hHal - The handle returned by macOpen.
- \param pSmeBtcConfig - Pointer to a caller allocated object of type
- tSmeBtcConfig. Caller owns the memory and is responsible
- for freeing it.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE Config not passed to HAL.
- VOS_STATUS_SUCCESS Config passed to HAL
- ---------------------------------------------------------------------------*/
-VOS_STATUS btcSendCfgMsg(tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig)
-{
- tpSmeBtcConfig ptrSmeBtcConfig = NULL;
- vos_msg_t msg;
- if( NULL == pSmeBtcConfig )
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "btcSendCfgMsg: "
- "Null pointer for BTC Config");
- return VOS_STATUS_E_FAILURE;
- }
- if( pSmeBtcConfig->btcExecutionMode >= BT_EXEC_MODE_MAX )
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "btcSendCfgMsg: "
- "Invalid BT execution mode %d being set",
- pSmeBtcConfig->btcExecutionMode);
- return VOS_STATUS_E_FAILURE;
- }
- ptrSmeBtcConfig = vos_mem_malloc(sizeof(tSmeBtcConfig));
- if (NULL == ptrSmeBtcConfig)
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "btcSendCfgMsg: "
- "Not able to allocate memory for SME BTC Config");
- return VOS_STATUS_E_FAILURE;
- }
- vos_mem_copy(ptrSmeBtcConfig, pSmeBtcConfig, sizeof(tSmeBtcConfig));
- msg.type = WDA_BTC_SET_CFG;
- msg.reserved = 0;
- msg.bodyptr = ptrSmeBtcConfig;
- MTRACE(vos_trace(VOS_MODULE_ID_SME, TRACE_CODE_SME_TX_WDA_MSG, NO_SESSION,
- msg.type));
- if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "btcSendCfgMsg: "
- "Not able to post WDA_BTC_SET_CFG message to WDA");
- vos_mem_free( ptrSmeBtcConfig );
- return VOS_STATUS_E_FAILURE;
- }
- return VOS_STATUS_SUCCESS;
-}
-/* ---------------------------------------------------------------------------
- \fn btcGetConfig
- \brief API to retrieve the current Bluetooth Coexistence (BTC) configuration
- \param hHal - The handle returned by macOpen.
- \param pSmeBtcConfig - Pointer to a caller allocated object of type
- tSmeBtcConfig. Caller owns the memory and is responsible
- for freeing it.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE - failure
- VOS_STATUS_SUCCESS success
- ---------------------------------------------------------------------------*/
-VOS_STATUS btcGetConfig (tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig)
-{
- tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
- if( NULL == pSmeBtcConfig )
- {
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "btcGetConfig: "
- "Null pointer for BTC Config");
- return VOS_STATUS_E_FAILURE;
- }
- vos_mem_copy(pSmeBtcConfig, &(pMac->btc.btcConfig), sizeof(tSmeBtcConfig));
- return VOS_STATUS_SUCCESS;
-}
-/*
- btcFindAclEventHist find a suited ACL event buffer
- Param: bdAddr - NULL meaning not care.
- pointer to caller allocated buffer containing the BD
- address to find a match
- handle - BT_INVALID_CONN_HANDLE == not care
- otherwise, a handle to match
- NOPTE: Either bdAddr or handle can be valid, if both of them are valid, use bdAddr only. If neither
- bdAddr nor handle is valid, return the next free slot.
-*/
-static tpSmeBtAclEventHist btcFindAclEventHist( tpAniSirGlobal pMac, v_U8_t *bdAddr, v_U16_t handle )
-{
- int i, j;
- tpSmeBtAclEventHist pRet = NULL;
- tSmeBtcEventReplay *pReplay = &pMac->btc.btcEventReplay;
- for( i = 0; (i < BT_MAX_ACL_SUPPORT) && (NULL == pRet); i++ )
- {
- if( NULL != bdAddr )
- {
- //try to match addr
- if( pReplay->btcEventHist.btAclConnectionEvent[i].bNextEventIdx )
- {
- for(j = 0; j < pReplay->btcEventHist.btAclConnectionEvent[i].bNextEventIdx; j++)
- {
- if( vos_mem_compare(pReplay->btcEventHist.btAclConnectionEvent[i].btAclConnection[j].bdAddr,
- bdAddr, 6) )
- {
- //found it
- pRet = &pReplay->btcEventHist.btAclConnectionEvent[i];
- break;
- }
- }
- }
- }
- else if( BT_INVALID_CONN_HANDLE != handle )
- {
- //try to match handle
- if( pReplay->btcEventHist.btAclConnectionEvent[i].bNextEventIdx )
- {
- for(j = 0; j < pReplay->btcEventHist.btAclConnectionEvent[i].bNextEventIdx; j++)
- {
- if( pReplay->btcEventHist.btAclConnectionEvent[i].btAclConnection[j].connectionHandle ==
- handle )
- {
- //found it
- pRet = &pReplay->btcEventHist.btAclConnectionEvent[i];
- break;
- }
- }
- }
- }
- else if( 0 == pReplay->btcEventHist.btAclConnectionEvent[i].bNextEventIdx )
- {
- pRet = &pReplay->btcEventHist.btAclConnectionEvent[i];
- break;
- }
- }
- return (pRet);
-}
-
-/*
- btcFindSyncEventHist find a suited SYNC event buffer
- Param: bdAddr - NULL meaning not care.
- pointer to caller allocated buffer containing the
- BD address to find a match
- handle - BT_INVALID_CONN_HANDLE == not care
- otherwise, a handle to match
- NOPTE: Either bdAddr or handle can be valid, if both of them are valid, use bdAddr only. If neither
- bdAddr nor handle is valid, return the next free slot.
-*/
-static tpSmeBtSyncEventHist btcFindSyncEventHist( tpAniSirGlobal pMac, v_U8_t *bdAddr, v_U16_t handle )
-{
- int i, j;
- tpSmeBtSyncEventHist pRet = NULL;
- tSmeBtcEventReplay *pReplay = &pMac->btc.btcEventReplay;
- for( i = 0; (i < BT_MAX_SCO_SUPPORT) && (NULL == pRet); i++ )
- {
- if( NULL != bdAddr )
- {
- //try to match addr
- if( pReplay->btcEventHist.btSyncConnectionEvent[i].bNextEventIdx )
- {
- for(j = 0; j < pReplay->btcEventHist.btAclConnectionEvent[i].bNextEventIdx; j++)
- {
- if( vos_mem_compare(pReplay->btcEventHist.btSyncConnectionEvent[i].btSyncConnection[j].bdAddr,
- bdAddr, 6) )
- {
- //found it
- pRet = &pReplay->btcEventHist.btSyncConnectionEvent[i];
- break;
- }
- }
- }
- }
- else if( BT_INVALID_CONN_HANDLE != handle )
- {
- //try to match handle
- if( pReplay->btcEventHist.btSyncConnectionEvent[i].bNextEventIdx )
- {
- for(j = 0; j < pReplay->btcEventHist.btAclConnectionEvent[i].bNextEventIdx; j++)
- {
- if( pReplay->btcEventHist.btSyncConnectionEvent[i].btSyncConnection[j].connectionHandle ==
- handle )
- {
- //found it
- pRet = &pReplay->btcEventHist.btSyncConnectionEvent[i];
- break;
- }
- }
- }
- }
- else if( !pReplay->btcEventHist.btSyncConnectionEvent[i].bNextEventIdx )
- {
- pRet = &pReplay->btcEventHist.btSyncConnectionEvent[i];
- break;
- }
- }
- return (pRet);
-}
-
-/*
- btcFindDisconnEventHist find a slot for the deferred disconnect event
- If handle is invalid, it returns a free slot, if any.
- If handle is valid, it tries to find a match first in case same disconnect event comes down again.
-*/
-static tpSmeBtDisconnectEventHist btcFindDisconnEventHist( tpAniSirGlobal pMac, v_U16_t handle )
-{
- tpSmeBtDisconnectEventHist pRet = NULL;
- tSmeBtcEventReplay *pReplay = &pMac->btc.btcEventReplay;
- int i;
- if( BT_INVALID_CONN_HANDLE != handle )
- {
- for(i = 0; i < BT_MAX_DISCONN_SUPPORT; i++)
- {
- if( pReplay->btcEventHist.btDisconnectEvent[i].fValid &&
- (handle == pReplay->btcEventHist.btDisconnectEvent[i].btDisconnect.connectionHandle) )
- {
- pRet = &pReplay->btcEventHist.btDisconnectEvent[i];
- break;
- }
- }
- }
- if( NULL == pRet )
- {
- //Find a free slot
- for(i = 0; i < BT_MAX_DISCONN_SUPPORT; i++)
- {
- if( !pReplay->btcEventHist.btDisconnectEvent[i].fValid )
- {
- pRet = &pReplay->btcEventHist.btDisconnectEvent[i];
- break;
- }
- }
- }
- return (pRet);
-}
-
-/*
- btcFindModeChangeEventHist find a slot for the deferred mode change event
- If handle is invalid, it returns a free slot, if any.
- If handle is valid, it tries to find a match first in case same disconnect event comes down again.
-*/
-tpSmeBtAclModeChangeEventHist btcFindModeChangeEventHist( tpAniSirGlobal pMac, v_U16_t handle )
-{
- tpSmeBtAclModeChangeEventHist pRet = NULL;
- tSmeBtcEventReplay *pReplay = &pMac->btc.btcEventReplay;
- int i;
- if( BT_INVALID_CONN_HANDLE != handle )
- {
- for(i = 0; i < BT_MAX_ACL_SUPPORT; i++)
- {
- if( pReplay->btcEventHist.btAclModeChangeEvent[i].fValid &&
- (handle == pReplay->btcEventHist.btAclModeChangeEvent[i].btAclModeChange.connectionHandle) )
- {
- pRet = &pReplay->btcEventHist.btAclModeChangeEvent[i];
- break;
- }
- }
- }
- if( NULL == pRet )
- {
- //Find a free slot
- for(i = 0; i < BT_MAX_ACL_SUPPORT; i++)
- {
- if( !pReplay->btcEventHist.btAclModeChangeEvent[i].fValid )
- {
- pRet = &pReplay->btcEventHist.btAclModeChangeEvent[i];
- break;
- }
- }
- }
- return (pRet);
-}
-
-/*
- btcFindSyncUpdateEventHist find a slot for the deferred SYNC_UPDATE event
- If handle is invalid, it returns a free slot, if any.
- If handle is valid, it tries to find a match first in case same disconnect event comes down again.
-*/
-tpSmeBtSyncUpdateHist btcFindSyncUpdateEventHist( tpAniSirGlobal pMac, v_U16_t handle )
-{
- tpSmeBtSyncUpdateHist pRet = NULL;
- tSmeBtcEventReplay *pReplay = &pMac->btc.btcEventReplay;
- int i;
- if( BT_INVALID_CONN_HANDLE != handle )
- {
- for(i = 0; i < BT_MAX_SCO_SUPPORT; i++)
- {
- if( pReplay->btcEventHist.btSyncUpdateEvent[i].fValid &&
- (handle == pReplay->btcEventHist.btSyncUpdateEvent[i].btSyncConnection.connectionHandle) )
- {
- pRet = &pReplay->btcEventHist.btSyncUpdateEvent[i];
- break;
- }
- }
- }
- if( NULL == pRet )
- {
- //Find a free slot
- for(i = 0; i < BT_MAX_SCO_SUPPORT; i++)
- {
- if( !pReplay->btcEventHist.btSyncUpdateEvent[i].fValid )
- {
- pRet = &pReplay->btcEventHist.btSyncUpdateEvent[i];
- break;
- }
- }
- }
- return (pRet);
-}
-
-/*
- Call must validate pAclEventHist
-*/
-static void btcReleaseAclEventHist( tpAniSirGlobal pMac, tpSmeBtAclEventHist pAclEventHist )
-{
- vos_mem_zero( pAclEventHist, sizeof(tSmeBtAclEventHist) );
-}
-
-/*
- Call must validate pSyncEventHist
-*/
-static void btcReleaseSyncEventHist( tpAniSirGlobal pMac, tpSmeBtSyncEventHist pSyncEventHist )
-{
- vos_mem_zero( pSyncEventHist, sizeof(tSmeBtSyncEventHist) );
-}
-
-/*To defer an ACL creation event
-  We only support one ACL per BD address.
-  If the last cached event is another ACL create event, replace that event with the new event.
-  If the last cached event is a completion with success status and the new ACL creation
-  is on the same address, defer a new (fake) disconnect event first, then cache this ACL creation event.
-  Otherwise, save this create event.
-*/
-static VOS_STATUS btcDeferAclCreate( tpAniSirGlobal pMac, tpSmeBtEvent pEvent )
-{
- VOS_STATUS status = VOS_STATUS_SUCCESS;
- tpSmeBtAclEventHist pAclEventHist;
- tSmeBtAclConnectionParam *pAclEvent = NULL;
- do
- {
- //Find a match
- pAclEventHist = btcFindAclEventHist( pMac, pEvent->uEventParam.btAclConnection.bdAddr,
- BT_INVALID_CONN_HANDLE );
- if( NULL == pAclEventHist )
- {
- //No cached ACL event on this address
- //Find a free slot and save it
- pAclEventHist = btcFindAclEventHist( pMac, NULL, BT_INVALID_CONN_HANDLE );
- if( NULL != pAclEventHist )
- {
- vos_mem_copy(&pAclEventHist->btAclConnection[0], &pEvent->uEventParam.btAclConnection,
- sizeof(tSmeBtAclConnectionParam));
- pAclEventHist->btEventType[0] = BT_EVENT_CREATE_ACL_CONNECTION;
- pAclEventHist->bNextEventIdx = 1;
- }
- else
- {
- smsLog(pMac, LOGE, FL(" failed to find ACL event slot"));
- status = VOS_STATUS_E_RESOURCES;
- }
- //done
- break;
- }
- else
- {
- //There is history on this BD address
- if ((pAclEventHist->bNextEventIdx <= 0) ||
- (pAclEventHist->bNextEventIdx > BT_MAX_NUM_EVENT_ACL_DEFERRED))
- {
- VOS_ASSERT(0);
- status = VOS_STATUS_E_FAILURE;
- break;
- }
- pAclEvent = &pAclEventHist->btAclConnection[pAclEventHist->bNextEventIdx - 1];
- if(BT_EVENT_CREATE_ACL_CONNECTION == pAclEventHist->btEventType[pAclEventHist->bNextEventIdx - 1])
- {
- //The last cached event is creation, replace it with the new one
- if (pAclEvent)
- {
- vos_mem_copy(pAclEvent,
- &pEvent->uEventParam.btAclConnection,
- sizeof(tSmeBtAclConnectionParam));
- }
- //done
- break;
- }
- else if(BT_EVENT_ACL_CONNECTION_COMPLETE ==
- pAclEventHist->btEventType[pAclEventHist->bNextEventIdx - 1])
- {
- //The last cached event is completion, check the status.
- if(BT_CONN_STATUS_SUCCESS == pAclEvent->status)
- {
- tSmeBtEvent btEvent;
-                //The last event we have is a successful completion event.
-                //Should not get another creation event before a disconnect.
- smsLog(pMac, LOGE, FL(" Missing disconnect event on handle %d"), pAclEvent->connectionHandle);
- //Fake a disconnect event
- btEvent.btEventType = BT_EVENT_DISCONNECTION_COMPLETE;
- btEvent.uEventParam.btDisconnect.connectionHandle = pAclEvent->connectionHandle;
- btcDeferDisconnEvent(pMac, &btEvent);
- }
- }
- //Need to save the new event
- if(pAclEventHist->bNextEventIdx < BT_MAX_NUM_EVENT_ACL_DEFERRED)
- {
- pAclEventHist->btEventType[pAclEventHist->bNextEventIdx] = BT_EVENT_CREATE_ACL_CONNECTION;
- vos_mem_copy(&pAclEventHist->btAclConnection[pAclEventHist->bNextEventIdx],
- &pEvent->uEventParam.btAclConnection,
- sizeof(tSmeBtAclConnectionParam));
- pAclEventHist->bNextEventIdx++;
- }
- else
- {
- smsLog(pMac, LOGE, FL(" ACL event overflow"));
- VOS_ASSERT(0);
- }
- }
- }while(0);
- return status;
-}
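
A compact model of the defer-create policy described above: replace a trailing CREATE, synthesize the missing DISCONNECT when the last cached event is a successful COMPLETE, otherwise append while guarding against overflow. In the driver the fake disconnect is routed through btcDeferDisconnEvent (which may instead cancel a cached CREATE/COMPLETE pair); the sketch simply appends it. All names (hist_t, defer_create, MAX_EVT) are illustrative:

#include <stdio.h>

#define MAX_EVT 4
enum evt { EVT_CREATE = 0, EVT_COMPLETE_OK = 1, EVT_DISCONNECT = 2 };

typedef struct { enum evt evt[MAX_EVT]; int n; } hist_t;

/* Model of the defer-create policy: replace a trailing CREATE, synthesize the
 * missing DISCONNECT when the last cached event is a successful COMPLETE,
 * otherwise append if there is room. */
static int defer_create(hist_t *h)
{
    if (h->n == 0) {                        /* empty history: just record it */
        h->evt[h->n++] = EVT_CREATE;
        return 0;
    }
    if (h->evt[h->n - 1] == EVT_CREATE)     /* trailing CREATE: replace in place */
        return 0;                           /* (new parameters would overwrite it) */
    if (h->evt[h->n - 1] == EVT_COMPLETE_OK && h->n < MAX_EVT)
        h->evt[h->n++] = EVT_DISCONNECT;    /* fake the disconnect that never came */
    if (h->n < MAX_EVT) {
        h->evt[h->n++] = EVT_CREATE;
        return 0;
    }
    return -1;                              /* overflow, as in the LOGE/assert path */
}

int main(void)
{
    hist_t h = { .n = 0 };

    defer_create(&h);                       /* -> CREATE */
    h.evt[h.n++] = EVT_COMPLETE_OK;         /* pretend a successful completion arrived */
    defer_create(&h);                       /* -> fake DISCONNECT, then CREATE */

    for (int i = 0; i < h.n; i++)
        printf("%d ", h.evt[i]);            /* prints: 0 1 2 0 */
    printf("\n");
    return 0;
}
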
-
-/*Defer a ACL completion event
- If there is cached event on this BD address, check completion status.
- If status is fail and last cached event is creation, remove the creation event and drop
- this completion event. Otherwise, cache this completion event as the latest one.
-*/
-static VOS_STATUS btcDeferAclComplete( tpAniSirGlobal pMac, tpSmeBtEvent pEvent )
-{
- VOS_STATUS status = VOS_STATUS_SUCCESS;
- tpSmeBtAclEventHist pAclEventHist;
- do
- {
- //Find a match
- pAclEventHist = btcFindAclEventHist( pMac, pEvent->uEventParam.btAclConnection.bdAddr,
- BT_INVALID_CONN_HANDLE );
- if(pAclEventHist)
- {
- if (pAclEventHist->bNextEventIdx <= 0)
- {
- VOS_ASSERT(pAclEventHist->bNextEventIdx >0);
- return VOS_STATUS_E_EMPTY;
- }
- //Found one
- if(BT_CONN_STATUS_SUCCESS != pEvent->uEventParam.btAclConnection.status)
- {
- //If completion fails, and the last one is creation, remove the creation event
- if(BT_EVENT_CREATE_ACL_CONNECTION == pAclEventHist->btEventType[pAclEventHist->bNextEventIdx-1])
- {
- vos_mem_zero(&pAclEventHist->btAclConnection[pAclEventHist->bNextEventIdx-1],
- sizeof(tSmeBtAclConnectionParam));
- pAclEventHist->bNextEventIdx--;
- //Done with this event
- break;
- }
- else
- {
- smsLog(pMac, LOGE, FL(" ACL completion fail but last event(%d) not creation"),
- pAclEventHist->btEventType[pAclEventHist->bNextEventIdx-1]);
- }
- }
- }
- if( NULL == pAclEventHist )
- {
- pAclEventHist = btcFindAclEventHist( pMac, NULL, BT_INVALID_CONN_HANDLE );
- }
- if(pAclEventHist)
- {
- if(pAclEventHist->bNextEventIdx < BT_MAX_NUM_EVENT_ACL_DEFERRED)
- {
- //Save this event
- pAclEventHist->btEventType[pAclEventHist->bNextEventIdx] = BT_EVENT_ACL_CONNECTION_COMPLETE;
- vos_mem_copy(&pAclEventHist->btAclConnection[pAclEventHist->bNextEventIdx],
- &pEvent->uEventParam.btAclConnection,
- sizeof(tSmeBtAclConnectionParam));
- pAclEventHist->bNextEventIdx++;
- }
- else
- {
- smsLog(pMac, LOGE, FL(" ACL event overflow"));
- VOS_ASSERT(0);
- }
- }
- else
- {
- smsLog(pMac, LOGE, FL(" cannot find match for failed "
- "BT_EVENT_ACL_CONNECTION_COMPLETE of bdAddr "
- MAC_ADDRESS_STR),
- MAC_ADDR_ARRAY(pEvent->uEventParam.btAclConnection.bdAddr));
- status = VOS_STATUS_E_EMPTY;
- }
- }while(0);
- return (status);
-}
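
The completion path is the mirror image: a failed completion cancels a pending CREATE at the top of the history (both events disappear), while anything else is appended as the latest event if there is room. A minimal sketch with illustrative names:

#include <stdio.h>

#define MAX_EVT 4
enum evt { EVT_CREATE = 0, EVT_COMPLETE = 1 };

typedef struct { enum evt evt[MAX_EVT]; int n; } hist_t;

/* Model of the defer-complete policy: a failed completion cancels a pending
 * CREATE at the top of the history (both events vanish); anything else is
 * appended as the latest event if there is room. */
static void defer_complete(hist_t *h, int success)
{
    if (!success && h->n > 0 && h->evt[h->n - 1] == EVT_CREATE) {
        h->n--;                     /* drop the create and this completion too */
        return;
    }
    if (h->n < MAX_EVT)
        h->evt[h->n++] = EVT_COMPLETE;
}

int main(void)
{
    hist_t h = { .evt = { EVT_CREATE }, .n = 1 };

    defer_complete(&h, 0);          /* failure: the CREATE/COMPLETE pair cancels out */
    printf("after failed completion: n=%d\n", h.n);     /* n=0 */

    defer_complete(&h, 1);          /* success: the completion is cached */
    printf("after ok completion:     n=%d\n", h.n);     /* n=1 */
    return 0;
}
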
-
-/*To defer a SYNC creation event
- If the last cached event is another SYNC create event, replace
- that event with the new event.
- If there is a completion event with success status code, cache a new
- disconnect event(fake) first, then cache this SYNC creation event.
- Otherwise, cache this create event.
-*/
-static VOS_STATUS btcDeferSyncCreate( tpAniSirGlobal pMac, tpSmeBtEvent pEvent )
-{
- VOS_STATUS status = VOS_STATUS_SUCCESS;
- tpSmeBtSyncEventHist pSyncEventHist;
- tSmeBtSyncConnectionParam *pSyncEvent = NULL;
- do
- {
- //Find a match
- pSyncEventHist = btcFindSyncEventHist( pMac, pEvent->uEventParam.btSyncConnection.bdAddr,
- BT_INVALID_CONN_HANDLE );
- if( NULL == pSyncEventHist )
- {
- //No cached ACL event on this address
- //Find a free slot and save it
- pSyncEventHist = btcFindSyncEventHist( pMac, NULL, BT_INVALID_CONN_HANDLE );
- if( NULL != pSyncEventHist )
- {
- vos_mem_copy(&pSyncEventHist->btSyncConnection[0], &pEvent->uEventParam.btSyncConnection,
- sizeof(tSmeBtSyncConnectionParam));
- pSyncEventHist->btEventType[0] = BT_EVENT_CREATE_SYNC_CONNECTION;
- pSyncEventHist->bNextEventIdx = 1;
- }
- else
- {
- smsLog(pMac, LOGE, FL(" failed to find SYNC event slot"));
- status = VOS_STATUS_E_RESOURCES;
- }
- //done
- break;
- }
- else
- {
- //There is history on this BD address
- if ((pSyncEventHist->bNextEventIdx <= 0) ||
- (pSyncEventHist->bNextEventIdx > BT_MAX_NUM_EVENT_SCO_DEFERRED))
- {
- VOS_ASSERT(0);
- status = VOS_STATUS_E_FAILURE;
- return status;
- }
- pSyncEvent = &pSyncEventHist->btSyncConnection[pSyncEventHist->bNextEventIdx - 1];
- if(BT_EVENT_CREATE_SYNC_CONNECTION ==
- pSyncEventHist->btEventType[pSyncEventHist->bNextEventIdx - 1])
- {
- //The last cached event is creation, replace it with the new one
- if(pSyncEvent)
- {
- vos_mem_copy(pSyncEvent,
- &pEvent->uEventParam.btSyncConnection,
- sizeof(tSmeBtSyncConnectionParam));
- }
- //done
- break;
- }
- else if(BT_EVENT_SYNC_CONNECTION_COMPLETE ==
- pSyncEventHist->btEventType[pSyncEventHist->bNextEventIdx - 1])
- {
- //The last cached event is completion, check the status.
- if(BT_CONN_STATUS_SUCCESS == pSyncEvent->status)
- {
- tSmeBtEvent btEvent;
-                //The last event we have is a successful completion event.
-                //Should not get another creation event before a disconnect.
- smsLog(pMac, LOGE, FL(" Missing disconnect event on handle %d"), pSyncEvent->connectionHandle);
- //Fake a disconnect event
- btEvent.btEventType = BT_EVENT_DISCONNECTION_COMPLETE;
- btEvent.uEventParam.btDisconnect.connectionHandle = pSyncEvent->connectionHandle;
- btcDeferDisconnEvent(pMac, &btEvent);
- }
- }
- //Need to save the new event
- if(pSyncEventHist->bNextEventIdx < BT_MAX_NUM_EVENT_SCO_DEFERRED)
- {
- pSyncEventHist->btEventType[pSyncEventHist->bNextEventIdx] = BT_EVENT_CREATE_SYNC_CONNECTION;
- vos_mem_copy(&pSyncEventHist->btSyncConnection[pSyncEventHist->bNextEventIdx],
- &pEvent->uEventParam.btSyncConnection,
- sizeof(tSmeBtSyncConnectionParam));
- pSyncEventHist->bNextEventIdx++;
- }
- else
- {
- smsLog(pMac, LOGE, FL(" SYNC event overflow"));
- }
- }
- }while(0);
- return status;
-}
-
-/*
- * Defer a SYNC completion event
- * If there is cached event on this BD address, check completion status.
- * If status is fail and last cached event is creation, remove the
- * creation event and drop this completion event.
- * Otherwise, cache this completion event as the latest one.
- */
-static VOS_STATUS btcDeferSyncComplete( tpAniSirGlobal pMac, tpSmeBtEvent pEvent )
-{
- VOS_STATUS status = VOS_STATUS_SUCCESS;
- tpSmeBtSyncEventHist pSyncEventHist;
- do
- {
- //Find a match
- pSyncEventHist = btcFindSyncEventHist( pMac, pEvent->uEventParam.btSyncConnection.bdAddr,
- BT_INVALID_CONN_HANDLE );
- if(pSyncEventHist)
- {
- if (pSyncEventHist->bNextEventIdx <= 0)
- {
- VOS_ASSERT(pSyncEventHist->bNextEventIdx >0);
- return VOS_STATUS_E_EMPTY;
- }
- //Found one
- if(BT_CONN_STATUS_SUCCESS != pEvent->uEventParam.btSyncConnection.status)
- {
- //If completion fails, and the last one is creation, remove the creation event
- if(BT_EVENT_CREATE_SYNC_CONNECTION == pSyncEventHist->btEventType[pSyncEventHist->bNextEventIdx-1])
- {
- vos_mem_zero(&pSyncEventHist->btSyncConnection[pSyncEventHist->bNextEventIdx-1],
- sizeof(tSmeBtSyncConnectionParam));
- pSyncEventHist->bNextEventIdx--;
- //Done with this event
- break;
- }
- else
- {
- smsLog(pMac, LOGE, FL(" SYNC completion fail but last event(%d) not creation"),
- pSyncEventHist->btEventType[pSyncEventHist->bNextEventIdx-1]);
- }
- }
- }
- if(NULL == pSyncEventHist)
- {
- //In case we don't defer the creation event
- pSyncEventHist = btcFindSyncEventHist( pMac, NULL, BT_INVALID_CONN_HANDLE );
- }
- if(pSyncEventHist)
- {
- if(pSyncEventHist->bNextEventIdx < BT_MAX_NUM_EVENT_ACL_DEFERRED)
- {
- //Save this event
- pSyncEventHist->btEventType[pSyncEventHist->bNextEventIdx] = BT_EVENT_SYNC_CONNECTION_COMPLETE;
- vos_mem_copy(&pSyncEventHist->btSyncConnection[pSyncEventHist->bNextEventIdx],
- &pEvent->uEventParam.btSyncConnection,
- sizeof(tSmeBtSyncConnectionParam));
- pSyncEventHist->bNextEventIdx++;
- }
- else
- {
- smsLog(pMac, LOGE, FL(" SYNC event overflow"));
- }
- }
- else
- {
- smsLog(pMac, LOGE, FL(" cannot find match for "
- "BT_EVENT_SYNC_CONNECTION_COMPLETE of bdAddr "
- MAC_ADDRESS_STR),
- MAC_ADDR_ARRAY(pEvent->uEventParam.btSyncConnection.bdAddr));
- status = VOS_STATUS_E_EMPTY;
- }
- }while(0);
- return (status);
-}
-
-//return VOS_STATUS_E_EXISTS if the event handle cannot be found
-//VOS_STATUS_SUCCESS if the event is processed
-//Other error status meaning it cannot continue due to other errors
-/*
-    Defer a disconnect event for ACL.
-    Check whether there is any history on this event handle.
-    If both ACL_CREATION and ACL_COMPLETION are cached, remove both of those events and drop
-    this disconnect event.
-    Otherwise save the disconnect event in this ACL's bin.
-    If no ACL matches this handle, do nothing.
-    Either way, remove any cached MODE_CHANGE event that matches this disconnect event's handle.
-*/
-static VOS_STATUS btcDeferDisconnectEventForACL( tpAniSirGlobal pMac, tpSmeBtEvent pEvent )
-{
- VOS_STATUS status = VOS_STATUS_SUCCESS;
- tpSmeBtAclEventHist pAclEventHist;
- tpSmeBtAclModeChangeEventHist pModeChangeEventHist;
- v_BOOL_t fDone = VOS_FALSE;
- int i;
- pAclEventHist = btcFindAclEventHist( pMac, NULL,
- pEvent->uEventParam.btDisconnect.connectionHandle );
- if(pAclEventHist)
- {
- if( pAclEventHist->bNextEventIdx > BT_MAX_NUM_EVENT_ACL_DEFERRED)
- {
- smsLog(pMac, LOGE, FL(" ACL event history index:%d overflow, resetting to BT_MAX_NUM_EVENT_ACL_DEFERRED"), pAclEventHist->bNextEventIdx);
- pAclEventHist->bNextEventIdx = BT_MAX_NUM_EVENT_ACL_DEFERRED;
- }
-        /* Looking backwards */
- for(i = pAclEventHist->bNextEventIdx - 1; i >= 0; i--)
- {
- if( BT_EVENT_ACL_CONNECTION_COMPLETE == pAclEventHist->btEventType[i] )
- {
- //make sure we can cancel the link
- if( (i > 0) && (BT_EVENT_CREATE_ACL_CONNECTION == pAclEventHist->btEventType[i - 1]) )
- {
- fDone = VOS_TRUE;
- if(i == 1)
- {
- //All events can be wiped off
- btcReleaseAclEventHist(pMac, pAclEventHist);
- break;
- }
- //we have both ACL creation and completion, wipe out all of them
- pAclEventHist->bNextEventIdx = (tANI_U8)(i - 1);
- vos_mem_zero(&pAclEventHist->btAclConnection[i-1], sizeof(tSmeBtAclConnectionParam));
- vos_mem_zero(&pAclEventHist->btAclConnection[i], sizeof(tSmeBtAclConnectionParam));
- break;
- }
- }
- }//for loop
- if(!fDone)
- {
- //Save this disconnect event
- if(pAclEventHist->bNextEventIdx < BT_MAX_NUM_EVENT_ACL_DEFERRED)
- {
- pAclEventHist->btEventType[pAclEventHist->bNextEventIdx] =
- BT_EVENT_DISCONNECTION_COMPLETE;
- pAclEventHist->btAclConnection[pAclEventHist->bNextEventIdx].connectionHandle =
- pEvent->uEventParam.btDisconnect.connectionHandle;
- pAclEventHist->bNextEventIdx++;
- }
- else
- {
- smsLog(pMac, LOGE, FL(" ACL event overflow"));
- status = VOS_STATUS_E_FAILURE;
- }
- }
- }
- else
- {
- status = VOS_STATUS_E_EXISTS;
- }
- //Wipe out the related mode change event if it is there
- pModeChangeEventHist = btcFindModeChangeEventHist( pMac,
- pEvent->uEventParam.btDisconnect.connectionHandle );
- if( pModeChangeEventHist && pModeChangeEventHist->fValid )
- {
- pModeChangeEventHist->fValid = VOS_FALSE;
- }
- return status;
-}
-
-/*
- * This function works the same as btcDeferDisconnectEventForACL except that it
- * handles SYNC events. It returns VOS_STATUS_E_EXISTS if the event handle cannot be
- * found and VOS_STATUS_SUCCESS if the event is processed.
- * Any other error status means it cannot continue due to other errors.
- */
-/*
-    Defer a disconnect event for SYNC.
-    Check whether there is any SYNC history on this event handle.
-    If so, and if both SYNC_CREATION and SYNC_COMPLETION are cached, remove both of those events and drop
-    this disconnect event.
-    Otherwise save the disconnect event in this SYNC's bin.
-    If no match is found, do not save this event here.
-    Either way, remove any cached SYNC_UPDATE event that matches this disconnect event's handle.
-*/
-static VOS_STATUS btcDeferDisconnectEventForSync( tpAniSirGlobal pMac, tpSmeBtEvent pEvent )
-{
- VOS_STATUS status = VOS_STATUS_SUCCESS;
- tpSmeBtSyncEventHist pSyncEventHist;
- tpSmeBtSyncUpdateHist pSyncUpdateHist;
- v_BOOL_t fDone = VOS_FALSE;
- int i;
- pSyncEventHist = btcFindSyncEventHist( pMac, NULL,
- pEvent->uEventParam.btDisconnect.connectionHandle );
- if(pSyncEventHist)
- {
- if( pSyncEventHist->bNextEventIdx > BT_MAX_NUM_EVENT_SCO_DEFERRED)
- {
- smsLog(pMac, LOGE, FL(" SYNC event history index:%d overflow, resetting to BT_MAX_NUM_EVENT_SCO_DEFERRED"), pSyncEventHist->bNextEventIdx);
- pSyncEventHist->bNextEventIdx = BT_MAX_NUM_EVENT_SCO_DEFERRED;
- }
-        /* Looking backwards */
- for(i = pSyncEventHist->bNextEventIdx - 1; i >= 0; i--)
- {
- //if a mode change event exists, drop it
- if( BT_EVENT_SYNC_CONNECTION_COMPLETE == pSyncEventHist->btEventType[i] )
- {
- //make sure we can cancel the link
- if( (i > 0) && (BT_EVENT_CREATE_SYNC_CONNECTION == pSyncEventHist->btEventType[i - 1]) )
- {
- fDone = VOS_TRUE;
- if(i == 1)
- {
- //All events can be wiped off
- btcReleaseSyncEventHist(pMac, pSyncEventHist);
- break;
- }
-                    //we have both SYNC creation and completion, wipe out all of them
- pSyncEventHist->bNextEventIdx = (tANI_U8)(i - 1);
- vos_mem_zero(&pSyncEventHist->btSyncConnection[i-1], sizeof(tSmeBtSyncConnectionParam));
- vos_mem_zero(&pSyncEventHist->btSyncConnection[i], sizeof(tSmeBtSyncConnectionParam));
- break;
- }
- }
- }//for loop
- if(!fDone)
- {
- //Save this disconnect event
- if(pSyncEventHist->bNextEventIdx < BT_MAX_NUM_EVENT_SCO_DEFERRED)
- {
- pSyncEventHist->btEventType[pSyncEventHist->bNextEventIdx] =
- BT_EVENT_DISCONNECTION_COMPLETE;
- pSyncEventHist->btSyncConnection[pSyncEventHist->bNextEventIdx].connectionHandle =
- pEvent->uEventParam.btDisconnect.connectionHandle;
- pSyncEventHist->bNextEventIdx++;
- }
- else
- {
- smsLog(pMac, LOGE, FL(" SYNC event overflow"));
- status = VOS_STATUS_E_FAILURE;
- }
- }
- }
- else
- {
- status = VOS_STATUS_E_EXISTS;
- }
- //Wipe out the related mode change event if it is there
- pSyncUpdateHist = btcFindSyncUpdateEventHist( pMac,
- pEvent->uEventParam.btDisconnect.connectionHandle );
- if( pSyncUpdateHist && pSyncUpdateHist->fValid )
- {
- pSyncUpdateHist->fValid = VOS_FALSE;
- }
- return status;
-}
-
-/*
- Defer a disconnect event.
- Try to defer it as part of the ACL event first.
- If no match is found, try SYNC.
- If still no match found, defer it at DISCONNECT event bin.
-*/
-static VOS_STATUS btcDeferDisconnEvent( tpAniSirGlobal pMac, tpSmeBtEvent pEvent )
-{
- VOS_STATUS status = VOS_STATUS_SUCCESS;
- tpSmeBtDisconnectEventHist pDisconnEventHist;
- if( BT_INVALID_CONN_HANDLE == pEvent->uEventParam.btDisconnect.connectionHandle )
- {
- smsLog( pMac, LOGE, FL(" invalid handle") );
- return (VOS_STATUS_E_INVAL);
- }
- //Check ACL first
- status = btcDeferDisconnectEventForACL(pMac, pEvent);
- if(!VOS_IS_STATUS_SUCCESS(status))
- {
- status = btcDeferDisconnectEventForSync(pMac, pEvent);
- }
- if( !VOS_IS_STATUS_SUCCESS(status) )
- {
- //Save the disconnect event
- pDisconnEventHist = btcFindDisconnEventHist( pMac,
- pEvent->uEventParam.btDisconnect.connectionHandle );
- if( pDisconnEventHist )
- {
- pDisconnEventHist->fValid = VOS_TRUE;
- vos_mem_copy( &pDisconnEventHist->btDisconnect, &pEvent->uEventParam.btDisconnect,
- sizeof(tSmeBtDisconnectParam) );
- status = VOS_STATUS_SUCCESS;
- }
- else
- {
- smsLog( pMac, LOGE, FL(" cannot find match for BT_EVENT_DISCONNECTION_COMPLETE of handle (%d)"),
- pEvent->uEventParam.btDisconnect.connectionHandle);
- status = VOS_STATUS_E_EMPTY;
- }
- }
- return (status);
-}
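
The disconnect path is a fallback chain: the ACL bin is tried first, then the SYNC bin, and only if neither knows the handle is the event parked in the stand-alone disconnect bin. A small sketch of that chain; the three handlers and the handle values are stand-ins for the real bins:

#include <stdio.h>

typedef enum { OK = 0, E_EXISTS = 1 } status_t;

/* Stand-ins for the per-bin handlers: each reports E_EXISTS when the handle is
 * unknown to that bin, like btcDeferDisconnectEventForACL/ForSync above. */
static status_t defer_for_acl(unsigned handle)     { return handle == 1 ? OK : E_EXISTS; }
static status_t defer_for_sync(unsigned handle)    { return handle == 2 ? OK : E_EXISTS; }
static status_t defer_standalone(unsigned handle)  { (void)handle; return OK; }

/* Fallback chain in the same order the routine above tries it:
 * ACL bin, then SYNC bin, then the stand-alone disconnect bin. */
static status_t defer_disconnect(unsigned handle)
{
    status_t s = defer_for_acl(handle);
    if (s != OK)
        s = defer_for_sync(handle);
    if (s != OK)
        s = defer_standalone(handle);
    return s;
}

int main(void)
{
    printf("handle 1 -> %d (ACL bin)\n",         defer_disconnect(1));
    printf("handle 2 -> %d (SYNC bin)\n",        defer_disconnect(2));
    printf("handle 9 -> %d (stand-alone bin)\n", defer_disconnect(9));
    return 0;
}
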
-
-/*
- btcDeferEvent save the event for possible replay when chip can be accessed
- This function is called only when in IMPS/Standby state
-*/
-static VOS_STATUS btcDeferEvent( tpAniSirGlobal pMac, tpSmeBtEvent pEvent )
-{
- VOS_STATUS status = VOS_STATUS_SUCCESS;
- tpSmeBtSyncUpdateHist pSyncUpdateHist;
- tpSmeBtAclModeChangeEventHist pModeChangeEventHist;
- tSmeBtcEventReplay *pReplay = &pMac->btc.btcEventReplay;
- switch(pEvent->btEventType)
- {
- case BT_EVENT_DEVICE_SWITCHED_ON:
- //Clear all events first
- vos_mem_zero( &pReplay->btcEventHist, sizeof(tSmeBtcEventHist) );
- pReplay->fBTSwitchOn = VOS_TRUE;
- pReplay->fBTSwitchOff = VOS_FALSE;
- break;
- case BT_EVENT_DEVICE_SWITCHED_OFF:
- //Clear all events first
- vos_mem_zero( &pReplay->btcEventHist, sizeof(tSmeBtcEventHist) );
- pReplay->fBTSwitchOff = VOS_TRUE;
- pReplay->fBTSwitchOn = VOS_FALSE;
- break;
- case BT_EVENT_INQUIRY_STARTED:
- pReplay->btcEventHist.nInquiryEvent++;
- break;
- case BT_EVENT_INQUIRY_STOPPED:
- pReplay->btcEventHist.nInquiryEvent--;
- break;
- case BT_EVENT_PAGE_STARTED:
- pReplay->btcEventHist.nPageEvent++;
- break;
- case BT_EVENT_PAGE_STOPPED:
- pReplay->btcEventHist.nPageEvent--;
- break;
- case BT_EVENT_CREATE_ACL_CONNECTION:
- status = btcDeferAclCreate(pMac, pEvent);
- break;
- case BT_EVENT_ACL_CONNECTION_COMPLETE:
- status = btcDeferAclComplete( pMac, pEvent );
- break;
- case BT_EVENT_CREATE_SYNC_CONNECTION:
- status = btcDeferSyncCreate(pMac, pEvent);
- break;
- case BT_EVENT_SYNC_CONNECTION_COMPLETE:
- status = btcDeferSyncComplete( pMac, pEvent );
- break;
- case BT_EVENT_SYNC_CONNECTION_UPDATED:
- if( BT_INVALID_CONN_HANDLE == pEvent->uEventParam.btDisconnect.connectionHandle )
- {
- smsLog( pMac, LOGE, FL(" invalid handle") );
- status = VOS_STATUS_E_INVAL;
- break;
- }
- //Find a match on handle. If not found, get a free slot.
- pSyncUpdateHist = btcFindSyncUpdateEventHist( pMac,
- pEvent->uEventParam.btSyncConnection.connectionHandle );
- if(pSyncUpdateHist)
- {
- pSyncUpdateHist->fValid = VOS_TRUE;
- vos_mem_copy(&pSyncUpdateHist->btSyncConnection, &pEvent->uEventParam.btSyncConnection,
- sizeof(tSmeBtSyncConnectionParam));
- }
- else
- {
- smsLog( pMac, LOGE, FL(" cannot find match for BT_EVENT_SYNC_CONNECTION_UPDATED of handle (%d)"),
- pEvent->uEventParam.btSyncConnection.connectionHandle );
- status = VOS_STATUS_E_EMPTY;
- }
- break;
- case BT_EVENT_DISCONNECTION_COMPLETE:
- status = btcDeferDisconnEvent( pMac, pEvent );
- break;
- case BT_EVENT_MODE_CHANGED:
- if( BT_INVALID_CONN_HANDLE == pEvent->uEventParam.btDisconnect.connectionHandle )
- {
- smsLog( pMac, LOGE, FL(" invalid handle") );
- status = VOS_STATUS_E_INVAL;
- break;
- }
- //Find a match on handle, If not found, return a free slot
- pModeChangeEventHist = btcFindModeChangeEventHist( pMac,
- pEvent->uEventParam.btAclModeChange.connectionHandle );
- if(pModeChangeEventHist)
- {
- pModeChangeEventHist->fValid = VOS_TRUE;
- vos_mem_copy( &pModeChangeEventHist->btAclModeChange,
- &pEvent->uEventParam.btAclModeChange, sizeof(tSmeBtAclModeChangeParam) );
- }
- else
- {
- smsLog( pMac, LOGE, FL(" cannot find match for BT_EVENT_MODE_CHANGED of handle (%d)"),
- pEvent->uEventParam.btAclModeChange.connectionHandle);
- status = VOS_STATUS_E_EMPTY;
- }
- break;
- case BT_EVENT_A2DP_STREAM_START:
- pReplay->btcEventHist.fA2DPStarted = VOS_TRUE;
- pReplay->btcEventHist.fA2DPStopped = VOS_FALSE;
- break;
- case BT_EVENT_A2DP_STREAM_STOP:
- pReplay->btcEventHist.fA2DPStopped = VOS_TRUE;
- pReplay->btcEventHist.fA2DPStarted = VOS_FALSE;
- break;
- default:
- smsLog( pMac, LOGE, FL(" event (%d) is not deferred"), pEvent->btEventType );
- status = VOS_STATUS_E_NOSUPPORT;
- break;
- }
- return (status);
-}
-
-/*
- Replay all cached events in the following order
- 1. If BT_SWITCH_OFF event, send it.
-  2. Send INQUIRY event (START or STOP), if available
- 3. Send PAGE event (START or STOP), if available
- 4. Send DISCONNECT events, these DISCONNECT events are not tied to
- any ACL/SYNC event that we have cached
- 5. Send ACL events (possible events, CREATION, COMPLETION, DISCONNECT)
- 6. Send MODE_CHANGE events, if available
- 7. Send A2DP event(START or STOP), if available
- 8. Send SYNC events (possible events, CREATION, COMPLETION, DISCONNECT)
- 9. Send SYNC_UPDATE events, if available
-*/
-static void btcReplayEvents( tpAniSirGlobal pMac )
-{
- int i, j;
- tSmeBtEvent btEvent;
- tpSmeBtAclEventHist pAclHist;
- tpSmeBtSyncEventHist pSyncHist;
- tSmeBtcEventReplay *pReplay = &pMac->btc.btcEventReplay;
- //Always turn on HB monitor first.
- //It is independent of BT events even though BT event causes this
- if( pReplay->fRestoreHBMonitor )
- {
- pReplay->fRestoreHBMonitor = VOS_FALSE;
- //Only do it when needed
- if( !pMac->btc.btcHBActive )
- {
- ccmCfgSetInt(pMac, WNI_CFG_HEART_BEAT_THRESHOLD, pMac->btc.btcHBCount, NULL, eANI_BOOLEAN_FALSE);
- pMac->btc.btcHBActive = VOS_TRUE;
- }
- }
- if( pMac->btc.fReplayBTEvents )
- {
- /*Set the flag to false here so btcSignalBTEvent won't defer any further.
-        This works because SME has its global lock*/
- pMac->btc.fReplayBTEvents = VOS_FALSE;
- if( pReplay->fBTSwitchOff )
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = BT_EVENT_DEVICE_SWITCHED_OFF;
- btcSendBTEvent( pMac, &btEvent );
- pReplay->fBTSwitchOff = VOS_FALSE;
- }
- else if( pReplay->fBTSwitchOn )
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = BT_EVENT_DEVICE_SWITCHED_ON;
- btcSendBTEvent( pMac, &btEvent );
- pReplay->fBTSwitchOn = VOS_FALSE;
- }
- //Do inquire first
- if( pReplay->btcEventHist.nInquiryEvent > 0 )
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = BT_EVENT_INQUIRY_STARTED;
- i = pReplay->btcEventHist.nInquiryEvent;
- while(i--)
- {
- btcSendBTEvent( pMac, &btEvent );
- }
- }
- else if( pReplay->btcEventHist.nInquiryEvent < 0 )
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = BT_EVENT_INQUIRY_STOPPED;
- i = pReplay->btcEventHist.nInquiryEvent;
- while(i++)
- {
- btcSendBTEvent( pMac, &btEvent );
- }
- }
- //Page
- if( pReplay->btcEventHist.nPageEvent > 0 )
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = BT_EVENT_PAGE_STARTED;
- i = pReplay->btcEventHist.nPageEvent;
- while(i--)
- {
- btcSendBTEvent( pMac, &btEvent );
- }
- }
- else if( pReplay->btcEventHist.nPageEvent < 0 )
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = BT_EVENT_PAGE_STOPPED;
- i = pReplay->btcEventHist.nPageEvent;
- while(i++)
- {
- btcSendBTEvent( pMac, &btEvent );
- }
- }
- //Replay non-completion disconnect events first
- //Disconnect
- for( i = 0; i < BT_MAX_DISCONN_SUPPORT; i++ )
- {
- if( pReplay->btcEventHist.btDisconnectEvent[i].fValid )
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = BT_EVENT_DISCONNECTION_COMPLETE;
- vos_mem_copy( &btEvent.uEventParam.btDisconnect,
- &pReplay->btcEventHist.btDisconnectEvent[i].btDisconnect, sizeof(tSmeBtDisconnectParam) );
- btcSendBTEvent( pMac, &btEvent );
- }
- }
- //ACL
- for( i = 0; i < BT_MAX_ACL_SUPPORT; i++ )
- {
- if( pReplay->btcEventHist.btAclConnectionEvent[i].bNextEventIdx )
- {
- pAclHist = &pReplay->btcEventHist.btAclConnectionEvent[i];
- //Replay all ACL events for this BD address/handle
- for(j = 0; j < pAclHist->bNextEventIdx; j++)
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = pAclHist->btEventType[j];
- if(BT_EVENT_DISCONNECTION_COMPLETE != btEvent.btEventType)
- {
- //It must be CREATE or CONNECTION_COMPLETE
- vos_mem_copy( &btEvent.uEventParam.btAclConnection,
- &pAclHist->btAclConnection[j], sizeof(tSmeBtAclConnectionParam) );
- }
- else
- {
- btEvent.uEventParam.btDisconnect.connectionHandle = pAclHist->btAclConnection[j].connectionHandle;
- }
- btcSendBTEvent( pMac, &btEvent );
- }
- }
- }
- //Mode change
- for( i = 0; i < BT_MAX_ACL_SUPPORT; i++ )
- {
- if( pReplay->btcEventHist.btAclModeChangeEvent[i].fValid )
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = BT_EVENT_MODE_CHANGED;
- vos_mem_copy( &btEvent.uEventParam.btAclModeChange,
- &pReplay->btcEventHist.btAclModeChangeEvent[i].btAclModeChange, sizeof(tSmeBtAclModeChangeParam) );
- btcSendBTEvent( pMac, &btEvent );
- }
- }
- //A2DP
- if( pReplay->btcEventHist.fA2DPStarted )
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = BT_EVENT_A2DP_STREAM_START;
- btcSendBTEvent( pMac, &btEvent );
- }
- else if( pReplay->btcEventHist.fA2DPStopped )
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = BT_EVENT_A2DP_STREAM_STOP;
- btcSendBTEvent( pMac, &btEvent );
- }
- //SCO
- for( i = 0; i < BT_MAX_SCO_SUPPORT; i++ )
- {
- if( pReplay->btcEventHist.btSyncConnectionEvent[i].bNextEventIdx )
- {
- pSyncHist = &pReplay->btcEventHist.btSyncConnectionEvent[i];
- //Replay all SYNC events for this BD address/handle
- for(j = 0; j < pSyncHist->bNextEventIdx; j++)
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = pSyncHist->btEventType[j];
- if(BT_EVENT_DISCONNECTION_COMPLETE != btEvent.btEventType)
- {
- //Must be CREATION or CONNECTION_COMPLETE
- vos_mem_copy( &btEvent.uEventParam.btSyncConnection,
- &pSyncHist->btSyncConnection[j], sizeof(tSmeBtSyncConnectionParam) );
- }
- else
- {
- btEvent.uEventParam.btDisconnect.connectionHandle = pSyncHist->btSyncConnection[j].connectionHandle;
- }
- btcSendBTEvent( pMac, &btEvent );
- }
- }
- }
- //SYNC update
- for( i = 0; i < BT_MAX_SCO_SUPPORT; i++ )
- {
- if( pReplay->btcEventHist.btSyncUpdateEvent[i].fValid )
- {
- vos_mem_zero( &btEvent, sizeof(tSmeBtEvent) );
- btEvent.btEventType = BT_EVENT_SYNC_CONNECTION_UPDATED;
- vos_mem_copy( &btEvent.uEventParam.btSyncConnection,
- &pReplay->btcEventHist.btSyncUpdateEvent[i].btSyncConnection,
- sizeof(tSmeBtSyncConnectionParam) );
- btcSendBTEvent( pMac, &btEvent );
- }
- }
- //Clear all events
- vos_mem_zero( &pReplay->btcEventHist, sizeof(tSmeBtcEventHist) );
- }
-}
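
Inquiry and page activity are replayed from signed counters rather than stored events: each STARTED increments the counter, each STOPPED decrements it, and on replay a positive count is re-emitted as that many START events, a negative count as that many STOP events. A standalone sketch of that counter replay, where emit() stands in for btcSendBTEvent:

#include <stdio.h>

/* Stand-in for btcSendBTEvent: just prints the replayed event name. */
static void emit(const char *what)
{
    printf("replay %s\n", what);
}

/* Positive counts are replayed as that many START events, negative counts as
 * that many STOP events, matching the while(i--) / while(i++) loops above. */
static void replay_counter(int n, const char *start, const char *stop)
{
    if (n > 0)
        while (n--) emit(start);
    else if (n < 0)
        while (n++) emit(stop);
}

int main(void)
{
    replay_counter(2,  "INQUIRY_STARTED", "INQUIRY_STOPPED");   /* two starts */
    replay_counter(-1, "PAGE_STARTED",    "PAGE_STOPPED");      /* one stop  */
    return 0;
}
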
-
-static void btcPowerStateCB( v_PVOID_t pContext, tPmcState pmcState )
-{
- tpAniSirGlobal pMac = PMAC_STRUCT(pContext);
- if( FULL_POWER == pmcState )
- {
- btcReplayEvents( pMac );
- }
-}
-
-static void btcPowerOffloadStateCB(v_PVOID_t pContext, tANI_U32 sessionId,
- tPmcState pmcState )
-{
- tpAniSirGlobal pMac = PMAC_STRUCT(pContext);
- if(FULL_POWER == pmcState)
- {
- btcReplayEvents(pMac);
- }
-}
-
-
-/* ---------------------------------------------------------------------------
- \fn btcLogEvent
-  \brief API to log the current Bluetooth event
- \param hHal - The handle returned by macOpen.
-  \param pBtEvent - Pointer to a caller allocated object of type
- tSmeBtEvent. Caller owns the memory and is responsible
- for freeing it.
- \return None
- ---------------------------------------------------------------------------*/
-static void btcLogEvent (tHalHandle hHal, tpSmeBtEvent pBtEvent)
-{
- v_U8_t bdAddrRev[6];
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
- "Bluetooth Event %d received", __func__, pBtEvent->btEventType);
- switch(pBtEvent->btEventType)
- {
- case BT_EVENT_CREATE_SYNC_CONNECTION:
- case BT_EVENT_SYNC_CONNECTION_COMPLETE:
- case BT_EVENT_SYNC_CONNECTION_UPDATED:
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "SCO Connection: "
- "connectionHandle = %d status = %d linkType %d "
- "scoInterval %d scoWindow %d retransmisisonWindow = %d ",
- pBtEvent->uEventParam.btSyncConnection.connectionHandle,
- pBtEvent->uEventParam.btSyncConnection.status,
- pBtEvent->uEventParam.btSyncConnection.linkType,
- pBtEvent->uEventParam.btSyncConnection.scoInterval,
- pBtEvent->uEventParam.btSyncConnection.scoWindow,
- pBtEvent->uEventParam.btSyncConnection.retransmisisonWindow);
-
- bdAddrRev[0] = pBtEvent->uEventParam.btSyncConnection.bdAddr[5];
- bdAddrRev[1] = pBtEvent->uEventParam.btSyncConnection.bdAddr[4];
- bdAddrRev[2] = pBtEvent->uEventParam.btSyncConnection.bdAddr[3];
- bdAddrRev[3] = pBtEvent->uEventParam.btSyncConnection.bdAddr[2];
- bdAddrRev[4] = pBtEvent->uEventParam.btSyncConnection.bdAddr[1];
- bdAddrRev[5] = pBtEvent->uEventParam.btSyncConnection.bdAddr[0];
-
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "BD ADDR = "
- MAC_ADDRESS_STR, MAC_ADDR_ARRAY(bdAddrRev));
- break;
- case BT_EVENT_CREATE_ACL_CONNECTION:
- case BT_EVENT_ACL_CONNECTION_COMPLETE:
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "ACL Connection: "
- "connectionHandle = %d status = %d ",
- pBtEvent->uEventParam.btAclConnection.connectionHandle,
- pBtEvent->uEventParam.btAclConnection.status);
-
- bdAddrRev[0] = pBtEvent->uEventParam.btAclConnection.bdAddr[5];
- bdAddrRev[1] = pBtEvent->uEventParam.btAclConnection.bdAddr[4];
- bdAddrRev[2] = pBtEvent->uEventParam.btAclConnection.bdAddr[3];
- bdAddrRev[3] = pBtEvent->uEventParam.btAclConnection.bdAddr[2];
- bdAddrRev[4] = pBtEvent->uEventParam.btAclConnection.bdAddr[1];
- bdAddrRev[5] = pBtEvent->uEventParam.btAclConnection.bdAddr[0];
-
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "BD ADDR = "
- MAC_ADDRESS_STR, MAC_ADDR_ARRAY(bdAddrRev));
- break;
- case BT_EVENT_MODE_CHANGED:
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "ACL Mode change : "
- "connectionHandle %d mode %d ",
- pBtEvent->uEventParam.btAclModeChange.connectionHandle,
- pBtEvent->uEventParam.btAclModeChange.mode);
- break;
- case BT_EVENT_DISCONNECTION_COMPLETE:
- VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "Disconnect Event : "
- "connectionHandle %d ", pBtEvent->uEventParam.btAclModeChange.connectionHandle);
- break;
- default:
- break;
- }
- }
-
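
The logger flips the BD address byte order before printing because the event carries it reversed relative to the MAC_ADDRESS_STR convention. A small illustration of the same reversal (print_bd_addr_reversed is an illustrative name):

#include <stdint.h>
#include <stdio.h>

/* Reverse a 6-byte BD address for display, as the logger above does before
 * formatting it with the MAC_ADDRESS_STR convention. */
static void print_bd_addr_reversed(const uint8_t bd[6])
{
    uint8_t rev[6];
    for (int i = 0; i < 6; i++)
        rev[i] = bd[5 - i];
    printf("BD ADDR = %02x:%02x:%02x:%02x:%02x:%02x\n",
           rev[0], rev[1], rev[2], rev[3], rev[4], rev[5]);
}

int main(void)
{
    uint8_t bd[6] = {0x66, 0x55, 0x44, 0x33, 0x22, 0x11};
    print_bd_addr_reversed(bd);     /* prints 11:22:33:44:55:66 */
    return 0;
}
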
-/*
- Caller can check whether BTC's current event allows UAPSD. This doesn't affect
- BMPS.
- return: VOS_TRUE -- BTC is ready for UAPSD
- VOS_FALSE -- certain BT event is active, cannot enter UAPSD
-*/
-v_BOOL_t btcIsReadyForUapsd( tHalHandle hHal )
-{
- tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
- return( pMac->btc.btcUapsdOk );
-}
-
-/*
-  Based on the BT event, this function sets the flag on whether to allow UAPSD
- At this time, we are only interested in SCO and A2DP.
- A2DP tracking is through BT_EVENT_A2DP_STREAM_START and BT_EVENT_A2DP_STREAM_STOP
- SCO is through BT_EVENT_SYNC_CONNECTION_COMPLETE and BT_EVENT_DISCONNECTION_COMPLETE
- BT_EVENT_DEVICE_SWITCHED_OFF overwrites them all
-*/
-void btcUapsdCheck( tpAniSirGlobal pMac, tpSmeBtEvent pBtEvent )
-{
- v_U8_t i;
- v_BOOL_t fLastUapsdState = pMac->btc.btcUapsdOk, fMoreSCO = VOS_FALSE;
- switch( pBtEvent->btEventType )
- {
- case BT_EVENT_DISCONNECTION_COMPLETE:
- if( (VOS_FALSE == pMac->btc.btcUapsdOk) &&
- BT_INVALID_CONN_HANDLE != pBtEvent->uEventParam.btDisconnect.connectionHandle )
- {
- //Check whether all SCO connections are gone
- for(i=0; i < BT_MAX_SCO_SUPPORT; i++)
- {
- if( (BT_INVALID_CONN_HANDLE != pMac->btc.btcScoHandles[i]) &&
- (pMac->btc.btcScoHandles[i] != pBtEvent->uEventParam.btDisconnect.connectionHandle) )
- {
- //We still have outstanding SCO connection
- fMoreSCO = VOS_TRUE;
- }
- else if( pMac->btc.btcScoHandles[i] == pBtEvent->uEventParam.btDisconnect.connectionHandle )
- {
- pMac->btc.btcScoHandles[i] = BT_INVALID_CONN_HANDLE;
- }
- }
- if( !fMoreSCO && !pMac->btc.fA2DPUp )
- {
- //All SCO is disconnected
- smsLog( pMac, LOGE, "BT event (DISCONNECTION) happens, UAPSD-allowed flag (%d) change to TRUE",
- pMac->btc.btcUapsdOk );
- pMac->btc.btcUapsdOk = VOS_TRUE;
- }
- }
- break;
- case BT_EVENT_DEVICE_SWITCHED_OFF:
- smsLog( pMac, LOGE, "BT event (DEVICE_OFF) happens, UAPSD-allowed flag (%d) change to TRUE",
- pMac->btc.btcUapsdOk );
- //Clean up SCO
- for(i=0; i < BT_MAX_SCO_SUPPORT; i++)
- {
- pMac->btc.btcScoHandles[i] = BT_INVALID_CONN_HANDLE;
- }
- pMac->btc.fA2DPUp = VOS_FALSE;
- pMac->btc.btcUapsdOk = VOS_TRUE;
- break;
- case BT_EVENT_A2DP_STREAM_STOP:
- smsLog( pMac, LOGE, "BT event (A2DP_STREAM_STOP) happens, UAPSD-allowed flag (%d)",
- pMac->btc.btcUapsdOk );
- pMac->btc.fA2DPUp = VOS_FALSE;
- //Check whether SCO is on
- for(i=0; i < BT_MAX_SCO_SUPPORT; i++)
- {
- if(pMac->btc.btcScoHandles[i] != BT_INVALID_CONN_HANDLE)
- {
- break;
- }
- }
- if( BT_MAX_SCO_SUPPORT == i )
- {
- pMac->btc.fA2DPTrafStop = VOS_TRUE;
- smsLog( pMac, LOGE, "BT_EVENT_A2DP_STREAM_STOP: UAPSD-allowed flag is now %d",
- pMac->btc.btcUapsdOk );
- }
- break;
-
- case BT_EVENT_MODE_CHANGED:
- smsLog( pMac, LOGE, "BT event (BT_EVENT_MODE_CHANGED) happens, Mode (%d) UAPSD-allowed flag (%d)",
- pBtEvent->uEventParam.btAclModeChange.mode, pMac->btc.btcUapsdOk );
- if(pBtEvent->uEventParam.btAclModeChange.mode == BT_ACL_SNIFF)
- {
- //Check whether SCO is on
- for(i=0; i < BT_MAX_SCO_SUPPORT; i++)
- {
- if(pMac->btc.btcScoHandles[i] != BT_INVALID_CONN_HANDLE)
- {
- break;
- }
- }
- if( BT_MAX_SCO_SUPPORT == i )
- {
- if(VOS_TRUE == pMac->btc.fA2DPTrafStop)
- {
- pMac->btc.btcUapsdOk = VOS_TRUE;
- pMac->btc.fA2DPTrafStop = VOS_FALSE;
- }
- smsLog( pMac, LOGE, "BT_EVENT_MODE_CHANGED with Mode:%d UAPSD-allowed flag is now %d",
- pBtEvent->uEventParam.btAclModeChange.mode,pMac->btc.btcUapsdOk );
- }
- }
- break;
- case BT_EVENT_CREATE_SYNC_CONNECTION:
- {
- pMac->btc.btcUapsdOk = VOS_FALSE;
- smsLog( pMac, LOGE, "BT_EVENT_CREATE_SYNC_CONNECTION (%d) happens, UAPSD-allowed flag (%d) change to FALSE",
- pBtEvent->btEventType, pMac->btc.btcUapsdOk );
- }
- break;
- case BT_EVENT_SYNC_CONNECTION_COMPLETE:
- //Make sure it is a success
- if( BT_CONN_STATUS_FAIL != pBtEvent->uEventParam.btSyncConnection.status )
- {
- /* Save the handle for later use */
- for( i = 0; i < BT_MAX_SCO_SUPPORT; i++)
- {
- VOS_ASSERT(BT_INVALID_CONN_HANDLE != pBtEvent->uEventParam.btSyncConnection.connectionHandle);
- if( (BT_INVALID_CONN_HANDLE == pMac->btc.btcScoHandles[i]) &&
- (BT_INVALID_CONN_HANDLE != pBtEvent->uEventParam.btSyncConnection.connectionHandle))
- {
- pMac->btc.btcScoHandles[i] = pBtEvent->uEventParam.btSyncConnection.connectionHandle;
- break;
- }
- }
-
- if( i >= BT_MAX_SCO_SUPPORT )
- {
- smsLog(pMac, LOGE, FL("Too many SCO, ignore this one"));
- }
- }
- else
- {
- //Check whether SCO is on
- for(i=0; i < BT_MAX_SCO_SUPPORT; i++)
- {
- if(pMac->btc.btcScoHandles[i] != BT_INVALID_CONN_HANDLE)
- {
- break;
- }
- }
- /*If No Other Sco/A2DP is ON reenable UAPSD*/
- if( (BT_MAX_SCO_SUPPORT == i) && !pMac->btc.fA2DPUp)
- {
- pMac->btc.btcUapsdOk = VOS_TRUE;
- }
- smsLog(pMac, LOGE, FL("TSYNC complete failed"));
- }
- break;
- case BT_EVENT_A2DP_STREAM_START:
- smsLog( pMac, LOGE, "BT_EVENT_A2DP_STREAM_START (%d) happens, UAPSD-allowed flag (%d) change to FALSE",
- pBtEvent->btEventType, pMac->btc.btcUapsdOk );
- pMac->btc.fA2DPTrafStop = VOS_FALSE;
- pMac->btc.btcUapsdOk = VOS_FALSE;
- pMac->btc.fA2DPUp = VOS_TRUE;
- break;
- default:
- //No change for these events
- smsLog( pMac, LOGE, "BT event (%d) happens, UAPSD-allowed flag (%d) no change",
- pBtEvent->btEventType, pMac->btc.btcUapsdOk );
- break;
- }
- if(fLastUapsdState != pMac->btc.btcUapsdOk)
- {
- sme_QosTriggerUapsdChange( pMac );
- }
-}
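
The function above maintains btcUapsdOk incrementally, but the invariant it tracks reduces to: UAPSD is allowed only when no SCO handle is outstanding and A2DP is not streaming. A sketch of that predicate recomputed from scratch, with illustrative names:

#include <stdint.h>
#include <stdio.h>

#define MAX_SCO        3
#define INVALID_HANDLE 0xFFFF

/* UAPSD is allowed only when no SCO handle is tracked and A2DP is not
 * streaming; the driver maintains the same condition incrementally in
 * btcScoHandles[], fA2DPUp and btcUapsdOk. */
static int uapsd_allowed(const uint16_t sco[MAX_SCO], int a2dp_up)
{
    for (int i = 0; i < MAX_SCO; i++)
        if (sco[i] != INVALID_HANDLE)
            return 0;               /* an SCO link is still up */
    return !a2dp_up;
}

int main(void)
{
    uint16_t sco[MAX_SCO] = { INVALID_HANDLE, INVALID_HANDLE, INVALID_HANDLE };

    printf("idle:        %d\n", uapsd_allowed(sco, 0));    /* 1 */
    sco[0] = 0x0042;
    printf("SCO active:  %d\n", uapsd_allowed(sco, 0));    /* 0 */
    sco[0] = INVALID_HANDLE;
    printf("A2DP active: %d\n", uapsd_allowed(sco, 1));    /* 0 */
    return 0;
}
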
-
-/* ---------------------------------------------------------------------------
- \fn btcHandleCoexInd
- \brief API to handle Coex indication from WDI
-  \param hHal - The handle returned by macOpen.
-  \return eHalStatus
-          eHAL_STATUS_SUCCESS  success
-          eHAL_STATUS_FAILURE  failure
- ---------------------------------------------------------------------------*/
-eHalStatus btcHandleCoexInd(tHalHandle hHal, void* pMsg)
-{
- tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
- eHalStatus status = eHAL_STATUS_SUCCESS;
- tSirSmeCoexInd *pSmeCoexInd = (tSirSmeCoexInd *)pMsg;
-
- if (NULL == pMsg)
- {
- smsLog(pMac, LOGE, "in %s msg ptr is NULL", __func__);
- status = eHAL_STATUS_FAILURE;
- }
- else
- {
- // DEBUG
- smsLog(pMac, LOG1, "Coex indication in %s(), type %d",
- __func__, pSmeCoexInd->coexIndType);
-
- // suspend heartbeat monitoring
- if (pSmeCoexInd->coexIndType == SIR_COEX_IND_TYPE_DISABLE_HB_MONITOR)
- {
- // set heartbeat threshold CFG to zero
- ccmCfgSetInt(pMac, WNI_CFG_HEART_BEAT_THRESHOLD, 0, NULL, eANI_BOOLEAN_FALSE);
- pMac->btc.btcHBActive = VOS_FALSE;
- }
-
- // resume heartbeat monitoring
- else if (pSmeCoexInd->coexIndType == SIR_COEX_IND_TYPE_ENABLE_HB_MONITOR)
- {
- if (!pMac->btc.btcHBActive)
- {
- ccmCfgSetInt(pMac, WNI_CFG_HEART_BEAT_THRESHOLD, pMac->btc.btcHBCount, NULL, eANI_BOOLEAN_FALSE);
- pMac->btc.btcHBActive = VOS_TRUE;
- }
- }
- else if (pSmeCoexInd->coexIndType == SIR_COEX_IND_TYPE_SCAN_COMPROMISED)
- {
- pMac->btc.btcScanCompromise = VOS_TRUE;
- smsLog(pMac, LOGW, "Coex indication in %s(), type - SIR_COEX_IND_TYPE_SCAN_COMPROMISED",
- __func__);
- }
- else if (pSmeCoexInd->coexIndType == SIR_COEX_IND_TYPE_SCAN_NOT_COMPROMISED)
- {
- pMac->btc.btcScanCompromise = VOS_FALSE;
- smsLog(pMac, LOGW, "Coex indication in %s(), type - SIR_COEX_IND_TYPE_SCAN_NOT_COMPROMISED",
- __func__);
- }
- else if (pSmeCoexInd->coexIndType == SIR_COEX_IND_TYPE_DISABLE_AGGREGATION_IN_2p4)
- {
- if (pMac->roam.configParam.disableAggWithBtc)
- {
- ccmCfgSetInt(pMac, WNI_CFG_DEL_ALL_RX_BA_SESSIONS_2_4_G_BTC, 1,
- NULL, eANI_BOOLEAN_FALSE);
- pMac->btc.btcBssfordisableaggr[0] = pSmeCoexInd->coexIndData[0] & 0xFF;
- pMac->btc.btcBssfordisableaggr[1] = pSmeCoexInd->coexIndData[0] >> 8;
- pMac->btc.btcBssfordisableaggr[2] = pSmeCoexInd->coexIndData[1] & 0xFF;
- pMac->btc.btcBssfordisableaggr[3] = pSmeCoexInd->coexIndData[1] >> 8;
- pMac->btc.btcBssfordisableaggr[4] = pSmeCoexInd->coexIndData[2] & 0xFF;
- pMac->btc.btcBssfordisableaggr[5] = pSmeCoexInd->coexIndData[2] >> 8;
- smsLog(pMac, LOGW, "Coex indication in %s(), "
- "type - SIR_COEX_IND_TYPE_DISABLE_AGGREGATION_IN_2p4 "
- "for BSSID "MAC_ADDRESS_STR,__func__,
- MAC_ADDR_ARRAY(pMac->btc.btcBssfordisableaggr));
- }
- }
- else if (pSmeCoexInd->coexIndType == SIR_COEX_IND_TYPE_ENABLE_AGGREGATION_IN_2p4)
- {
- if (pMac->roam.configParam.disableAggWithBtc)
- {
- ccmCfgSetInt(pMac, WNI_CFG_DEL_ALL_RX_BA_SESSIONS_2_4_G_BTC, 0,
- NULL, eANI_BOOLEAN_FALSE);
- smsLog(pMac, LOGW,
- "Coex indication in %s(), type - SIR_COEX_IND_TYPE_ENABLE_AGGREGATION_IN_2p4",
- __func__);
- }
- }
- // unknown indication type
- else
- {
- smsLog(pMac, LOGE, "unknown Coex indication type in %s()", __func__);
- }
- }
-
- return(status);
-}
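
In the DISABLE_AGGREGATION_IN_2p4 branch above, the affected BSSID arrives packed into three 16-bit words of coexIndData and is unpacked low byte first. The same unpacking in isolation (unpack_bssid is an illustrative name):

#include <stdint.h>
#include <stdio.h>

/* Unpack a BSSID delivered as three 16-bit words, low byte first, the same
 * way the & 0xFF / >> 8 pairs in the handler above do. */
static void unpack_bssid(const uint16_t w[3], uint8_t bssid[6])
{
    for (int i = 0; i < 3; i++) {
        bssid[2 * i]     = (uint8_t)(w[i] & 0xFF);
        bssid[2 * i + 1] = (uint8_t)(w[i] >> 8);
    }
}

int main(void)
{
    uint16_t words[3] = { 0x2211, 0x4433, 0x6655 };
    uint8_t  bssid[6];

    unpack_bssid(words, bssid);
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]);
    /* prints 11:22:33:44:55:66 */
    return 0;
}
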
-
-#ifdef FEATURE_WLAN_DIAG_SUPPORT
-/* ---------------------------------------------------------------------------
- \fn btcDiagEventLog
-  \brief API to log the current Bluetooth event
- \param hHal - The handle returned by macOpen.
-  \param pBtEvent - Pointer to a caller allocated object of type
- tSmeBtEvent. Caller owns the memory and is responsible
- for freeing it.
- \return None
- ---------------------------------------------------------------------------*/
-static void btcDiagEventLog (tHalHandle hHal, tpSmeBtEvent pBtEvent)
-{
- //vos_event_wlan_btc_type *log_ptr = NULL;
- WLAN_VOS_DIAG_EVENT_DEF(btDiagEvent, vos_event_wlan_btc_type);
- {
- btDiagEvent.eventId = pBtEvent->btEventType;
- switch(pBtEvent->btEventType)
- {
- case BT_EVENT_CREATE_SYNC_CONNECTION:
- case BT_EVENT_SYNC_CONNECTION_COMPLETE:
- case BT_EVENT_SYNC_CONNECTION_UPDATED:
- btDiagEvent.connHandle = pBtEvent->uEventParam.btSyncConnection.connectionHandle;
- btDiagEvent.connStatus = pBtEvent->uEventParam.btSyncConnection.status;
- btDiagEvent.linkType = pBtEvent->uEventParam.btSyncConnection.linkType;
- btDiagEvent.scoInterval = pBtEvent->uEventParam.btSyncConnection.scoInterval;
- btDiagEvent.scoWindow = pBtEvent->uEventParam.btSyncConnection.scoWindow;
- btDiagEvent.retransWindow = pBtEvent->uEventParam.btSyncConnection.retransmisisonWindow;
- vos_mem_copy(btDiagEvent.btAddr, pBtEvent->uEventParam.btSyncConnection.bdAddr,
- sizeof(btDiagEvent.btAddr));
- break;
- case BT_EVENT_CREATE_ACL_CONNECTION:
- case BT_EVENT_ACL_CONNECTION_COMPLETE:
- btDiagEvent.connHandle = pBtEvent->uEventParam.btAclConnection.connectionHandle;
- btDiagEvent.connStatus = pBtEvent->uEventParam.btAclConnection.status;
- vos_mem_copy(btDiagEvent.btAddr, pBtEvent->uEventParam.btAclConnection.bdAddr,
- sizeof(btDiagEvent.btAddr));
- break;
- case BT_EVENT_MODE_CHANGED:
- btDiagEvent.connHandle = pBtEvent->uEventParam.btAclModeChange.connectionHandle;
- btDiagEvent.mode = pBtEvent->uEventParam.btAclModeChange.mode;
- break;
- case BT_EVENT_DISCONNECTION_COMPLETE:
- btDiagEvent.connHandle = pBtEvent->uEventParam.btAclModeChange.connectionHandle;
- break;
- default:
- break;
- }
- }
- WLAN_VOS_DIAG_EVENT_REPORT(&btDiagEvent, EVENT_WLAN_BTC);
-}
-#endif /* FEATURE_WLAN_DIAG_SUPPORT */
-#endif /* WLAN_MDM_CODE_REDUCTION_OPT*/
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrApiRoam.c b/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrApiRoam.c
index fac373a..14fca6d 100644
--- a/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrApiRoam.c
+++ b/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrApiRoam.c
@@ -1197,8 +1197,6 @@
pMac->roam.configParam.nActiveMinChnTime = CSR_ACTIVE_MIN_CHANNEL_TIME;
pMac->roam.configParam.nPassiveMaxChnTime = CSR_PASSIVE_MAX_CHANNEL_TIME;
pMac->roam.configParam.nPassiveMinChnTime = CSR_PASSIVE_MIN_CHANNEL_TIME;
- pMac->roam.configParam.nActiveMaxChnTimeBtc = CSR_ACTIVE_MAX_CHANNEL_TIME_BTC;
- pMac->roam.configParam.nActiveMinChnTimeBtc = CSR_ACTIVE_MIN_CHANNEL_TIME_BTC;
pMac->roam.configParam.disableAggWithBtc = eANI_BOOLEAN_TRUE;
#ifdef WLAN_AP_STA_CONCURRENCY
pMac->roam.configParam.nActiveMaxChnTimeConc = CSR_ACTIVE_MAX_CHANNEL_TIME_CONC;
@@ -1802,14 +1800,6 @@
cfgSetInt(pMac, WNI_CFG_PASSIVE_MINIMUM_CHANNEL_TIME,
pParam->nPassiveMinChnTime);
}
- if (pParam->nActiveMaxChnTimeBtc)
- {
- pMac->roam.configParam.nActiveMaxChnTimeBtc = pParam->nActiveMaxChnTimeBtc;
- }
- if (pParam->nActiveMinChnTimeBtc)
- {
- pMac->roam.configParam.nActiveMinChnTimeBtc = pParam->nActiveMinChnTimeBtc;
- }
#ifdef WLAN_AP_STA_CONCURRENCY
if (pParam->nActiveMaxChnTimeConc)
{
@@ -2092,8 +2082,6 @@
pParam->nActiveMinChnTime = pMac->roam.configParam.nActiveMinChnTime;
pParam->nPassiveMaxChnTime = pMac->roam.configParam.nPassiveMaxChnTime;
pParam->nPassiveMinChnTime = pMac->roam.configParam.nPassiveMinChnTime;
- pParam->nActiveMaxChnTimeBtc = pMac->roam.configParam.nActiveMaxChnTimeBtc;
- pParam->nActiveMinChnTimeBtc = pMac->roam.configParam.nActiveMinChnTimeBtc;
pParam->disableAggWithBtc = pMac->roam.configParam.disableAggWithBtc;
#ifdef WLAN_AP_STA_CONCURRENCY
pParam->nActiveMaxChnTimeConc = pMac->roam.configParam.nActiveMaxChnTimeConc;
@@ -13619,20 +13607,8 @@
smsLog(pMac, LOGE, FL("can not find any valid channel"));
*pBuf++ = 0; //tSirSupChnl->numChnl
}
- //Check whether it is ok to enter UAPSD
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- if( btcIsReadyForUapsd(pMac) )
-#endif /* WLAN_MDM_CODE_REDUCTION_OPT*/
- {
- *pBuf++ = uapsdMask;
- }
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- else
- {
- smsLog(pMac, LOGE, FL(" BTC doesn't allow UAPSD for uapsd_mask(0x%X)"), uapsdMask);
- *pBuf++ = 0;
- }
-#endif /* WLAN_MDM_CODE_REDUCTION_OPT*/
+
+ *pBuf++ = uapsdMask;
// move the entire BssDescription into the join request.
vos_mem_copy(pBuf, pBssDescription,
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrApiScan.c b/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrApiScan.c
index 19753fb..3832728 100644
--- a/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrApiScan.c
+++ b/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrApiScan.c
@@ -282,8 +282,6 @@
pScanRequest->maxChnTime = pMac->roam.configParam.nPassiveMaxChnTimeConc;
pScanRequest->minChnTime = pMac->roam.configParam.nPassiveMinChnTimeConc;
}
- pScanRequest->maxChnTimeBtc = pMac->roam.configParam.nActiveMaxChnTimeBtc;
- pScanRequest->minChnTimeBtc = pMac->roam.configParam.nActiveMinChnTimeBtc;
pScanRequest->restTime = pMac->roam.configParam.nRestTimeConc;
pScanRequest->min_rest_time = pMac->roam.configParam.min_rest_time_conc;
@@ -311,8 +309,6 @@
pScanRequest->maxChnTime = pMac->roam.configParam.nPassiveMaxChnTime;
pScanRequest->minChnTime = pMac->roam.configParam.nPassiveMinChnTime;
}
- pScanRequest->maxChnTimeBtc = pMac->roam.configParam.nActiveMaxChnTimeBtc;
- pScanRequest->minChnTimeBtc = pMac->roam.configParam.nActiveMinChnTimeBtc;
#ifdef WLAN_AP_STA_CONCURRENCY
/* No rest time/Idle time if no sessions are connected. */
@@ -726,7 +722,6 @@
*/
status = csrRoamGetConnectState(pMac,sessionId,&ConnectState);
if (HAL_STATUS_SUCCESS(status) &&
- pMac->btc.fA2DPUp &&
(eCSR_ASSOC_STATE_TYPE_INFRA_ASSOCIATED != ConnectState) &&
(eCSR_ASSOC_STATE_TYPE_IBSS_CONNECTED != ConnectState))
{
@@ -738,8 +733,6 @@
pScanRequest->minChnTime);
}
- pScanRequest->maxChnTimeBtc = pMac->roam.configParam.nActiveMaxChnTimeBtc;
- pScanRequest->minChnTimeBtc = pMac->roam.configParam.nActiveMinChnTimeBtc;
//Need to make the following atomic
pScanCmd->u.scanCmd.scanID = pMac->scan.nextScanID++; //let it wrap around
@@ -804,9 +797,6 @@
p11dScanCmd->u.scanCmd.reason = eCsrScanIdleScan;
scanReq.maxChnTime = pMac->roam.configParam.nActiveMaxChnTime;
scanReq.minChnTime = pMac->roam.configParam.nActiveMinChnTime;
-
- scanReq.maxChnTimeBtc = pMac->roam.configParam.nActiveMaxChnTimeBtc;
- scanReq.minChnTimeBtc = pMac->roam.configParam.nActiveMinChnTimeBtc;
}
if (pMac->roam.configParam.nInitialDwellTime)
{
@@ -1057,8 +1047,6 @@
scanReq.requestType = reqType;
scanReq.maxChnTime = pMac->roam.configParam.nActiveMaxChnTime;
scanReq.minChnTime = pMac->roam.configParam.nActiveMinChnTime;
- scanReq.maxChnTimeBtc = pMac->roam.configParam.nActiveMaxChnTimeBtc;
- scanReq.minChnTimeBtc = pMac->roam.configParam.nActiveMinChnTimeBtc;
//Scan with invalid sessionId.
//This results in SME using the first available session to scan.
status = csrScanRequest(pMac, CSR_SESSION_ID_INVALID, &scanReq,
@@ -1349,8 +1337,6 @@
pCommand->u.scanCmd.pContext = NULL;
pCommand->u.scanCmd.u.scanRequest.maxChnTime = pMac->roam.configParam.nActiveMaxChnTime;
pCommand->u.scanCmd.u.scanRequest.minChnTime = pMac->roam.configParam.nActiveMinChnTime;
- pCommand->u.scanCmd.u.scanRequest.maxChnTimeBtc = pMac->roam.configParam.nActiveMaxChnTimeBtc;
- pCommand->u.scanCmd.u.scanRequest.minChnTimeBtc = pMac->roam.configParam.nActiveMinChnTimeBtc;
pCommand->u.scanCmd.u.scanRequest.scanType = eSIR_ACTIVE_SCAN;
if(pSession->connectedProfile.SSID.length)
{
@@ -1526,8 +1512,6 @@
pCommand->u.scanCmd.pContext = NULL;
pCommand->u.scanCmd.u.scanRequest.maxChnTime = pMac->roam.configParam.nActiveMaxChnTime;
pCommand->u.scanCmd.u.scanRequest.minChnTime = pMac->roam.configParam.nActiveMinChnTime;
- pCommand->u.scanCmd.u.scanRequest.maxChnTimeBtc = pMac->roam.configParam.nActiveMaxChnTimeBtc;
- pCommand->u.scanCmd.u.scanRequest.minChnTimeBtc = pMac->roam.configParam.nActiveMinChnTimeBtc;
pCommand->u.scanCmd.u.scanRequest.scanType = eSIR_ACTIVE_SCAN;
if(pSession->pCurRoamProfile)
{
@@ -1640,8 +1624,6 @@
pCommand->u.scanCmd.pContext = NULL;
pCommand->u.scanCmd.u.scanRequest.maxChnTime = pMac->roam.configParam.nActiveMaxChnTime;
pCommand->u.scanCmd.u.scanRequest.minChnTime = pMac->roam.configParam.nActiveMinChnTime;
- pCommand->u.scanCmd.u.scanRequest.maxChnTimeBtc = pMac->roam.configParam.nActiveMaxChnTimeBtc;
- pCommand->u.scanCmd.u.scanRequest.minChnTimeBtc = pMac->roam.configParam.nActiveMinChnTimeBtc;
pCommand->u.scanCmd.u.scanRequest.scanType = eSIR_ACTIVE_SCAN;
vos_mem_copy(&pCommand->u.scanCmd.u.scanRequest.bssid, bAddr, sizeof(tCsrBssid));
//Put to the head of pending queue
@@ -5784,7 +5766,8 @@
msgLen = (tANI_U16)(sizeof( tSirSmeScanReq ) - sizeof( pMsg->channelList.channelNumber ) +
( sizeof( pMsg->channelList.channelNumber ) * pScanReq->ChannelInfo.numOfChannels )) +
- ( pScanReq->uIEFieldLen ) ;
+ ( pScanReq->uIEFieldLen ) +
+ pScanReq->num_vendor_oui * sizeof(struct vendor_oui);
pMsg = vos_mem_malloc(msgLen);
if ( NULL == pMsg )
@@ -5907,8 +5890,7 @@
pMsg->minChannelTime = pal_cpu_to_be32(minChnTime);
pMsg->maxChannelTime = pal_cpu_to_be32(maxChnTime);
- pMsg->minChannelTimeBtc = pMac->roam.configParam.nActiveMinChnTimeBtc;
- pMsg->maxChannelTimeBtc = pMac->roam.configParam.nActiveMaxChnTimeBtc;
+
//hidden SSID option
pMsg->hiddenSsid = pScanReqParam->hiddenSsid;
/* maximum rest time */
@@ -5949,6 +5931,36 @@
pMsg->backgroundScanMode = eSIR_ROAMING_SCAN;
}
+ pMsg->enable_scan_randomization =
+ pScanReq->enable_scan_randomization;
+ if (pMsg->enable_scan_randomization) {
+ vos_mem_copy(pMsg->mac_addr, pScanReq->mac_addr,
+ VOS_MAC_ADDR_SIZE);
+ vos_mem_copy(pMsg->mac_addr_mask, pScanReq->mac_addr_mask,
+ VOS_MAC_ADDR_SIZE);
+ }
+
+ pMsg->ie_whitelist = pScanReq->ie_whitelist;
+ if (pMsg->ie_whitelist)
+ vos_mem_copy(pMsg->probe_req_ie_bitmap,
+ pScanReq->probe_req_ie_bitmap,
+ PROBE_REQ_BITMAP_LEN * sizeof(uint32_t));
+ pMsg->num_vendor_oui = pScanReq->num_vendor_oui;
+ pMsg->oui_field_len = pScanReq->num_vendor_oui *
+ sizeof(struct vendor_oui);
+ pMsg->oui_field_offset = (tANI_U16)(sizeof( tSirSmeScanReq ) -
+ sizeof( pMsg->channelList.channelNumber ) +
+ (sizeof( pMsg->channelList.channelNumber ) *
+ pScanReq->ChannelInfo.numOfChannels )) +
+ pScanReq->uIEFieldLen;
+
+ if (pScanReq->num_vendor_oui != 0)
+ {
+ vos_mem_copy((tANI_U8 *)pMsg + pMsg->oui_field_offset,
+ (uint8_t*)(pScanReq->voui),
+ pMsg->oui_field_len);
+ }
+
}while(0);
smsLog(pMac, LOG1, FL("domainIdCurrent %s (%d) scanType %s (%d)"
"bssType %s (%d), requestType %s(%d)"
@@ -6428,6 +6440,7 @@
pDstReq->pIEField = NULL;
pDstReq->ChannelInfo.ChannelList = NULL;
pDstReq->SSIDs.SSIDList = NULL;
+ pDstReq->voui = NULL;
if(pSrcReq->uIEFieldLen == 0)
{
@@ -6616,6 +6629,35 @@
pDstReq->p2pSearch = pSrcReq->p2pSearch;
pDstReq->skipDfsChnlInP2pSearch = pSrcReq->skipDfsChnlInP2pSearch;
+ if (pSrcReq->num_vendor_oui == 0)
+ {
+ pDstReq->num_vendor_oui = 0;
+ pDstReq->voui = NULL;
+ }
+ else
+ {
+ pDstReq->voui = vos_mem_malloc(pSrcReq->num_vendor_oui *
+ sizeof(*pDstReq->voui));
+ if (NULL == pDstReq->voui)
+ status = eHAL_STATUS_FAILURE;
+ else
+ status = eHAL_STATUS_SUCCESS;
+
+ if (HAL_STATUS_SUCCESS(status))
+ {
+ pDstReq->num_vendor_oui = pSrcReq->num_vendor_oui;
+ vos_mem_copy(pDstReq->voui,
+ pSrcReq->voui,
+ pSrcReq->num_vendor_oui *
+ sizeof(*pDstReq->voui));
+ }
+ else
+ {
+ pDstReq->num_vendor_oui = 0;
+ smsLog(pMac, LOGE, FL("No memory for voui"));
+ break;
+ }
+ }
}
}while(0);
@@ -6650,6 +6692,13 @@
}
pReq->SSIDs.numOfSSIDs = 0;
+ if(pReq->voui)
+ {
+ vos_mem_free(pReq->voui);
+ pReq->voui = NULL;
+ }
+ pReq->num_vendor_oui = 0;
+
return eHAL_STATUS_SUCCESS;
}
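
The new scan-request plumbing above sizes the flat message as header + channel list + IE field + num_vendor_oui trailing records, records where the OUI array starts in oui_field_offset, and frees voui in the teardown path. A generic sketch of that trailing-array layout; the struct names and field widths are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative layout only: a fixed header, then a variable IE blob, then a
 * trailing array of 3-byte OUI records at a recorded offset. */
struct oui { uint8_t b[3]; };

struct scan_msg_hdr {
    uint16_t msg_len;       /* total size of the flat message */
    uint16_t ie_len;        /* size of the IE blob that follows the header */
    uint16_t oui_offset;    /* where the OUI records start */
    uint16_t num_oui;       /* how many OUI records follow */
};

int main(void)
{
    const uint8_t    ies[]   = { 0xdd, 0x03, 0xaa, 0xbb, 0xcc };    /* fake IE blob */
    const struct oui ouis[2] = { {{0x00, 0x03, 0x7f}}, {{0x00, 0x50, 0xf2}} };

    uint16_t oui_offset = (uint16_t)(sizeof(struct scan_msg_hdr) + sizeof(ies));
    uint16_t msg_len    = (uint16_t)(oui_offset + sizeof(ouis));

    uint8_t *msg = malloc(msg_len);
    if (msg == NULL)
        return 1;

    struct scan_msg_hdr hdr = { msg_len, (uint16_t)sizeof(ies), oui_offset, 2 };
    memcpy(msg, &hdr, sizeof(hdr));
    memcpy(msg + sizeof(hdr), ies, sizeof(ies));
    memcpy(msg + oui_offset, ouis, sizeof(ouis));   /* trailing OUI array */

    printf("msg_len=%u oui_offset=%u\n", (unsigned)msg_len, (unsigned)oui_offset);
    free(msg);
    return 0;
}
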
@@ -7630,10 +7679,6 @@
pScanCmd->u.scanCmd.u.scanRequest.minChnTime =
pMac->roam.configParam.nActiveMinChnTime;
}
- pScanCmd->u.scanCmd.u.scanRequest.maxChnTimeBtc =
- pMac->roam.configParam.nActiveMaxChnTimeBtc;
- pScanCmd->u.scanCmd.u.scanRequest.minChnTimeBtc =
- pMac->roam.configParam.nActiveMinChnTimeBtc;
if(pProfile->BSSIDs.numOfBSSIDs == 1)
{
vos_mem_copy(pScanCmd->u.scanCmd.u.scanRequest.bssid,
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrLogDump.c b/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrLogDump.c
index 0a64afd..3bdfc0d 100644
--- a/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrLogDump.c
+++ b/drivers/staging/qcacld-2.0/CORE/SME/src/csr/csrLogDump.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -31,7 +31,6 @@
============================================================================*/
#include "aniGlobal.h"
#include "csrApi.h"
-#include "btcApi.h"
#include "logDump.h"
#include "smsDebug.h"
#include "smeInside.h"
@@ -55,50 +54,7 @@
}
return p;
}
-static char *dump_btcSetEvent( tpAniSirGlobal pMac, tANI_U32 arg1,
- tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p )
-{
- tSmeBtEvent btEvent;
- if( arg1 < BT_EVENT_TYPE_MAX )
- {
- smsLog(pMac, LOGE, FL(" signal BT event (%d) handle (%d) 3rd param(%d)"), arg1, arg2, arg3);
- vos_mem_zero(&btEvent, sizeof(tSmeBtEvent));
- btEvent.btEventType = arg1;
- switch( arg1 )
- {
- case BT_EVENT_SYNC_CONNECTION_COMPLETE:
- case BT_EVENT_SYNC_CONNECTION_UPDATED:
- btEvent.uEventParam.btSyncConnection.connectionHandle = (v_U16_t)arg2;
- btEvent.uEventParam.btSyncConnection.status = (v_U8_t)arg3;
- break;
- case BT_EVENT_DISCONNECTION_COMPLETE:
- btEvent.uEventParam.btDisconnect.connectionHandle = (v_U16_t)arg2;
- break;
- case BT_EVENT_CREATE_ACL_CONNECTION:
- case BT_EVENT_ACL_CONNECTION_COMPLETE:
- btEvent.uEventParam.btAclConnection.connectionHandle = (v_U16_t)arg2;
- btEvent.uEventParam.btAclConnection.status = (v_U8_t)arg3;
- break;
- case BT_EVENT_MODE_CHANGED:
- btEvent.uEventParam.btAclModeChange.connectionHandle = (v_U16_t)arg2;
- break;
- default:
- break;
- }
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- if(HAL_STATUS_SUCCESS(sme_AcquireGlobalLock( &pMac->sme )))
- {
- btcSignalBTEvent(pMac, &btEvent);
- sme_ReleaseGlobalLock( &pMac->sme );
- }
-#endif
- }
- else
- {
- smsLog(pMac, LOGE, FL(" invalid event (%d)"), arg1);
- }
- return p;
-}
+
static char* dump_csrApConcScanParams( tpAniSirGlobal pMac, tANI_U32 arg1,
tANI_U32 arg2, tANI_U32 arg3, tANI_U32 arg4, char *p )
{
@@ -123,7 +79,6 @@
static tDumpFuncEntry csrMenuDumpTable[] = {
{0, "CSR (850-860)", NULL},
{851, "CSR: CSR testing connection to AniNet", dump_csr},
- {852, "BTC: Fake BT events (event, handle)", dump_btcSetEvent},
{853, "CSR: Split Scan related params", dump_csrApConcScanParams},
};
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/src/pmc/pmc.c b/drivers/staging/qcacld-2.0/CORE/SME/src/pmc/pmc.c
index 6a0ae87a..d5caa8a 100644
--- a/drivers/staging/qcacld-2.0/CORE/SME/src/pmc/pmc.c
+++ b/drivers/staging/qcacld-2.0/CORE/SME/src/pmc/pmc.c
@@ -1193,77 +1193,36 @@
else
{
pMac->pmc.uapsdSessionRequired = TRUE;
- //Check BTC state
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- if( btcIsReadyForUapsd( pMac ) )
-#endif /* WLAN_MDM_CODE_REDUCTION_OPT*/
+
+ /* Put device in BMPS mode first. This step should NEVER fail.
+ That is why there is no need to buffer the UAPSD request. */
+ if(pmcEnterRequestBmpsState(hHal) != eHAL_STATUS_SUCCESS)
{
- /* Put device in BMPS mode first. This step should NEVER fail.
- That is why no need to buffer the UAPSD request*/
- if(pmcEnterRequestBmpsState(hHal) != eHAL_STATUS_SUCCESS)
- {
- pmcLog(pMac, LOGE, "PMC: Device in Full Power. Enter Request Bmps failed. "
- "UAPSD request will be dropped ");
- return eHAL_STATUS_FAILURE;
- }
+ pmcLog(pMac, LOGE, "PMC: Device in Full Power. Enter Request Bmps failed. "
+ "UAPSD request will be dropped ");
+ return eHAL_STATUS_FAILURE;
}
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- else
- {
- (void)pmcStartTrafficTimer(hHal, pMac->pmc.bmpsConfig.trafficMeasurePeriod);
- }
-#endif /* WLAN_MDM_CODE_REDUCTION_OPT*/
}
break;
case BMPS:
- //It is already in BMPS mode, check BTC state
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- if( btcIsReadyForUapsd(pMac) )
-#endif /* WLAN_MDM_CODE_REDUCTION_OPT*/
- {
/* Tell MAC to have device enter UAPSD mode. */
- if (pmcIssueCommand(hHal, 0, eSmeCommandEnterUapsd, NULL, 0, FALSE)
- != eHAL_STATUS_SUCCESS)
- {
- pmcLog(pMac, LOGE, "PMC: failure to send message "
- "eWNI_PMC_ENTER_BMPS_REQ");
- return eHAL_STATUS_FAILURE;
- }
- }
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- else
+ if (pmcIssueCommand(hHal, 0, eSmeCommandEnterUapsd, NULL, 0, FALSE)
+ != eHAL_STATUS_SUCCESS)
{
- //Not ready for UAPSD at this time, save it first and wake up the chip
- pmcLog(pMac, LOGE, " PMC state = %d",pMac->pmc.pmcState);
- pMac->pmc.uapsdSessionRequired = TRUE;
- /* While BTC traffic is going on, STA can be in BMPS
- * and need not go to Full Power */
- //fFullPower = VOS_TRUE;
+ pmcLog(pMac, LOGE, "PMC: failure to send message "
+ "eWNI_PMC_ENTER_BMPS_REQ");
+ return eHAL_STATUS_FAILURE;
}
-#endif /* WLAN_MDM_CODE_REDUCTION_OPT*/
break;
case REQUEST_START_UAPSD:
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- if( !btcIsReadyForUapsd(pMac) )
- {
- //BTC rejects UAPSD, bring it back to full power
- fFullPower = VOS_TRUE;
- }
-#endif
+
break;
case REQUEST_BMPS:
/* Buffer request for UAPSD mode. */
pMac->pmc.uapsdSessionRequired = TRUE;
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- if( !btcIsReadyForUapsd(pMac) )
- {
- //BTC rejects UAPSD, bring it back to full power
- fFullPower = VOS_TRUE;
- }
-#endif /* WLAN_MDM_CODE_REDUCTION_OPT*/
break;
default:
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/src/pmc/pmcApi.c b/drivers/staging/qcacld-2.0/CORE/SME/src/pmc/pmcApi.c
index 9d791ec..48a8079 100644
--- a/drivers/staging/qcacld-2.0/CORE/SME/src/pmc/pmcApi.c
+++ b/drivers/staging/qcacld-2.0/CORE/SME/src/pmc/pmcApi.c
@@ -2854,14 +2854,17 @@
return eHAL_STATUS_FAILURE;
}
- pRequestBuf = vos_mem_malloc(sizeof(tSirPNOScanReq));
+ pRequestBuf = vos_mem_malloc(sizeof(tSirPNOScanReq) +
+ (pRequest->num_vendor_oui) *
+ (sizeof(struct vendor_oui)));
if (NULL == pRequestBuf)
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for PNO request", __func__);
return eHAL_STATUS_FAILED_ALLOC;
}
- vos_mem_copy(pRequestBuf, pRequest, sizeof(tSirPNOScanReq));
+ vos_mem_copy(pRequestBuf, pRequest, sizeof(tSirPNOScanReq) +
+ (pRequest->num_vendor_oui) * (sizeof(struct vendor_oui)));
/*Must translate the mode first*/
ucDot11Mode = (tANI_U8) csrTranslateToWNICfgDot11Mode(pMac,
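The hunk above sizes both the allocation and the copy as the fixed tSirPNOScanReq header plus num_vendor_oui trailing struct vendor_oui entries, so the variable-length OUI list that follows the header travels with the request in a single buffer. A minimal sketch of this trailing-array sizing pattern in plain C follows; the struct names below are illustrative stand-ins, not the driver's types.

    #include <stdlib.h>
    #include <string.h>

    struct oui_entry { unsigned char oui[3]; };   /* stand-in for struct vendor_oui */

    struct pno_req {
            unsigned int num_oui;                 /* count of trailing oui_entry items */
            /* ... fixed request fields ... */
    };

    /* Duplicate a request together with its trailing OUI list: size the block
     * as header + N trailing elements and copy both with a single memcpy(). */
    static struct pno_req *pno_req_dup(const struct pno_req *src)
    {
            size_t total = sizeof(*src) + src->num_oui * sizeof(struct oui_entry);
            struct pno_req *dst = malloc(total);

            if (!dst)
                    return NULL;
            memcpy(dst, src, total);
            return dst;
    }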
diff --git a/drivers/staging/qcacld-2.0/CORE/SME/src/sme_common/sme_Api.c b/drivers/staging/qcacld-2.0/CORE/SME/src/sme_common/sme_Api.c
index e2b2876..a4e85e6 100644
--- a/drivers/staging/qcacld-2.0/CORE/SME/src/sme_common/sme_Api.c
+++ b/drivers/staging/qcacld-2.0/CORE/SME/src/sme_common/sme_Api.c
@@ -1471,12 +1471,6 @@
break;
}
- status = btcOpen(pMac);
- if ( ! HAL_STATUS_SUCCESS( status ) ) {
- smsLog( pMac, LOGE,
- "btcOpen open failed during initialization with status=%d", status );
- break;
- }
#endif
#ifdef FEATURE_OEM_DATA_SUPPORT
status = oemData_OemDataReqOpen(pMac);
@@ -2159,14 +2153,6 @@
smsLog( pMac, LOGE, "pmcReady failed with status=%d", status );
break;
}
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- if(VOS_STATUS_SUCCESS != btcReady(hHal))
- {
- status = eHAL_STATUS_FAILURE;
- smsLog( pMac, LOGE, "btcReady failed");
- break;
- }
-#endif
#if defined WLAN_FEATURE_VOWIFI
if(VOS_STATUS_SUCCESS != rrmReady(hHal))
@@ -2186,14 +2172,6 @@
smsLog( pMac, LOGE, "csrReady failed with status=%d", status );
break;
}
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- if(VOS_STATUS_SUCCESS != btcReady(hHal))
- {
- status = eHAL_STATUS_FAILURE;
- smsLog( pMac, LOGE, "btcReady failed");
- break;
- }
-#endif
#if defined WLAN_FEATURE_VOWIFI
if(VOS_STATUS_SUCCESS != rrmReady(hHal))
@@ -2831,19 +2809,6 @@
smsLog( pMac, LOGE, "Empty rsp message for meas (eWNI_SME_ACTION_FRAME_SEND_CNF), nothing to process");
}
break;
- case eWNI_SME_COEX_IND:
- MTRACE(vos_trace(VOS_MODULE_ID_SME, TRACE_CODE_SME_RX_WDA_MSG,
- NO_SESSION, pMsg->type));
- if(pMsg->bodyptr)
- {
- status = btcHandleCoexInd((void *)pMac, pMsg->bodyptr);
- vos_mem_free(pMsg->bodyptr);
- }
- else
- {
- smsLog(pMac, LOGE, "Empty rsp message for meas (eWNI_SME_COEX_IND), nothing to process");
- }
- break;
#ifdef FEATURE_WLAN_SCAN_PNO
case eWNI_SME_PREF_NETWORK_FOUND_IND:
@@ -3539,13 +3504,6 @@
#endif
#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- status = btcClose(hHal);
- if ( ! HAL_STATUS_SUCCESS( status ) ) {
- smsLog( pMac, LOGE, "BTC close failed during sme close with status=%d",
- status );
- fail_status = status;
- }
-
status = sme_QosClose(pMac);
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "Qos close failed during sme close with status=%d",
@@ -6832,94 +6790,6 @@
}
/* ---------------------------------------------------------------------------
- \fn sme_BtcSignalBtEvent
- \brief API to signal Bluetooth (BT) event to the WLAN driver. Based on the
- BT event type and the current operating mode of Libra (full power,
- BMPS, UAPSD etc), appropriate Bluetooth Coexistence (BTC) strategy
- would be employed.
- \param hHal - The handle returned by macOpen.
- \param pBtEvent - Pointer to a caller allocated object of type tSmeBtEvent
- Caller owns the memory and is responsible for freeing it.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE BT Event not passed to HAL. This can happen
- if BTC execution mode is set to BTC_WLAN_ONLY
- or BTC_PTA_ONLY.
- VOS_STATUS_SUCCESS BT Event passed to HAL
- ---------------------------------------------------------------------------*/
-VOS_STATUS sme_BtcSignalBtEvent (tHalHandle hHal, tpSmeBtEvent pBtEvent)
-{
- VOS_STATUS status = VOS_STATUS_E_FAILURE;
-
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
-
- MTRACE(vos_trace(VOS_MODULE_ID_SME,
- TRACE_CODE_SME_RX_HDD_BTC_SIGNALEVENT, NO_SESSION, 0));
- if ( eHAL_STATUS_SUCCESS == sme_AcquireGlobalLock( &pMac->sme ) )
- {
- status = btcSignalBTEvent (hHal, pBtEvent);
- sme_ReleaseGlobalLock( &pMac->sme );
- }
-#endif
- return (status);
-}
-
-/* ---------------------------------------------------------------------------
- \fn sme_BtcSetConfig
- \brief API to change the current Bluetooth Coexistence (BTC) configuration
- This function should be invoked only after CFG download has completed.
- Calling it after sme_HDDReadyInd is recommended.
- \param hHal - The handle returned by macOpen.
- \param pSmeBtcConfig - Pointer to a caller allocated object of type tSmeBtcConfig.
- Caller owns the memory and is responsible for freeing it.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE Config not passed to HAL.
- VOS_STATUS_SUCCESS Config passed to HAL
- ---------------------------------------------------------------------------*/
-VOS_STATUS sme_BtcSetConfig (tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig)
-{
- VOS_STATUS status = VOS_STATUS_E_FAILURE;
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
- MTRACE(vos_trace(VOS_MODULE_ID_SME,
- TRACE_CODE_SME_RX_HDD_BTC_SETCONFIG, NO_SESSION, 0));
- if ( eHAL_STATUS_SUCCESS == sme_AcquireGlobalLock( &pMac->sme ) )
- {
- status = btcSetConfig (hHal, pSmeBtcConfig);
- sme_ReleaseGlobalLock( &pMac->sme );
- }
-#endif
- return (status);
-}
-
-/* ---------------------------------------------------------------------------
- \fn sme_BtcGetConfig
- \brief API to retrieve the current Bluetooth Coexistence (BTC) configuration
- \param hHal - The handle returned by macOpen.
- \param pSmeBtcConfig - Pointer to a caller allocated object of type
- tSmeBtcConfig. Caller owns the memory and is responsible
- for freeing it.
- \return VOS_STATUS
- VOS_STATUS_E_FAILURE - failure
- VOS_STATUS_SUCCESS success
- ---------------------------------------------------------------------------*/
-VOS_STATUS sme_BtcGetConfig (tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig)
-{
- VOS_STATUS status = VOS_STATUS_E_FAILURE;
-#ifndef WLAN_MDM_CODE_REDUCTION_OPT
- tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
-
- MTRACE(vos_trace(VOS_MODULE_ID_SME,
- TRACE_CODE_SME_RX_HDD_BTC_GETCONFIG, NO_SESSION, 0));
- if ( eHAL_STATUS_SUCCESS == sme_AcquireGlobalLock( &pMac->sme ) )
- {
- status = btcGetConfig (hHal, pSmeBtcConfig);
- sme_ReleaseGlobalLock( &pMac->sme );
- }
-#endif
- return (status);
-}
-/* ---------------------------------------------------------------------------
\fn sme_SetCfgPrivacy
\brief API to set configure privacy parameters
\param hHal - The handle returned by macOpen.
diff --git a/drivers/staging/qcacld-2.0/CORE/SVC/inc/wlan_btc_svc.h b/drivers/staging/qcacld-2.0/CORE/SVC/inc/wlan_btc_svc.h
deleted file mode 100644
index 7eee2de..0000000
--- a/drivers/staging/qcacld-2.0/CORE/SVC/inc/wlan_btc_svc.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2012 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/******************************************************************************
- * wlan_btc_svc.h
- *
- ******************************************************************************/
-
-#ifndef WLAN_BTC_SVC_H
-#define WLAN_BTC_SVC_H
-
-void send_btc_nlink_msg (int type, int dest_pid);
-int btc_activate_service(void *pAdapter);
-
-#endif
diff --git a/drivers/staging/qcacld-2.0/CORE/SVC/inc/wlan_nlink_srv.h b/drivers/staging/qcacld-2.0/CORE/SVC/inc/wlan_nlink_srv.h
index b84eb67..051162f 100644
--- a/drivers/staging/qcacld-2.0/CORE/SVC/inc/wlan_nlink_srv.h
+++ b/drivers/staging/qcacld-2.0/CORE/SVC/inc/wlan_nlink_srv.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -66,8 +66,15 @@
#endif /* WLAN_KD_READY_NOTIFIER */
int nl_srv_register(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler);
int nl_srv_unregister(tWlanNlModTypes msg_type, nl_srv_msg_callback msg_handler);
-int nl_srv_ucast(struct sk_buff * skb, int dst_pid, int flag);
-int nl_srv_bcast(struct sk_buff * skb);
+
+#ifdef CNSS_GENL
+int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag,
+ int app_id, int mcgroup_id);
+int nl_srv_bcast(struct sk_buff *skb, int mcgroup_id, int app_id);
+#else
+int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag);
+int nl_srv_bcast(struct sk_buff *skb);
+#endif
int nl_srv_is_initialized(void);
#else
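Because the CNSS_GENL build changes the nl_srv_ucast()/nl_srv_bcast() signatures above, call sites that must compile in both configurations usually hide the difference behind a small static wrapper, exactly as the wlan_logging_sock_svc.c, wlan_ptt_sock_svc.c and dbglog_host.c hunks later in this patch do. A hedged sketch of that pattern; my_bcast() is a hypothetical name, and the group/app ids shown simply mirror the host-logs usage elsewhere in this patch.

    /* Sketch only: pick the prototype that matches the build so callers need a
     * single helper regardless of CNSS_GENL. */
    static int my_bcast(struct sk_buff *skb)
    {
    #ifdef CNSS_GENL
            return nl_srv_bcast(skb, CLD80211_MCGRP_HOST_LOGS, ANI_NL_MSG_LOG);
    #else
            return nl_srv_bcast(skb);
    #endif
    }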
diff --git a/drivers/staging/qcacld-2.0/CORE/SVC/inc/wlan_ptt_sock_svc.h b/drivers/staging/qcacld-2.0/CORE/SVC/inc/wlan_ptt_sock_svc.h
index 9a2c718..a4e7148 100644
--- a/drivers/staging/qcacld-2.0/CORE/SVC/inc/wlan_ptt_sock_svc.h
+++ b/drivers/staging/qcacld-2.0/CORE/SVC/inc/wlan_ptt_sock_svc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -93,7 +93,7 @@
* Length : 4 bytes [LEN_PAYLOAD]
* Payload : LEN_PAYLOAD bytes
*/
-int ptt_sock_activate_svc(void *pAdapter);
+int ptt_sock_activate_svc(void *hdd_ctx);
int ptt_sock_send_msg_to_app(tAniHdr *wmsg, int radio, int src_mod, int pid);
/*
@@ -110,6 +110,19 @@
tAniNlModTypes type; // module id
int pid; // process id
} tAniNlAppRegReq;
+
+/**
+ * struct sptt_app_reg_req - PTT register request structure
+ * @radio: Radio ID
+ * @wmsg: ANI header
+ *
+ * payload structure received as nl data from PTT app/user space
+ */
+typedef struct sptt_app_reg_req {
+ int radio;
+ tAniHdr wmsg;
+} ptt_app_reg_req;
+
typedef struct sAniNlAppRegRsp {
tAniHdr wniHdr; // Generic WNI msg header
tAniNlAppRegReq regReq; // The original request msg
diff --git a/drivers/staging/qcacld-2.0/CORE/SVC/src/btc/wlan_btc_svc.c b/drivers/staging/qcacld-2.0/CORE/SVC/src/btc/wlan_btc_svc.c
deleted file mode 100644
index 7d4aeaf..0000000
--- a/drivers/staging/qcacld-2.0/CORE/SVC/src/btc/wlan_btc_svc.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Copyright (c) 2013 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-
-/******************************************************************************
- * wlan_btc_svc.c
- *
- ******************************************************************************/
-#include <wlan_nlink_srv.h>
-#include <wlan_btc_svc.h>
-#include <halTypes.h>
-#include <vos_status.h>
-#include <btcApi.h>
-#include <wlan_hdd_includes.h>
-#include <vos_trace.h>
-// Global variables
-static struct hdd_context_s *pHddCtx;
-
-static int gWiFiChannel; /* WiFi associated channel 1-13, or 0 (none) */
-static int gAmpChannel; /* AMP associated channel 1-13, or 0 (none) */
-static int gBtcDriverMode = WLAN_HDD_INFRA_STATION; /* Driver mode in BTC */
-
-
-// Forward declrarion
-static int btc_msg_callback (struct sk_buff * skb);
-/*
- * Send a netlink message to the user space.
- * Destination pid as zero implies broadcast
- */
-void send_btc_nlink_msg (int type, int dest_pid)
-{
- struct sk_buff *skb;
- struct nlmsghdr *nlh;
- tAniMsgHdr *aniHdr;
- tWlanAssocData *assocData;
- skb = alloc_skb(NLMSG_SPACE(WLAN_NL_MAX_PAYLOAD), GFP_KERNEL);
- if(skb == NULL) {
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "BTC: alloc_skb failed\n");
- return;
- }
- nlh = (struct nlmsghdr *)skb->data;
- nlh->nlmsg_pid = 0; /* from kernel */
- nlh->nlmsg_flags = 0;
- nlh->nlmsg_seq = 0;
- nlh->nlmsg_type = WLAN_NL_MSG_BTC;
- aniHdr = NLMSG_DATA(nlh);
- aniHdr->type = type;
-
- /* Set BTC driver mode correctly based on received events type */
- if(type == WLAN_BTC_SOFTAP_BSS_START)
- {
- /* Event is SoftAP BSS Start set BTC driver mode to SoftAP */
- gBtcDriverMode = WLAN_HDD_SOFTAP;
- }
- if(type == WLAN_STA_ASSOC_DONE_IND)
- {
- /* Event is STA Assoc done set BTC driver mode to INFRA STA*/
- gBtcDriverMode = WLAN_HDD_INFRA_STATION;
- }
-
- switch( type )
- {
- case WLAN_STA_DISASSOC_DONE_IND:
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_LOW,
- "WiFi unassociated; gAmpChannel %d gWiFiChannel %d", gAmpChannel, gWiFiChannel);
-
- /* If AMP is using a channel (non-zero), no message sent.
- Or, if WiFi wasn't using a channel before, no message sent.
- Logic presumes same channel has to be used for WiFi and AMP if both are active.
- In any case, track the WiFi channel in use (none) */
- if((gAmpChannel != 0) || (gWiFiChannel == 0))
- {
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_LOW,
- "No msg for AFH will be sent");
- gWiFiChannel = 0;
- kfree_skb(skb);
- return;
- }
- gWiFiChannel = 0;
-
- /* No Break: Fall into next cases */
-
- case WLAN_MODULE_UP_IND:
- case WLAN_MODULE_DOWN_IND:
- aniHdr->length = 0;
- nlh->nlmsg_len = NLMSG_LENGTH((sizeof(tAniMsgHdr)));
- skb_put(skb, NLMSG_SPACE(sizeof(tAniMsgHdr)));
- break;
- case WLAN_BTC_SOFTAP_BSS_START:
- case WLAN_BTC_QUERY_STATE_RSP:
- case WLAN_STA_ASSOC_DONE_IND:
- aniHdr->length = sizeof(tWlanAssocData);
- nlh->nlmsg_len = NLMSG_LENGTH((sizeof(tAniMsgHdr) + sizeof(tWlanAssocData)));
- assocData = ( tWlanAssocData *)((char*)aniHdr + sizeof(tAniMsgHdr));
-
- assocData->channel = hdd_get_operating_channel( pHddCtx, gBtcDriverMode );
-
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_LOW,
- "New WiFi channel %d gAmpChannel %d gWiFiChannel %d",
- assocData->channel, gAmpChannel, gWiFiChannel);
-
- /* If WiFi has finished associating */
- if(type == WLAN_STA_ASSOC_DONE_IND)
- {
- /* If AMP is using a channel (non-zero), no message sent.
- Or, if the WiFi channel did not change, no message sent.
- Logic presumes same channel has to be used for WiFi and AMP if both are active.
- In any case, track the WiFi channel in use (1-13 or none, in assocData->channel) */
- if((gAmpChannel != 0) || (assocData->channel == gWiFiChannel))
- {
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_LOW,
- "No msg for AFH will be sent");
- gWiFiChannel = assocData->channel;
- kfree_skb(skb);
- return;
- }
- }
- if(type == WLAN_BTC_SOFTAP_BSS_START)
- {
- /*Replace WLAN_BTC_SOFTAP_BSS_START by WLAN_STA_ASSOC_DONE_IND*/
- aniHdr->type = WLAN_STA_ASSOC_DONE_IND;
- }
- gWiFiChannel = assocData->channel;
- skb_put(skb, NLMSG_SPACE((sizeof(tAniMsgHdr)+ sizeof(tWlanAssocData))));
- break;
-
- case WLAN_AMP_ASSOC_DONE_IND:
-
- /* This is an overloaded type. It means that AMP is connected (dest_pid is channel 1-13),
- or it means AMP is now disconnected (dest_pid is 0) */
-
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_LOW,
- "New AMP channel %d gAmpChannel %d gWiFiChannel %d", dest_pid, gAmpChannel, gWiFiChannel);
- /* If WiFi is using a channel (non-zero), no message sent.
- Or, if the AMP channel did not change, no message sent.
- Logic presumes same channel has to be used for WiFi and AMP if both are active.
- In any case, track the AMP channel in use (1-13 or none, in dest_pid) */
- if((gWiFiChannel != 0) || (dest_pid == gAmpChannel))
- {
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_LOW,
- "No msg for AFH will be sent");
- gAmpChannel = dest_pid;
- kfree_skb(skb);
- return;
- }
-
- gAmpChannel = dest_pid;
-
- /* Fix overloaded parameters and finish message formatting */
- if(dest_pid != 0)
- {
- aniHdr->type = WLAN_STA_ASSOC_DONE_IND;
- aniHdr->length = sizeof(tWlanAssocData);
- nlh->nlmsg_len = NLMSG_LENGTH((sizeof(tAniMsgHdr) + sizeof(tWlanAssocData)));
- assocData = ( tWlanAssocData *)((char*)aniHdr + sizeof(tAniMsgHdr));
- assocData->channel = dest_pid;
- skb_put(skb, NLMSG_SPACE((sizeof(tAniMsgHdr)+ sizeof(tWlanAssocData))));
- }
- else
- {
- aniHdr->type = WLAN_STA_DISASSOC_DONE_IND;
- aniHdr->length = 0;
- nlh->nlmsg_len = NLMSG_LENGTH((sizeof(tAniMsgHdr)));
- skb_put(skb, NLMSG_SPACE(sizeof(tAniMsgHdr)));
- }
- dest_pid = 0;
- break;
-
- default:
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "BTC: Attempt to send unknown nlink message %d\n", type);
- kfree_skb(skb);
- return;
- }
- if(dest_pid == 0)
- (void)nl_srv_bcast(skb);
- else
- (void)nl_srv_ucast(skb, dest_pid, MSG_DONTWAIT);
-}
-/*
- * Activate BTC handler. This will register a handler to receive
- * netlink messages addressed to WLAN_NL_MSG_BTC from user space
- */
-int btc_activate_service(void *pAdapter)
-{
- pHddCtx = (struct hdd_context_s*)pAdapter;
-
- //Register the msg handler for msgs addressed to ANI_NL_MSG_BTC
- nl_srv_register(WLAN_NL_MSG_BTC, btc_msg_callback);
- return 0;
-}
-/*
- * Callback function invoked by Netlink service for all netlink
- * messages (from user space) addressed to WLAN_NL_MSG_BTC
- */
-int btc_msg_callback (struct sk_buff * skb)
-{
- struct nlmsghdr *nlh;
- tAniMsgHdr *msg_hdr;
- tSmeBtEvent *btEvent = NULL;
- nlh = (struct nlmsghdr *)skb->data;
- msg_hdr = NLMSG_DATA(nlh);
-
- /* Continue with parsing payload. */
- switch(msg_hdr->type)
- {
- case WLAN_BTC_QUERY_STATE_REQ:
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
- "BTC: Received probe from BTC Service\n");
- send_btc_nlink_msg(WLAN_BTC_QUERY_STATE_RSP, nlh->nlmsg_pid);
- break;
- case WLAN_BTC_BT_EVENT_IND:
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
- "BTC: Received Bluetooth event indication\n");
- if(msg_hdr->length != sizeof(tSmeBtEvent)) {
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "BTC: Size mismatch in BT event data\n");
- break;
- }
- btEvent = (tSmeBtEvent*)((char*)msg_hdr + sizeof(tAniMsgHdr));
- (void)sme_BtcSignalBtEvent(pHddCtx->hHal, btEvent);
- break;
- default:
- VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "BTC: Received Invalid Msg type [%d]\n", msg_hdr->type);
- break;
- }
- return 0;
-}
diff --git a/drivers/staging/qcacld-2.0/CORE/SVC/src/logging/wlan_logging_sock_svc.c b/drivers/staging/qcacld-2.0/CORE/SVC/src/logging/wlan_logging_sock_svc.c
index e16de3b..e88ee62 100644
--- a/drivers/staging/qcacld-2.0/CORE/SVC/src/logging/wlan_logging_sock_svc.c
+++ b/drivers/staging/qcacld-2.0/CORE/SVC/src/logging/wlan_logging_sock_svc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -48,6 +48,9 @@
#include "limApi.h"
#include "ol_txrx_api.h"
#include "csrApi.h"
+#ifdef CNSS_GENL
+#include <net/cnss_nl.h>
+#endif
#define MAX_NUM_PKT_LOG 32
@@ -81,7 +84,8 @@
#define ANI_NL_MSG_LOG_TYPE 89
#define ANI_NL_MSG_READY_IND_TYPE 90
-#define MAX_LOGMSG_LENGTH 4096
+#define MAX_LOGMSG_LENGTH 2048
+#define MAX_SKBMSG_LENGTH 4096
#define MAX_PKTSTATS_LENGTH 2048
#define MAX_PKTSTATS_BUFF 16
@@ -177,6 +181,7 @@
/* PID of the APP to log the message */
static int gapp_pid = INVALID_PID;
+#ifndef CNSS_GENL
/* Utility function to send a netlink message to an application
* in user space
*/
@@ -230,6 +235,7 @@
return err;
}
+#endif
/**
* is_data_path_module() - To check for a Datapath module
@@ -508,6 +514,42 @@
}
/**
+ * nl_srv_bcast_diag() - Wrapper to send bcast msgs to diag events mcast grp
+ * @skb: sk buffer pointer
+ *
+ * Sends the bcast message to diag events multicast group with generic nl socket
+ * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send.
+ *
+ * Return: zero on success, error code otherwise
+ */
+static int nl_srv_bcast_diag(struct sk_buff *skb)
+{
+#ifdef CNSS_GENL
+ return nl_srv_bcast(skb, CLD80211_MCGRP_DIAG_EVENTS, ANI_NL_MSG_PUMAC);
+#else
+ return nl_srv_bcast(skb);
+#endif
+}
+
+/**
+ * nl_srv_bcast_host_logs() - Wrapper to send bcast msgs to host logs mcast grp
+ * @skb: sk buffer pointer
+ *
+ * Sends the bcast message to host logs multicast group with generic nl socket
+ * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send.
+ *
+ * Return: zero on success, error code otherwise
+ */
+static int nl_srv_bcast_host_logs(struct sk_buff *skb)
+{
+#ifdef CNSS_GENL
+ return nl_srv_bcast(skb, CLD80211_MCGRP_HOST_LOGS, ANI_NL_MSG_LOG);
+#else
+ return nl_srv_bcast(skb);
+#endif
+}
+
+/**
* pktlog_send_per_pkt_stats_to_user() - This function is used to send the per
* packet statistics to the user
*
@@ -527,11 +569,11 @@
while (!list_empty(&gwlan_logging.pkt_stat_filled_list)
&& !gwlan_logging.exit) {
- skb_new = dev_alloc_skb(MAX_PKTSTATS_LENGTH);
+ skb_new = dev_alloc_skb(MAX_SKBMSG_LENGTH);
if (skb_new == NULL) {
if (!rate_limit) {
pr_err("%s: dev_alloc_skb() failed for msg size[%d] drop count = %u\n",
- __func__, MAX_LOGMSG_LENGTH,
+ __func__, MAX_SKBMSG_LENGTH,
gwlan_logging.drop_count);
}
rate_limit = 1;
@@ -552,7 +594,8 @@
free_old_skb = true;
goto err;
}
- ret = nl_srv_bcast(pstats_msg->skb);
+
+ ret = nl_srv_bcast_diag(pstats_msg->skb);
if (ret < 0) {
pr_info("%s: Send Failed %d drop_count = %u\n",
__func__, ret,
@@ -650,7 +693,7 @@
&gwlan_logging.free_list);
spin_unlock_irqrestore(&gwlan_logging.spin_lock, flags);
- ret = nl_srv_bcast(skb);
+ ret = nl_srv_bcast_host_logs(skb);
/* print every 64th drop count */
if (ret < 0 && (!(gwlan_logging.drop_count % 0x40))) {
pr_err("%s: Send Failed %d drop_count = %u\n",
@@ -812,6 +855,30 @@
return 0;
}
+#ifdef CNSS_GENL
+/**
+ * register_logging_sock_handler() - Logging sock handler registration
+ *
+ * Dummy API to register the command handler for logger socket app.
+ *
+ * Return: None
+ */
+static void register_logging_sock_handler(void)
+{
+}
+
+/**
+ * unregister_logging_sock_handler() - Logging sock handler unregistration
+ *
+ * Dummy API to unregister the command handler for logger socket app.
+ *
+ * Return: None
+ */
+static void unregister_logging_sock_handler(void)
+{
+}
+
+#else
/*
* Process all the Netlink messages from Logger Socket app in user space
*/
@@ -870,6 +937,33 @@
return ret;
}
+/**
+ * register_logging_sock_handler() - Logging sock handler registration
+ *
+ * API to register the command handler for logger socket app. Registers
+ * legacy handler
+ *
+ * Return: None
+ */
+static void register_logging_sock_handler(void)
+{
+ nl_srv_register(ANI_NL_MSG_LOG, wlan_logging_proc_sock_rx_msg);
+}
+
+/**
+ * unregister_logging_sock_handler() - Logging sock handler unregistration
+ *
+ * API to unregister the command handler for logger socket app. Unregisters
+ * legacy handler
+ *
+ * Return: None
+ */
+static void unregister_logging_sock_handler(void)
+{
+ nl_srv_unregister(ANI_NL_MSG_LOG, wlan_logging_proc_sock_rx_msg);
+}
+#endif
+
int wlan_logging_sock_activate_svc(int log_fe_to_console, int num_buf)
{
int i, j, pkt_stats_size;
@@ -962,7 +1056,7 @@
gwlan_logging.is_active = true;
gwlan_logging.is_flush_complete = false;
- nl_srv_register(ANI_NL_MSG_LOG, wlan_logging_proc_sock_rx_msg);
+ register_logging_sock_handler();
pr_info("%s: Activated wlan_logging svc\n", __func__);
return 0;
@@ -994,7 +1088,7 @@
if (!gplog_msg)
return 0;
- nl_srv_unregister(ANI_NL_MSG_LOG, wlan_logging_proc_sock_rx_msg);
+ unregister_logging_sock_handler();
clear_default_logtoapp_log_level();
gapp_pid = INVALID_PID;
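On the receiving side, the host-logs and diag-events broadcasts above land in genl multicast groups when CNSS_GENL is set. A rough user-space sketch using libnl-genl-3 is shown below; the family name "cld80211" and the group name "host_logs" are assumptions about how the cnss_nl driver registers itself, not something this patch defines.

    /* User-space subscriber sketch (libnl-genl-3). */
    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>

    static int on_msg(struct nl_msg *msg, void *arg)
    {
            /* CLD80211_ATTR_VENDOR_DATA / CLD80211_ATTR_DATA would be parsed here. */
            return NL_OK;
    }

    int subscribe_host_logs(void)
    {
            struct nl_sock *sk = nl_socket_alloc();
            int grp;

            if (!sk)
                    return -1;
            if (genl_connect(sk))
                    goto fail;
            /* Assumed family/group names; adjust to the running cnss_nl driver. */
            grp = genl_ctrl_resolve_grp(sk, "cld80211", "host_logs");
            if (grp < 0)
                    goto fail;
            nl_socket_disable_seq_check(sk);
            nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_msg, NULL);
            if (nl_socket_add_membership(sk, grp))
                    goto fail;
            while (nl_recvmsgs_default(sk) >= 0)
                    ;                               /* consume broadcasts */
            return 0;
    fail:
            nl_socket_free(sk);
            return -1;
    }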
diff --git a/drivers/staging/qcacld-2.0/CORE/SVC/src/nlink/wlan_nlink_srv.c b/drivers/staging/qcacld-2.0/CORE/SVC/src/nlink/wlan_nlink_srv.c
index 8512bf0..60b03b3 100644
--- a/drivers/staging/qcacld-2.0/CORE/SVC/src/nlink/wlan_nlink_srv.c
+++ b/drivers/staging/qcacld-2.0/CORE/SVC/src/nlink/wlan_nlink_srv.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -52,6 +52,13 @@
#include <wlan_nlink_srv.h>
#include <vos_trace.h>
+#ifdef CNSS_GENL
+#include <vos_memory.h>
+#include <wlan_nlink_common.h>
+#include <net/genetlink.h>
+#include <net/cnss_nl.h>
+#endif
+
/* Global variables */
static DEFINE_MUTEX(nl_srv_sem);
static struct sock *nl_srv_sock;
@@ -155,6 +162,195 @@
return retcode;
}
+#ifdef CNSS_GENL
+
+/**
+ * nl80211hdr_put() - API to fill genlmsg header
+ * @skb: Sk buffer
+ * @portid: Port ID
+ * @seq: Sequence number
+ * @flags: Flags
+ * @cmd: Command id
+ *
+ * API to fill the genl message header for broadcast events to user space
+ *
+ * Return: Pointer to user specific header/payload
+ */
+static inline void *nl80211hdr_put(struct sk_buff *skb, uint32_t portid,
+ uint32_t seq, int flags, uint8_t cmd)
+{
+ struct genl_family *cld80211_fam = cld80211_get_genl_family();
+
+ return genlmsg_put(skb, portid, seq, cld80211_fam, flags, cmd);
+}
+
+/**
+ * cld80211_fill_data() - API to fill payload to nl message
+ * @msg: Sk buffer
+ * @portid: Port ID
+ * @seq: Sequence number
+ * @flags: Flags
+ * @cmd: Command ID
+ * @buf: data buffer/payload to be filled
+ * @len: length of the payload, i.e. @buf
+ *
+ * API to fill the payload/data of the nl message to be sent
+ *
+ * Return: zero on success
+ */
+static int cld80211_fill_data(struct sk_buff *msg, uint32_t portid,
+ uint32_t seq, int flags, uint8_t cmd,
+ uint8_t *buf, int len)
+{
+ void *hdr;
+ struct nlattr *nest;
+
+ hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
+ if (!hdr) {
+ VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
+ "nl80211 hdr put failed");
+ return -EPERM;
+ }
+
+ nest = nla_nest_start(msg, CLD80211_ATTR_VENDOR_DATA);
+ if (!nest) {
+ VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
+ "nla_nest_start failed");
+ goto nla_put_failure;
+ }
+
+ if (nla_put(msg, CLD80211_ATTR_DATA, len, buf)) {
+ VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
+ "nla_put failed");
+ goto nla_put_failure;
+ }
+
+ nla_nest_end(msg, nest);
+ genlmsg_end(msg, hdr);
+
+ return 0;
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EPERM;
+}
+
+/**
+ * send_msg_to_cld80211() - API to send message to user space Application
+ * @cld_mcgroup_id: Multicast group ID
+ * @pid: Port ID
+ * @app_id: Application ID
+ * @buf: Data/payload buffer to be sent
+ * @len: Length of the data, i.e. @buf
+ *
+ * API to send the nl message to user space application.
+ *
+ * Return: zero on success
+ */
+static int send_msg_to_cld80211(enum cld80211_multicast_groups cld_mcgroup_id,
+ int pid, int app_id, uint8_t *buf, int len)
+{
+ struct sk_buff *msg;
+ int status;
+ int flags = GFP_KERNEL;
+ int mcgroup_id;
+
+ if (in_interrupt() || irqs_disabled() || in_atomic())
+ flags = GFP_ATOMIC;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, flags);
+ if (!msg) {
+ VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
+ "nlmsg malloc fails");
+ return -EPERM;
+ }
+
+ status = cld80211_fill_data(msg, pid, 0, 0, app_id, buf, len);
+ if (status) {
+ nlmsg_free(msg);
+ return -EPERM;
+ }
+ mcgroup_id = cld80211_get_mcgrp_id(cld_mcgroup_id);
+ if (mcgroup_id == -1) {
+ nlmsg_free(msg);
+ return -EINVAL;
+ }
+ genlmsg_multicast_netns(&init_net, msg, 0, mcgroup_id, flags);
+ return 0;
+}
+
+/**
+ * nl_srv_bcast() - wrapper function to broadcast events to user space apps
+ * @skb: the socket buffer to send
+ * @mcgroup_id: multicast group id
+ * @app_id: application id
+ *
+ * This function is a common wrapper used to broadcast events to different
+ * user space applications.
+ *
+ * Return: zero on success, error code otherwise
+ */
+int nl_srv_bcast(struct sk_buff *skb, int mcgroup_id, int app_id)
+{
+ struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+ void *msg = NLMSG_DATA(nlh);
+ uint32_t msg_len = nlmsg_len(nlh);
+ uint8_t *tempbuf;
+ int status;
+
+ tempbuf = (uint8_t *)vos_mem_malloc(msg_len);
+ vos_mem_copy(tempbuf, msg, msg_len);
+ status = send_msg_to_cld80211(mcgroup_id, 0, app_id, tempbuf, msg_len);
+ if (status) {
+ VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
+ "send msg to cld80211 fails for app id %d", app_id);
+ return -EPERM;
+ }
+
+ dev_kfree_skb(skb);
+ vos_mem_free(tempbuf);
+ return 0;
+}
+
+/**
+ * nl_srv_ucast() - wrapper function to send unicast events to user space apps
+ * @skb: the socket buffer to send
+ * @dst_pid: destination process ID
+ * @flag: flags
+ * @app_id: application id
+ * @mcgroup_id: Multicast group ID
+ *
+ * This function is a common wrapper used to send unicast events to different
+ * user space applications. Internally it uses the broadcast API with the
+ * multicast group @mcgroup_id. This wrapper serves as a common API in both
+ * the new generic netlink infrastructure and the legacy implementation.
+ *
+ * Return: zero on success, error code otherwise
+ */
+int nl_srv_ucast(struct sk_buff *skb, int dst_pid, int flag,
+ int app_id, int mcgroup_id)
+{
+ struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+ void *msg = NLMSG_DATA(nlh);
+ uint32_t msg_len = nlmsg_len(nlh);
+ uint8_t *tempbuf;
+ int status;
+
+ tempbuf = (uint8_t *)vos_mem_malloc(msg_len);
+ vos_mem_copy(tempbuf, msg, msg_len);
+ status = send_msg_to_cld80211(mcgroup_id, dst_pid, app_id,
+ tempbuf, msg_len);
+ if (status) {
+ VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
+ "send msg to cld80211 fails for app id %d", app_id);
+ return -EPERM;
+ }
+
+ dev_kfree_skb(skb);
+ vos_mem_free(tempbuf);
+ return 0;
+}
+
+#else
/*
* Unicast the message to the process in user space identfied
* by the dst-pid
@@ -183,10 +379,6 @@
return err;
}
-/*
- * Broadcast the message. Broadcast will return an error if
- * there are no listeners
- */
int nl_srv_bcast(struct sk_buff *skb)
{
int err = 0;
@@ -215,6 +407,8 @@
return err;
}
+#endif
+
/*
* Processes the Netlink socket input queue.
* Dequeue skb's from the socket input queue and process
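Note that both CNSS_GENL wrappers above copy msg_len bytes starting at NLMSG_DATA(nlh) out of the passed skb and relay them inside a CLD80211_ATTR_VENDOR_DATA / CLD80211_ATTR_DATA nest, then free the skb themselves on success. In other words, callers keep building a conventional netlink-framed skb and the legacy framing is preserved as the genl payload; the hunks in wlan_logging_sock_svc.c and dbglog_host.c rely on exactly this behaviour.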
diff --git a/drivers/staging/qcacld-2.0/CORE/SVC/src/ptt/wlan_ptt_sock_svc.c b/drivers/staging/qcacld-2.0/CORE/SVC/src/ptt/wlan_ptt_sock_svc.c
index 1338b8f..ec86254 100644
--- a/drivers/staging/qcacld-2.0/CORE/SVC/src/ptt/wlan_ptt_sock_svc.c
+++ b/drivers/staging/qcacld-2.0/CORE/SVC/src/ptt/wlan_ptt_sock_svc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -40,6 +40,12 @@
#include <vos_types.h>
#include <vos_trace.h>
#include <wlan_hdd_ftm.h>
+#ifdef CNSS_GENL
+#include <net/cnss_nl.h>
+#else
+
+static struct hdd_context_s *hdd_ctx_handle;
+#endif
#define PTT_SOCK_DEBUG
#ifdef PTT_SOCK_DEBUG
@@ -48,7 +54,6 @@
#define PTT_TRACE(level, args...)
#endif
// Global variables
-static struct hdd_context_s *pAdapterHandle;
#ifdef PTT_SOCK_DEBUG_VERBOSE
//Utility function to perform a hex dump
@@ -64,6 +69,46 @@
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,"\n");
}
#endif
+
+/**
+ * nl_srv_ucast_ptt() - Wrapper function to send ucast msgs to PTT
+ * @skb: sk buffer pointer
+ * @dst_pid: Destination PID
+ * @flag: flags
+ *
+ * Sends the ucast message to PTT with generic nl socket if CNSS_GENL
+ * is enabled. Else, use the legacy netlink socket to send.
+ *
+ * Return: zero on success, error code otherwise
+ */
+static int nl_srv_ucast_ptt(struct sk_buff *skb, int dst_pid, int flag)
+{
+#ifdef CNSS_GENL
+ return nl_srv_ucast(skb, dst_pid, flag, ANI_NL_MSG_PUMAC,
+ CLD80211_MCGRP_DIAG_EVENTS);
+#else
+ return nl_srv_ucast(skb, dst_pid, flag);
+#endif
+}
+
+/**
+ * nl_srv_bcast_ptt() - Wrapper function to send bcast msgs to DIAG mcast group
+ * @skb: sk buffer pointer
+ *
+ * Sends the bcast message to DIAG multicast group with generic nl socket
+ * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send.
+ *
+ * Return: zero on success, error code otherwise
+ */
+static int nl_srv_bcast_ptt(struct sk_buff *skb)
+{
+#ifdef CNSS_GENL
+ return nl_srv_bcast(skb, CLD80211_MCGRP_DIAG_EVENTS, ANI_NL_MSG_PUMAC);
+#else
+ return nl_srv_bcast(skb);
+#endif
+}
+
//Utility function to send a netlink message to an application in user space
int ptt_sock_send_msg_to_app(tAniHdr *wmsg, int radio, int src_mod, int pid)
{
@@ -102,9 +147,9 @@
#endif
if (pid != -1) {
- err = nl_srv_ucast(skb, pid, MSG_DONTWAIT);
+ err = nl_srv_ucast_ptt(skb, pid, MSG_DONTWAIT);
} else {
- err = nl_srv_bcast(skb);
+ err = nl_srv_bcast_ptt(skb);
}
if (err) {
PTT_TRACE(VOS_TRACE_LEVEL_INFO,
@@ -113,6 +158,8 @@
}
return err;
}
+
+#ifndef CNSS_GENL
/*
* Process tregisteration request and send registration response messages
* to the PTT Socket App in user space
@@ -128,7 +175,7 @@
rspmsg.regReq.type = reg_req->type;
/* Save the pid */
- pAdapterHandle->ptt_pid = reg_req->pid;
+ hdd_ctx_handle->ptt_pid = reg_req->pid;
rspmsg.regReq.pid= reg_req->pid;
rspmsg.wniHdr.type = cpu_to_be16(ANI_MSG_APP_REG_RSP);
rspmsg.wniHdr.length = cpu_to_be16(sizeof(rspmsg));
@@ -139,6 +186,7 @@
__func__, reg_req->pid);
}
}
+
/*
* Process all the messages from the PTT Socket App in user space
*/
@@ -181,12 +229,72 @@
}
return 0;
}
-int ptt_sock_activate_svc(void *pAdapter)
+#endif
+
+#ifdef CNSS_GENL
+/**
+ * ptt_cmd_handler() - Handler function for PTT commands
+ * @data: Data to be parsed
+ * @data_len: Length of the data received
+ * @ctx: Registered context reference
+ * @pid: Process id of the user space application
+ *
+ * This function handles the command from PTT user space application
+ *
+ * Return: None
+ */
+static void ptt_cmd_handler(const void *data, int data_len, void *ctx, int pid)
{
- pAdapterHandle = (struct hdd_context_s*)pAdapter;
- pAdapterHandle->ptt_pid = INVALID_PID;
+ ptt_app_reg_req *payload;
+ struct nlattr *tb[CLD80211_ATTR_MAX + 1];
+
+ if (nla_parse(tb, CLD80211_ATTR_MAX, data, data_len, NULL)) {
+ PTT_TRACE(VOS_TRACE_LEVEL_ERROR, "Invalid ATTR");
+ return;
+ }
+
+ if (!tb[CLD80211_ATTR_DATA]) {
+ PTT_TRACE(VOS_TRACE_LEVEL_ERROR, "attr ATTR_DATA failed");
+ return;
+ }
+
+ payload = (ptt_app_reg_req *)(nla_data(tb[CLD80211_ATTR_DATA]));
+ switch (payload->wmsg.type) {
+ case ANI_MSG_APP_REG_REQ:
+ ptt_sock_send_msg_to_app(&payload->wmsg, payload->radio,
+ ANI_NL_MSG_PUMAC, pid);
+ break;
+ default:
+ PTT_TRACE(VOS_TRACE_LEVEL_ERROR, "Unknown msg type %d",
+ payload->wmsg.type);
+ break;
+ }
+}
+
+/**
+ * ptt_sock_activate_svc() - API to register PTT/PUMAC command handler
+ * @hdd_ctx: Pointer to the HDD context
+ *
+ * API to register the PTT/PUMAC command handlers. The @hdd_ctx argument
+ * is kept only for prototype compatibility between the new genl and the
+ * legacy implementation.
+ *
+ * Return: 0
+ */
+int ptt_sock_activate_svc(void *hdd_ctx)
+{
+ register_cld_cmd_cb(ANI_NL_MSG_PUMAC, ptt_cmd_handler, NULL);
+ register_cld_cmd_cb(ANI_NL_MSG_PTT, ptt_cmd_handler, NULL);
+ return 0;
+}
+#else
+int ptt_sock_activate_svc(void *hdd_ctx)
+{
+ hdd_ctx_handle = (struct hdd_context_s *)hdd_ctx;
+ hdd_ctx_handle->ptt_pid = INVALID_PID;
nl_srv_register(ANI_NL_MSG_PUMAC, ptt_sock_rx_nlink_msg);
nl_srv_register(ANI_NL_MSG_PTT, ptt_sock_rx_nlink_msg);
return 0;
}
+#endif
#endif // PTT_SOCK_SVC_ENABLE
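The CNSS_GENL path above replaces the raw netlink receive path with per-message-type callbacks registered through register_cld_cmd_cb(), and every handler unpacks its payload from the CLD80211_ATTR_DATA attribute, as ptt_cmd_handler() does. Below is a hedged template of what moving another legacy message type onto this path could look like; MY_NL_MSG_TYPE, struct my_req and my_cmd_handler() are hypothetical names, and the nla_parse() usage simply mirrors the handlers in this patch.

    struct my_req {
            int radio;
            tAniHdr wmsg;
    };

    static void my_cmd_handler(const void *data, int data_len, void *ctx, int pid)
    {
            struct nlattr *tb[CLD80211_ATTR_MAX + 1];
            struct my_req *req;

            if (nla_parse(tb, CLD80211_ATTR_MAX, data, data_len, NULL))
                    return;                         /* malformed attribute stream */
            if (!tb[CLD80211_ATTR_DATA])
                    return;                         /* payload attribute missing */
            req = (struct my_req *)nla_data(tb[CLD80211_ATTR_DATA]);
            /* dispatch on req->wmsg.type, e.g. reply via ptt_sock_send_msg_to_app() */
    }

    /* Registration would mirror ptt_sock_activate_svc():
     *     register_cld_cmd_cb(MY_NL_MSG_TYPE, my_cmd_handler, NULL);
     */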
diff --git a/drivers/staging/qcacld-2.0/CORE/SYS/legacy/src/utils/src/macTrace.c b/drivers/staging/qcacld-2.0/CORE/SYS/legacy/src/utils/src/macTrace.c
index 993abbd..153aaae 100644
--- a/drivers/staging/qcacld-2.0/CORE/SYS/legacy/src/utils/src/macTrace.c
+++ b/drivers/staging/qcacld-2.0/CORE/SYS/legacy/src/utils/src/macTrace.c
@@ -499,7 +499,6 @@
CASE_RETURN_STRING(eWNI_SME_ESE_ADJACENT_AP_REPORT);
#endif
CASE_RETURN_STRING(eWNI_SME_REGISTER_MGMT_FRAME_REQ);
- CASE_RETURN_STRING(eWNI_SME_COEX_IND);
#ifdef FEATURE_WLAN_SCAN_PNO
CASE_RETURN_STRING(eWNI_SME_PREF_NETWORK_FOUND_IND);
#endif // FEATURE_WLAN_SCAN_PNO
diff --git a/drivers/staging/qcacld-2.0/CORE/UTILS/FWLOG/dbglog_host.c b/drivers/staging/qcacld-2.0/CORE/UTILS/FWLOG/dbglog_host.c
index ff73b3c..6a49343 100644
--- a/drivers/staging/qcacld-2.0/CORE/UTILS/FWLOG/dbglog_host.c
+++ b/drivers/staging/qcacld-2.0/CORE/UTILS/FWLOG/dbglog_host.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -42,6 +42,9 @@
#include <wlan_hdd_wext.h>
#include <net/sock.h>
#include <linux/netlink.h>
+#ifdef CNSS_GENL
+#include <net/cnss_nl.h>
+#endif
#ifdef WLAN_OPEN_SOURCE
#include <linux/debugfs.h>
@@ -1634,6 +1637,24 @@
}
#endif /* WLAN_OPEN_SOURCE */
+/**
+ * nl_srv_bcast_fw_logs() - Wrapper func to send bcast msgs to FW logs mcast grp
+ * @skb: sk buffer pointer
+ *
+ * Sends the bcast message to FW logs multicast group with generic nl socket
+ * if CNSS_GENL is enabled. Else, use the legacy netlink socket to send.
+ *
+ * Return: zero on success, error code otherwise
+ */
+static int nl_srv_bcast_fw_logs(struct sk_buff *skb)
+{
+#ifdef CNSS_GENL
+ return nl_srv_bcast(skb, CLD80211_MCGRP_FW_LOGS, WLAN_NL_MSG_CNSS_DIAG);
+#else
+ return nl_srv_bcast(skb);
+#endif
+}
+
/*
* Package the data from the fw diag WMI event handler.
* Pass this data to cnss-diag service
@@ -1645,6 +1666,9 @@
struct sk_buff *skb_out;
struct nlmsghdr *nlh;
int res = 0;
+ tAniNlHdr *wnl;
+ int radio = 0;
+ int msg_len = 0;
if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE))
return -ENODEV;
@@ -1654,20 +1678,23 @@
if (vos_is_multicast_logging())
{
- skb_out = nlmsg_new(len, 0);
+ msg_len = len + sizeof(radio);
+ skb_out = nlmsg_new(msg_len, 0);
if (!skb_out)
{
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to allocate new skb\n"));
return -1;
}
- nlh = nlmsg_put(skb_out, 0, 0, WLAN_NL_MSG_CNSS_DIAG, len, 0);
- memcpy(nlmsg_data(nlh), buffer, len);
+ nlh = nlmsg_put(skb_out, 0, 0, WLAN_NL_MSG_CNSS_DIAG, msg_len, 0);
+ wnl = (tAniNlHdr *)nlh;
+ wnl->radio = 0;
+ memcpy(nlmsg_data(nlh) + sizeof(radio), buffer, len);
- res = nl_srv_bcast(skb_out);
+ res = nl_srv_bcast_fw_logs(skb_out);
if (res < 0)
{
AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1,
- ("%s: nl_srv_bcast failed 0x%x \n", __func__, res));
+ ("%s: nl_srv_bcast_fw_logs failed 0x%x \n", __func__, res));
return res;
}
}
@@ -1683,6 +1710,8 @@
int res = 0;
struct dbglog_slot *slot;
size_t slot_len;
+ tAniNlHdr *wnl;
+ int radio = 0;
if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE))
return -ENODEV;
@@ -1691,7 +1720,7 @@
return -EIO;
if (vos_is_multicast_logging()) {
- slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE;
+ slot_len = sizeof(*slot) + ATH6KL_FWLOG_PAYLOAD_SIZE + sizeof(radio);
skb_out = nlmsg_new(slot_len, 0);
if (!skb_out) {
@@ -1701,7 +1730,9 @@
}
nlh = nlmsg_put(skb_out, 0, 0, WLAN_NL_MSG_CNSS_DIAG, slot_len, 0);
- slot = (struct dbglog_slot *) nlmsg_data(nlh);
+ wnl = (tAniNlHdr *)nlh;
+ wnl->radio = 0;
+ slot = (struct dbglog_slot *) (nlmsg_data(nlh) + sizeof(radio));
slot->diag_type = cmd;
slot->timestamp = cpu_to_le32(jiffies);
slot->length = cpu_to_le32(len);
@@ -1709,10 +1740,10 @@
slot->dropped = get_version;
memcpy(slot->payload, buffer, len);
- res = nl_srv_bcast(skb_out);
+ res = nl_srv_bcast_fw_logs(skb_out);
if (res < 0) {
AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1,
- ("%s: nl_srv_bcast failed 0x%x \n", __func__, res));
+ ("%s: nl_srv_bcast_fw_logs failed 0x%x \n", __func__, res));
return res;
}
}
@@ -1755,11 +1786,11 @@
slot->dropped = cpu_to_le32(dropped);
memcpy(slot->payload, buffer, len);
- res = nl_srv_bcast(skb_out);
+ res = nl_srv_bcast_fw_logs(skb_out);
if (res < 0)
{
AR_DEBUG_PRINTF(ATH_DEBUG_RSVD1,
- ("%s: nl_srv_bcast failed 0x%x \n", __func__, res));
+ ("%s: nl_srv_bcast_fw_logs failed 0x%x \n", __func__, res));
return res;
}
}
@@ -3915,6 +3946,41 @@
}
#endif /* WLAN_OPEN_SOURCE */
+/**
+ * cnss_diag_handle_crash_inject() - API to handle crash inject command
+ * @slot: pointer to struct dbglog_slot
+ *
+ * API to handle CNSS diag crash inject command
+ *
+ * Return: None
+ */
+static void cnss_diag_handle_crash_inject(struct dbglog_slot *slot)
+{
+ switch (slot->diag_type) {
+ case DIAG_TYPE_CRASH_INJECT:
+ if (slot->length == 2) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s : DIAG_TYPE_CRASH_INJECT: %d %d\n", __func__,
+ slot->payload[0], slot->payload[1]));
+ if (!tgt_assert_enable) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
+ ("%s: tgt Assert Disabled\n", __func__));
+ return;
+ }
+ process_wma_set_command_twoargs(0,
+ (int)GEN_PARAM_CRASH_INJECT,
+ slot->payload[0],
+ slot->payload[1], GEN_CMD);
+ }
+ else
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("crash_inject cmd error\n"));
+ break;
+ default:
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown cmd error\n"));
+ break;
+ }
+}
+
/**---------------------------------------------------------------------------
\brief cnss_diag_msg_callback() - Call back invoked by netlink service
@@ -3928,46 +3994,77 @@
--------------------------------------------------------------------------*/
int cnss_diag_msg_callback(struct sk_buff *skb)
{
- struct nlmsghdr *nlh;
- struct dbglog_slot *slot;
- A_UINT8 *msg;
+ struct nlmsghdr *nlh;
+ A_UINT8 *msg;
- nlh = (struct nlmsghdr *)skb->data;
- if (!nlh)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Netlink header null \n", __func__));
- return -1;
- }
+ nlh = (struct nlmsghdr *)skb->data;
+ if (!nlh) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: Netlink header null \n", __func__));
+ return -1;
+ }
- msg = NLMSG_DATA(nlh);
+ msg = NLMSG_DATA(nlh);
+ cnss_diag_handle_crash_inject((struct dbglog_slot *)msg);
- slot = (struct dbglog_slot *)msg;
- switch (slot->diag_type) {
- case DIAG_TYPE_CRASH_INJECT:
- if (slot->length == 2) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s : DIAG_TYPE_CRASH_INJECT: %d %d\n", __func__,
- slot->payload[0], slot->payload[1]));
- if (!tgt_assert_enable) {
- AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("%s: tgt Assert Disabled\n", __func__));
- return 0;
- }
- process_wma_set_command_twoargs(0,
- (int)GEN_PARAM_CRASH_INJECT,
- slot->payload[0],
- slot->payload[1], GEN_CMD);
- }
- else
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("crash_inject cmd error\n"));
- break;
- default:
- break;
- }
- return 0;
-
+ return 0;
}
+#ifdef CNSS_GENL
+
+/**
+ * cnss_diag_cmd_handler() - API to handle CNSS diag command
+ * @data: Data received
+ * @data_len: length of the data received
+ * @ctx: Pointer to stored context
+ * @pid: Process ID
+ *
+ * API to handle CNSS diag commands from user space
+ *
+ * Return: None
+ */
+static void cnss_diag_cmd_handler(const void *data, int data_len,
+ void *ctx, int pid)
+{
+ struct dbglog_slot *slot = NULL;
+ struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_MAX + 1];
+
+ if (nla_parse(tb, CLD80211_ATTR_MAX, data, data_len, NULL)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: nla parse fails \n",
+ __func__));
+ return;
+ }
+
+ if (!tb[CLD80211_ATTR_DATA]) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: attr VENDOR_DATA fails \n",
+ __func__));
+ return;
+ }
+ slot = (struct dbglog_slot *)nla_data(tb[CLD80211_ATTR_DATA]);
+
+ if (!slot) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("%s: data NULL \n", __func__));
+ return;
+ }
+
+ cnss_diag_handle_crash_inject(slot);
+ return;
+}
+
+/**
+ * cnss_diag_activate_service() - API to register CNSS diag cmd handler
+ *
+ * API to register the CNSS diag command handler using new genl infra.
+ * The return value is always zero, to match the legacy prototype.
+ *
+ * Return: 0
+ */
+int cnss_diag_activate_service(void)
+{
+ register_cld_cmd_cb(WLAN_NL_MSG_CNSS_DIAG, cnss_diag_cmd_handler, NULL);
+ return 0;
+}
+
+#else
/**---------------------------------------------------------------------------
\brief cnss_diag_activate_service() - Activate cnss_diag message handler
@@ -3979,7 +4076,7 @@
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
-int cnss_diag_activate_service()
+int cnss_diag_activate_service(void)
{
int ret = 0;
@@ -3993,6 +4090,7 @@
kd_nl_init = TRUE;
return 0;
}
+#endif
A_BOOL
dbglog_wow_print_handler(
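The fw-log hunks above change the framing of every CNSS diag message: a 4-byte radio index now follows the netlink header (giving user space a tAniNlHdr) and the real payload starts after it, which is why the allocations grow by sizeof(radio) and the memcpy() destinations are offset. A condensed sketch of that producer pattern, reusing the names from this patch (the helper name itself is illustrative):

    static int send_fw_log_chunk(const void *buf, int len)
    {
            int radio = 0;
            int msg_len = len + sizeof(radio);
            struct sk_buff *skb = nlmsg_new(msg_len, 0);
            struct nlmsghdr *nlh;
            tAniNlHdr *wnl;

            if (!skb)
                    return -ENOMEM;
            nlh = nlmsg_put(skb, 0, 0, WLAN_NL_MSG_CNSS_DIAG, msg_len, 0);
            wnl = (tAniNlHdr *)nlh;
            wnl->radio = radio;                      /* radio index right after nlmsghdr */
            memcpy(nlmsg_data(nlh) + sizeof(radio), buf, len);

            return nl_srv_bcast_fw_logs(skb);        /* genl or legacy, per CNSS_GENL */
    }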
diff --git a/drivers/staging/qcacld-2.0/CORE/WDA/inc/legacy/halMsgApi.h b/drivers/staging/qcacld-2.0/CORE/WDA/inc/legacy/halMsgApi.h
index a8e71f6..6112507 100644
--- a/drivers/staging/qcacld-2.0/CORE/WDA/inc/legacy/halMsgApi.h
+++ b/drivers/staging/qcacld-2.0/CORE/WDA/inc/legacy/halMsgApi.h
@@ -741,18 +741,11 @@
#ifdef FEATURE_OEM_DATA_SUPPORT
-#ifndef OEM_DATA_REQ_SIZE
-#define OEM_DATA_REQ_SIZE 280
-#endif
-#ifndef OEM_DATA_RSP_SIZE
-#define OEM_DATA_RSP_SIZE 1724
-#endif
-
typedef struct
{
tSirMacAddr selfMacAddr;
eHalStatus status;
- uint8_t data_len;
+ uint32_t data_len;
uint8_t *data;
} tStartOemDataReq, *tpStartOemDataReq;
diff --git a/drivers/staging/qcacld-2.0/Kbuild b/drivers/staging/qcacld-2.0/Kbuild
index f08d947..ae16760 100644
--- a/drivers/staging/qcacld-2.0/Kbuild
+++ b/drivers/staging/qcacld-2.0/Kbuild
@@ -616,7 +616,6 @@
$(SME_SRC_DIR)/sme_common/sme_FTApi.o \
$(SME_SRC_DIR)/sme_common/sme_Trace.o
-SME_BTC_OBJS := $(SME_SRC_DIR)/btc/btcApi.o
SME_OEM_DATA_OBJS := $(SME_SRC_DIR)/oemData/oemDataApi.o
@@ -632,8 +631,7 @@
SME_NDP_OBJS += $(SME_SRC_DIR)/nan/nan_datapath_api.o
endif
-SME_OBJS := $(SME_BTC_OBJS) \
- $(SME_CCM_OBJS) \
+SME_OBJS := $(SME_CCM_OBJS) \
$(SME_CMN_OBJS) \
$(SME_CSR_OBJS) \
$(SME_OEM_DATA_OBJS) \
@@ -652,9 +650,6 @@
SVC_INC := -I$(WLAN_ROOT)/$(SVC_INC_DIR) \
-I$(WLAN_ROOT)/$(SVC_DIR)/external
-BTC_SRC_DIR := $(SVC_SRC_DIR)/btc
-BTC_OBJS := $(BTC_SRC_DIR)/wlan_btc_svc.o
-
NLINK_SRC_DIR := $(SVC_SRC_DIR)/nlink
NLINK_OBJS := $(NLINK_SRC_DIR)/wlan_nlink_srv.o
@@ -664,8 +659,7 @@
WLAN_LOGGING_SRC_DIR := $(SVC_SRC_DIR)/logging
WLAN_LOGGING_OBJS := $(WLAN_LOGGING_SRC_DIR)/wlan_logging_sock_svc.o
-SVC_OBJS := $(BTC_OBJS) \
- $(NLINK_OBJS) \
+SVC_OBJS := $(NLINK_OBJS) \
$(PTT_OBJS) \
$(WLAN_LOGGING_OBJS)
@@ -1126,6 +1120,10 @@
CDEFINES += -DWLAN_FEATURE_ROAM_OFFLOAD
endif
+ifeq ($(CONFIG_CNSS_GENL), y)
+CDEFINES += -DCNSS_GENL
+endif
+
ifeq ($(CONFIG_PRIMA_WLAN_OKC),y)
CDEFINES += -DFEATURE_WLAN_OKC
endif
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 7c68725..7784148 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -527,22 +527,60 @@
bool enabled;
struct ffs_data *data;
struct android_dev *dev;
+ struct android_usb_function *android_func;
+ struct list_head list_item;
};
+#define MAX_FFS_FUNCTIONS 5
+static struct list_head ffs_configs;
+static struct mutex ffs_configs_lock;
+
static int ffs_function_init(struct android_usb_function *f,
struct usb_composite_dev *cdev)
{
- f->config = kzalloc(sizeof(struct functionfs_config), GFP_KERNEL);
- if (!f->config)
- return -ENOMEM;
+ int i;
+ struct functionfs_config *config;
+ struct functionfs_config *next;
+ struct android_usb_function *nf;
+ INIT_LIST_HEAD(&ffs_configs);
+ mutex_init(&ffs_configs_lock);
+
+ for (i = 0; i < MAX_FFS_FUNCTIONS; i++) {
+ nf = kmalloc(sizeof(*nf), GFP_KERNEL);
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!nf || !config) {
+ kfree(nf);
+ kfree(config);
+ list_for_each_entry_safe(config, next,
+ &ffs_configs, list_item) {
+ list_del(&config->list_item);
+ kfree(config->android_func);
+ kfree(config);
+ }
+ return -ENOMEM;
+ }
+
+ memcpy(nf, f, sizeof(struct android_usb_function));
+ nf->config = config;
+ config->android_func = nf;
+
+ list_add_tail(&config->list_item, &ffs_configs);
+ }
return functionfs_init();
}
static void ffs_function_cleanup(struct android_usb_function *f)
{
+ struct functionfs_config *config;
+ struct functionfs_config *next;
+ list_for_each_entry_safe(config, next, &ffs_configs, list_item) {
+ list_del(&config->list_item);
+
+ kfree(config->android_func);
+ kfree(config);
+ }
functionfs_cleanup();
- kfree(f->config);
}
static void ffs_function_enable(struct android_usb_function *f)
@@ -635,68 +673,90 @@
static int functionfs_ready_callback(struct ffs_data *ffs)
{
- struct android_dev *dev = ffs_function.android_dev;
- struct functionfs_config *config = ffs_function.config;
+ struct android_dev *dev;
+ struct functionfs_config *config = NULL;
+ struct functionfs_config *cur;
int ret = 0;
- /* dev is null in case ADB is not in the composition */
- if (dev) {
- mutex_lock(&dev->mutex);
- ret = functionfs_bind(ffs, dev->cdev);
- if (ret) {
- mutex_unlock(&dev->mutex);
- return ret;
+ mutex_lock(&ffs_configs_lock);
+ list_for_each_entry(cur, &ffs_configs, list_item) {
+ if (!cur->opened && cur->android_func->android_dev) {
+ config = cur;
+ break;
}
- } else {
- /* android ffs_func requires daemon to start only after enable*/
- pr_debug("start adbd only in ADB composition\n");
+ }
+ if (!config) {
+ pr_err("ffs function %s was not enabled\n",
+ ffs->dev_name);
+ mutex_unlock(&ffs_configs_lock);
return -ENODEV;
}
+ dev = config->android_func->android_dev;
+ mutex_lock(&dev->mutex);
+ ret = functionfs_bind(ffs, dev->cdev);
+ if (ret) {
+ mutex_unlock(&dev->mutex);
+ mutex_unlock(&ffs_configs_lock);
+ return ret;
+ }
config->data = ffs;
config->opened = true;
- /* Save dev in case the adb function will get disabled */
config->dev = dev;
+ mutex_unlock(&ffs_configs_lock);
+
if (config->enabled)
android_enable(dev);
mutex_unlock(&dev->mutex);
- return 0;
+ return ret;
}
static void functionfs_closed_callback(struct ffs_data *ffs)
{
- struct android_dev *dev = ffs_function.android_dev;
- struct functionfs_config *config = ffs_function.config;
+ struct android_dev *dev;
+ struct functionfs_config *config = NULL;
+ struct functionfs_config *cur;
- /*
- * In case new composition is without ADB or ADB got disabled by the
- * time ffs_daemon was stopped then use saved one
- */
+ mutex_lock(&ffs_configs_lock);
+ list_for_each_entry(cur, &ffs_configs, list_item) {
+ if (cur->data == ffs) {
+ config = cur;
+ break;
+ }
+ }
+ if (!config) {
+ pr_err("ffs closed callback failed %s!\n", ffs->dev_name);
+ mutex_unlock(&ffs_configs_lock);
+ return;
+ }
+ dev = config->android_func->android_dev;
if (!dev)
dev = config->dev;
/* fatal-error: It should never happen */
- if (!dev)
- pr_err("adb_closed_callback: config->dev is NULL");
+ if (!dev) {
+ pr_err("functionfs_closed_callback: config->dev is NULL");
+ mutex_unlock(&ffs_configs_lock);
+ return;
+ }
- if (dev)
- mutex_lock(&dev->mutex);
+ mutex_lock(&dev->mutex);
- if (config->enabled && dev)
+ if (config->enabled)
android_disable(dev);
config->dev = NULL;
config->opened = false;
config->data = NULL;
+ mutex_unlock(&ffs_configs_lock);
functionfs_unbind(ffs);
- if (dev)
- mutex_unlock(&dev->mutex);
+ mutex_unlock(&dev->mutex);
}
static void *functionfs_acquire_dev_callback(const char *dev_name)
@@ -3073,6 +3133,49 @@
return -EINVAL;
}
+static int android_enable_ffs_function(struct android_dev *dev,
+ struct android_configuration *conf,
+ char *alias) {
+ struct functionfs_config *config;
+ struct android_usb_function *match = NULL;
+ struct android_usb_function_holder *f_holder;
+ struct android_usb_platform_data *pdata = dev->pdata;
+ struct usb_gadget *gadget = dev->cdev->gadget;
+
+ list_for_each_entry(config, &ffs_configs, list_item) {
+ /* Function already enabled */
+ if (config->android_func->android_dev)
+ continue;
+ if (config->data) {
+ if (!strcmp(alias, config->data->dev_name))
+ match = config->android_func;
+ else
+ continue;
+ }
+
+ /* If no match found, choose first empty function */
+ if (!match)
+ match = config->android_func;
+ }
+ if (!match) {
+ pr_err("too many ffs functions enabled, max is %d\n",
+ MAX_FFS_FUNCTIONS);
+ return -EOVERFLOW;
+ }
+ f_holder = kzalloc(sizeof(*f_holder), GFP_KERNEL);
+ if (unlikely(!f_holder)) {
+ return -ENOMEM;
+ }
+
+ match->android_dev = dev;
+ f_holder->f = match;
+ list_add_tail(&f_holder->enabled_list,
+ &conf->enabled_functions);
+ pr_debug("func:%s is enabled.\n", match->name);
+ check_streaming_func(gadget, pdata, match->name);
+ return 0;
+}
+
/*-------------------------------------------------------------------------*/
/* /sys/class/android_usb/android%d/ interface */
@@ -3162,10 +3265,12 @@
int is_ffs;
int ffs_enabled = 0;
+ mutex_lock(&ffs_configs_lock);
mutex_lock(&dev->mutex);
if (dev->enabled) {
mutex_unlock(&dev->mutex);
+ mutex_unlock(&ffs_configs_lock);
return -EBUSY;
}
@@ -3216,9 +3321,8 @@
}
if (is_ffs) {
- if (ffs_enabled)
- continue;
- err = android_enable_function(dev, conf, "ffs");
+ err = android_enable_ffs_function(dev,
+ conf, name);
if (err)
pr_err("android_usb: Cannot enable ffs (%d)",
err);
@@ -3246,6 +3350,7 @@
}
mutex_unlock(&dev->mutex);
+ mutex_unlock(&ffs_configs_lock);
return size;
}
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
index cd3d407..137be79 100644
--- a/drivers/usb/gadget/f_accessory.c
+++ b/drivers/usb/gadget/f_accessory.c
@@ -709,10 +709,17 @@
break;
}
- if (count > BULK_BUFFER_SIZE)
+ if (count > BULK_BUFFER_SIZE) {
xfer = BULK_BUFFER_SIZE;
- else
+ /* There will be more TX requests, so do not send a ZLP yet. */
+ req->zero = 0;
+ } else {
xfer = count;
+ /* If the data length is a multiple of the
+ * maxpacket size, then send a zero length packet (ZLP).
+ */
+ req->zero = ((xfer % dev->ep_in->maxpacket) == 0);
+ }
if (copy_from_user(req->buf, buf, xfer)) {
r = -EFAULT;
break;
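The f_accessory.c change above encodes the standard USB bulk-transfer termination rule: when the final packet of a transfer is exactly maxpacket bytes long, the host cannot tell that the transfer has ended, so the gadget must queue a zero-length packet (ZLP); setting req->zero makes the UDC do that automatically. A tiny restatement of the rule with a worked example (values illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    /* A transfer whose length is an exact multiple of the endpoint's maxpacket
     * size needs a trailing ZLP so the host knows the transfer is complete. */
    static bool needs_zlp(size_t xfer_len, size_t maxpacket)
    {
            return maxpacket && (xfer_len % maxpacket) == 0;
    }

    /* e.g. with a 512-byte high-speed bulk maxpacket:
     *   needs_zlp(1024, 512) -> true   (host would otherwise keep waiting)
     *   needs_zlp(1000, 512) -> false  (the short final packet terminates it)
     */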
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 7fc4f21..fd97c83 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -22,6 +22,7 @@
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/hid.h>
+#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <linux/usb/composite.h>
@@ -30,6 +31,8 @@
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
+#define ENDPOINT_ALLOC_MAX (1 << 25) /* Max endpoint buffer size, 32 MiB */
+
/* Debugging ****************************************************************/
@@ -73,6 +76,26 @@
FFS_ACTIVE,
/*
+ * Function is visible to the host, but it is not functional. All
+ * setup requests are stalled and transfers on other endpoints are
+ * refused. All epfiles, except ep0, are deleted so there is no way
+ * to perform any operations on them.
+ *
+ * This state is set after closing all functionfs files, when the
+ * mount parameter "no_disconnect=1" has been set. The function will
+ * remain in the deactivated state until the filesystem is unmounted
+ * or ep0 is opened again. In the second case the functionfs state
+ * will be reset, and it will be ready for descriptor and string
+ * writes.
+ *
+ * This is useful only when functionfs is composed into a gadget
+ * together with another function which can perform some critical
+ * operations, and it is strongly desired to have those operations
+ * completed even after the functionfs files are closed.
+ */
+ FFS_DEACTIVATED,
+
+ /*
* All endpoints have been closed. This state is also set if
* we encounter an unrecoverable error. The only
* unrecoverable error is situation when after reading strings
@@ -141,8 +164,6 @@
struct usb_request *ep0req; /* P: mutex */
struct completion ep0req_completion; /* P: mutex */
int ep0req_status; /* P: mutex */
- struct completion epin_completion;
- struct completion epout_completion;
/* reference counter */
atomic_t ref;
@@ -237,6 +258,9 @@
kgid_t gid;
} file_perms;
+ bool no_disconnect;
+ struct work_struct reset_work;
+
/*
* The endpoint files, filled by ffs_epfiles_create(),
* destroyed by ffs_epfiles_destroy().
@@ -335,6 +359,9 @@
unsigned char isoc; /* P: ffs->eps_lock */
unsigned char _pad;
+
+ unsigned long buf_len;
+ char *buffer;
};
static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
@@ -772,11 +799,10 @@
{
struct ffs_epfile *epfile = file->private_data;
struct ffs_ep *ep;
- struct ffs_data *ffs = epfile->ffs;
char *data = NULL;
ssize_t ret;
int halt;
- int buffer_len = 0;
+ size_t buffer_len = 0;
pr_debug("%s: len %zu, read %d\n", __func__, len, read);
@@ -837,7 +863,10 @@
/* Allocate & copy */
if (!halt && !data) {
- data = kzalloc(buffer_len, GFP_KERNEL);
+
+ data = buffer_len > epfile->buf_len ?
+ kzalloc(buffer_len, GFP_KERNEL) :
+ epfile->buffer;
if (unlikely(!data))
return -ENOMEM;
@@ -874,27 +903,21 @@
ret = -EBADMSG;
} else {
/* Fire the request */
- struct completion *done;
+ DECLARE_COMPLETION_ONSTACK(done);
struct usb_request *req = ep->req;
req->complete = ffs_epfile_io_complete;
req->buf = data;
req->length = buffer_len;
+ req->context = &done;
- if (read) {
- INIT_COMPLETION(ffs->epout_completion);
- req->context = done = &ffs->epout_completion;
- } else {
- INIT_COMPLETION(ffs->epin_completion);
- req->context = done = &ffs->epin_completion;
- }
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
spin_unlock_irq(&epfile->ffs->eps_lock);
if (unlikely(ret < 0)) {
ret = -EIO;
- } else if (unlikely(wait_for_completion_interruptible(done))) {
+ } else if (unlikely(wait_for_completion_interruptible(&done))) {
spin_lock_irq(&epfile->ffs->eps_lock);
/*
* While we were acquiring lock endpoint got disabled
@@ -917,14 +940,14 @@
spin_unlock_irq(&epfile->ffs->eps_lock);
if (read && ret > 0) {
if (len != MAX_BUF_LEN && ret < len)
- pr_err("less data(%zd) recieved than intended length(%zu)\n",
+ pr_debug("less data(%zd) received than intended length(%zu)\n",
ret, len);
if (ret > len) {
- ret = -EOVERFLOW;
- pr_err("More data(%zd) recieved than intended length(%zu)\n",
+ pr_err("More data(%zd) received than intended length(%zu)\n",
ret, len);
+ ret = -EOVERFLOW;
} else if (unlikely(copy_to_user(
- buf, data, ret))) {
+ buf, data, ret))) {
pr_err("Fail to copy to user len:%zd\n",
ret);
ret = -EFAULT;
@@ -935,7 +958,8 @@
mutex_unlock(&epfile->mutex);
error:
- kfree(data);
+ if (buffer_len > epfile->buf_len)
+ kfree(data);
if (ret < 0 && ret != -ERESTARTSYS)
pr_err_ratelimited("%s(): Error: returning %zd value\n",
__func__, ret);
@@ -984,6 +1008,9 @@
ENTER();
atomic_set(&epfile->error, 1);
+ epfile->buf_len = 0;
+ kfree(epfile->buffer);
+ epfile->buffer = NULL;
ffs_data_closed(epfile->ffs);
file->private_data = NULL;
@@ -1002,7 +1029,7 @@
return -ENODEV;
spin_lock_irq(&epfile->ffs->eps_lock);
- if (likely(epfile->ep)) {
+ if (epfile->ep) {
switch (code) {
case FUNCTIONFS_FIFO_STATUS:
ret = usb_ep_fifo_status(epfile->ep->ep);
@@ -1017,6 +1044,51 @@
case FUNCTIONFS_ENDPOINT_REVMAP:
ret = epfile->ep->num;
break;
+ case FUNCTIONFS_ENDPOINT_DESC:
+ {
+ int desc_idx;
+ struct usb_endpoint_descriptor *desc;
+
+ switch (epfile->ffs->gadget->speed) {
+ case USB_SPEED_SUPER:
+ desc_idx = 2;
+ break;
+ case USB_SPEED_HIGH:
+ desc_idx = 1;
+ break;
+ default:
+ desc_idx = 0;
+ }
+ desc = epfile->ep->descs[desc_idx];
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ ret = copy_to_user((void *)value, desc, sizeof(*desc));
+ if (ret)
+ ret = -EFAULT;
+ return ret;
+ }
+ case FUNCTIONFS_ENDPOINT_ALLOC:
+ {
+ void *temp = epfile->buffer;
+ epfile->buffer = NULL;
+ epfile->buf_len = 0;
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ kfree(temp);
+ if (!value)
+ return 0;
+ if (value > ENDPOINT_ALLOC_MAX)
+ return -EINVAL;
+
+ temp = kzalloc(value, GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ spin_lock_irq(&epfile->ffs->eps_lock);
+ epfile->buffer = temp;
+ epfile->buf_len = value;
+ ret = 0;
+ break;
+ }
default:
ret = -ENOTTY;
}
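
A userspace FunctionFS client reaches the two new ioctl cases above through the endpoint files. The sketch below is illustrative only: it assumes FUNCTIONFS_ENDPOINT_DESC and FUNCTIONFS_ENDPOINT_ALLOC are exported by this tree's <linux/usb/functionfs.h>, and that the endpoint has already been configured by writing descriptors and strings to ep0.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/usb/ch9.h>
	#include <linux/usb/functionfs.h>	/* assumed to define both ioctls */

	int main(void)
	{
		struct usb_endpoint_descriptor desc;
		int ep = open("/dev/usb-ffs/adb/ep1", O_RDWR);	/* path is illustrative */

		if (ep < 0)
			return 1;

		/* Fetch the descriptor chosen for the currently negotiated speed. */
		if (ioctl(ep, FUNCTIONFS_ENDPOINT_DESC, &desc) == 0)
			printf("wMaxPacketSize=%u\n", (unsigned int)desc.wMaxPacketSize);

		/* Pre-allocate a 1 MiB kernel-side transfer buffer (passing 0 frees it). */
		ioctl(ep, FUNCTIONFS_ENDPOINT_ALLOC, 1UL << 20);
		return 0;
	}
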
@@ -1117,6 +1189,7 @@
struct ffs_file_perms perms;
umode_t root_mode;
const char *dev_name;
+ bool no_disconnect;
struct ffs_data *ffs_data;
};
@@ -1187,6 +1260,12 @@
/* Interpret option */
switch (eq - opts) {
+ case 13:
+ if (!memcmp(opts, "no_disconnect", 13))
+ data->no_disconnect = !!value;
+ else
+ goto invalid;
+ break;
case 5:
if (!memcmp(opts, "rmode", 5))
data->root_mode = (value & 0555) | S_IFDIR;
@@ -1251,6 +1330,7 @@
.gid = GLOBAL_ROOT_GID,
},
.root_mode = S_IFDIR | 0500,
+ .no_disconnect = false,
};
struct dentry *rv;
int ret;
@@ -1267,6 +1347,7 @@
if (unlikely(!ffs))
return ERR_PTR(-ENOMEM);
ffs->file_perms = data.perms;
+ ffs->no_disconnect = data.no_disconnect;
ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
if (unlikely(!ffs->dev_name)) {
@@ -1298,6 +1379,7 @@
kill_litter_super(sb);
if (sb->s_fs_info) {
functionfs_release_dev_callback(sb->s_fs_info);
+ ffs_data_closed(sb->s_fs_info);
ffs_data_put(sb->s_fs_info);
}
}
@@ -1356,7 +1438,11 @@
smp_mb__before_atomic();
atomic_inc(&ffs->ref);
- atomic_inc(&ffs->opened);
+ if (atomic_add_return(1, &ffs->opened) == 1 &&
+ ffs->state == FFS_DEACTIVATED) {
+ ffs->state = FFS_CLOSING;
+ ffs_data_reset(ffs);
+ }
}
static void ffs_data_put(struct ffs_data *ffs)
@@ -1380,6 +1466,21 @@
smp_mb__before_atomic();
if (atomic_dec_and_test(&ffs->opened)) {
+ if (ffs->no_disconnect) {
+ ffs->state = FFS_DEACTIVATED;
+ if (ffs->epfiles) {
+ ffs_epfiles_destroy(ffs->epfiles,
+ ffs->eps_count);
+ ffs->epfiles = NULL;
+ }
+ if (ffs->setup_state == FFS_SETUP_PENDING)
+ __ffs_ep0_stall(ffs);
+ } else {
+ ffs->state = FFS_CLOSING;
+ ffs_data_reset(ffs);
+ }
+ }
+ if (atomic_read(&ffs->opened) < 0) {
ffs->state = FFS_CLOSING;
ffs_data_reset(ffs);
}
@@ -1402,8 +1503,6 @@
spin_lock_init(&ffs->eps_lock);
init_waitqueue_head(&ffs->ev.waitq);
init_completion(&ffs->ep0req_completion);
- init_completion(&ffs->epout_completion);
- init_completion(&ffs->epin_completion);
/* XXX REVISIT need to update it in some places, or do we? */
ffs->ev.can_stall = 1;
@@ -1646,14 +1745,17 @@
spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
- atomic_set(&epfile->error, 1);
+ if (epfile)
+ atomic_set(&epfile->error, 1);
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
- epfile->ep = NULL;
-
++ep;
- ++epfile;
+
+ if (epfile) {
+ epfile->ep = NULL;
+ ++epfile;
+ }
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
@@ -2481,6 +2583,13 @@
/* Other USB function hooks *************************************************/
+static void ffs_reset_work(struct work_struct *work)
+{
+ struct ffs_data *ffs = container_of(work,
+ struct ffs_data, reset_work);
+ ffs_data_reset(ffs);
+}
+
static void ffs_func_unbind(struct usb_configuration *c,
struct usb_function *f)
{
@@ -2517,6 +2626,13 @@
ffs->func = NULL;
}
+ if (ffs->state == FFS_DEACTIVATED) {
+ ffs->state = FFS_CLOSING;
+ INIT_WORK(&ffs->reset_work, ffs_reset_work);
+ schedule_work(&ffs->reset_work);
+ return -ENODEV;
+ }
+
if (ffs->state != FFS_ACTIVE)
return -ENODEV;
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 0160ac5..35756ef 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -2,7 +2,7 @@
* Core MDSS framebuffer driver.
*
* Copyright (C) 2007 Google Incorporated
- * Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -3479,6 +3479,7 @@
goto buf_sync_err_2;
}
+
ret = copy_to_user(buf_sync->rel_fen_fd, &rel_fen_fd, sizeof(int));
if (ret) {
pr_err("%s: copy_to_user failed\n", sync_pt_data->fence_name);
@@ -3515,6 +3516,7 @@
goto buf_sync_err_3;
}
+
ret = copy_to_user(buf_sync->retire_fen_fd, &retire_fen_fd,
sizeof(int));
if (ret) {
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index 82b5e6c..b2b92ce 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -21,6 +21,7 @@
#include <linux/notifier.h>
#include <linux/irqreturn.h>
#include <linux/kref.h>
+#include <linux/kthread.h>
#include "mdss.h"
#include "mdss_mdp_hwio.h"
@@ -579,11 +580,14 @@
struct sw_sync_timeline *vsync_timeline;
struct mdss_mdp_vsync_handler vsync_retire_handler;
- struct work_struct retire_work;
int retire_cnt;
bool kickoff_released;
u32 cursor_ndx[2];
bool dyn_mode_switch; /* Used in prepare, bw calc for new mode */
+
+ struct kthread_worker worker;
+ struct kthread_work vsync_work;
+ struct task_struct *thread;
};
struct mdss_mdp_set_ot_params {
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index 2b384af..aa67968 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -1213,7 +1213,7 @@
int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl, int panel_power_state)
{
struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
- struct mdss_mdp_cmd_ctx *sctx;
+ struct mdss_mdp_cmd_ctx *sctx = NULL;
struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
bool panel_off = false;
bool turn_off_clocks = false;
@@ -1551,4 +1551,3 @@
return 0;
}
-
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 94803ab..95f3790 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -4594,12 +4594,12 @@
/*
* the retire work can still schedule after above retire_signal
* api call. Flush workqueue guarantees that current caller
- * context is blocked till retire_work finishes. Any work
+ * context is blocked till vsync_work finishes. Any work
* schedule after flush call should not cause any issue because
* retire_signal api checks for retire_cnt with sync_mutex lock.
*/
- flush_work(&mdp5_data->retire_work);
+ flush_kthread_work(&mdp5_data->vsync_work);
}
ctl_stop:
@@ -4786,13 +4786,13 @@
}
mdp5_data = mfd_to_mdp5_data(mfd);
- schedule_work(&mdp5_data->retire_work);
+ queue_kthread_work(&mdp5_data->worker, &mdp5_data->vsync_work);
}
-static void __vsync_retire_work_handler(struct work_struct *work)
+static void __vsync_retire_work_handler(struct kthread_work *work)
{
struct mdss_overlay_private *mdp5_data =
- container_of(work, typeof(*mdp5_data), retire_work);
+ container_of(work, typeof(*mdp5_data), vsync_work);
if (!mdp5_data->ctl || !mdp5_data->ctl->mfd)
return;
@@ -4880,6 +4880,7 @@
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
char name[24];
+ struct sched_param param = { .sched_priority = 5 };
snprintf(name, sizeof(name), "mdss_fb%d_retire", mfd->index);
mdp5_data->vsync_timeline = sw_sync_timeline_create(name);
@@ -4887,12 +4888,25 @@
pr_err("cannot vsync create time line");
return -ENOMEM;
}
+
+ init_kthread_worker(&mdp5_data->worker);
+ init_kthread_work(&mdp5_data->vsync_work, __vsync_retire_work_handler);
+
+ mdp5_data->thread = kthread_run(kthread_worker_fn,
+ &mdp5_data->worker, "vsync_retire_work");
+
+ if (IS_ERR(mdp5_data->thread)) {
+ pr_err("unable to start vsync thread\n");
+ mdp5_data->thread = NULL;
+ return -ENOMEM;
+ }
+
+ sched_setscheduler(mdp5_data->thread, SCHED_FIFO, &param);
mfd->mdp_sync_pt_data.get_retire_fence = __vsync_retire_get_fence;
mdp5_data->vsync_retire_handler.vsync_handler =
__vsync_retire_handle_vsync;
mdp5_data->vsync_retire_handler.cmd_post_flush = false;
- INIT_WORK(&mdp5_data->retire_work, __vsync_retire_work_handler);
return 0;
}
diff --git a/fs/attr.c b/fs/attr.c
index 66fa625..64fc598 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -167,7 +167,7 @@
}
EXPORT_SYMBOL(setattr_copy);
-int notify_change(struct dentry * dentry, struct iattr * attr)
+int notify_change2(struct vfsmount *mnt, struct dentry * dentry, struct iattr * attr)
{
struct inode *inode = dentry->d_inode;
umode_t mode = inode->i_mode;
@@ -239,7 +239,9 @@
if (error)
return error;
- if (inode->i_op->setattr)
+ if (mnt && inode->i_op->setattr2)
+ error = inode->i_op->setattr2(mnt, dentry, attr);
+ else if (inode->i_op->setattr)
error = inode->i_op->setattr(dentry, attr);
else
error = simple_setattr(dentry, attr);
@@ -252,4 +254,10 @@
return error;
}
+EXPORT_SYMBOL(notify_change2);
+
+int notify_change(struct dentry * dentry, struct iattr * attr)
+{
+ return notify_change2(NULL, dentry, attr);
+}
EXPORT_SYMBOL(notify_change);
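
The pattern introduced here, and repeated for the other VFS entry points later in this patch, is to add a *2 variant that threads the vfsmount down to the filesystem while the original symbol becomes a NULL-mount wrapper. A filesystem that wants per-mount context (sdcardfs is the consumer in this tree) would implement the new setattr2 inode operation; a minimal sketch with an invented filesystem name:

	#include <linux/fs.h>
	#include <linux/mount.h>

	/* Illustrative only: examplefs stands in for a real mount-aware filesystem. */
	static int examplefs_setattr2(struct vfsmount *mnt, struct dentry *dentry,
				      struct iattr *ia)
	{
		struct inode *inode = dentry->d_inode;
		int err;

		/* mnt gives access to per-mount state (e.g. permission mask options). */
		err = inode_change_ok(inode, ia);
		if (err)
			return err;

		setattr_copy(inode, ia);
		mark_inode_dirty(inode);
		return 0;
	}

	static const struct inode_operations examplefs_iops = {
		/* notify_change2() prefers setattr2 when a vfsmount is passed. */
		.setattr2 = examplefs_setattr2,
	};
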
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index ed29cb5..1ae7822 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -567,6 +567,98 @@
#endif
}
+#ifdef CONFIG_KUSER_HELPERS_SELECTIVE_DISABLE
+#define ANDROID_NOTE_OWNER "Android"
+#define ANDROID_KUSER_HELPER_TYPE 0x3L
+#define ANDROID_KUSER_HELPER_ON 0x1L
+
+static int should_call_arch_setup_additional_pages(struct linux_binprm *bprm,
+ struct elfhdr *elf_ex,
+ struct elf_phdr *elf_ppnt)
+{
+ Elf64_Half i;
+
+ /* We want to allow vdso, but not kuser_helpers */
+ if (elf_ex->e_ident[EI_CLASS] == ELFCLASS64)
+ return true;
+
+ for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
+ int retval;
+ void *elf_pnotes;
+ struct elf32_note *next_elf_notep;
+ Elf64_Xword left_to_read;
+
+ if (elf_ppnt->p_type != PT_NOTE)
+ continue;
+
+ /*
+ * This code checks whether the program headers carry the special note.
+ * The note tells us that this binary still needs
+ * arch_setup_additional_pages to be called.
+ */
+ if (elf_ppnt->p_filesz < sizeof(struct elf32_note))
+ break;
+
+ elf_pnotes = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
+ if (!elf_pnotes)
+ return -ENOMEM;
+
+ retval = kernel_read(bprm->file, elf_ppnt->p_offset,
+ (char *)elf_pnotes, elf_ppnt->p_filesz);
+ if (retval < 0) {
+ kfree(elf_pnotes);
+ return retval;
+ }
+
+ if ((Elf64_Xword) retval != elf_ppnt->p_filesz) {
+ kfree(elf_pnotes);
+ return -EIO;
+ }
+
+ /*
+ * Now that we have read in all the notes, look for ours.
+ */
+ next_elf_notep = (struct elf32_note *)elf_pnotes;
+ left_to_read = elf_ppnt->p_filesz;
+ while (left_to_read >= sizeof(struct elf32_note)) {
+ char *note_namep;
+
+ left_to_read -= sizeof(struct elf32_note);
+
+ /* Sanity check on the name and desc length */
+ if (((Elf64_Xword) next_elf_notep->n_namesz +
+ (Elf64_Xword) next_elf_notep->n_descsz) >
+ left_to_read)
+ break;
+
+ note_namep = (char *)next_elf_notep +
+ sizeof(struct elf32_note);
+ left_to_read -= next_elf_notep->n_namesz;
+ left_to_read -= next_elf_notep->n_descsz;
+
+ if ((sizeof(ANDROID_NOTE_OWNER) ==
+ next_elf_notep->n_namesz) &&
+ (next_elf_notep->n_type ==
+ ANDROID_KUSER_HELPER_TYPE) &&
+ strncmp(note_namep, ANDROID_NOTE_OWNER,
+ next_elf_notep->n_namesz) == 0) {
+ kfree(elf_pnotes);
+ return true;
+ }
+
+ next_elf_notep = (struct elf32_note *)
+ (note_namep +
+ next_elf_notep->n_namesz +
+ next_elf_notep->n_descsz);
+ }
+
+ kfree(elf_pnotes);
+ }
+
+ return false;
+}
+#endif
+
static int load_elf_binary(struct linux_binprm *bprm)
{
struct file *interpreter = NULL; /* to shut gcc up */
@@ -931,18 +1023,29 @@
}
}
- kfree(elf_phdata);
-
set_binfmt(&elf_format);
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
- retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
- if (retval < 0) {
- send_sig(SIGKILL, current, 0);
+#ifdef CONFIG_KUSER_HELPERS_SELECTIVE_DISABLE
+ retval = should_call_arch_setup_additional_pages(bprm, &loc->elf_ex,
+ elf_phdata);
+ if (retval < 0)
goto out;
+
+ if (retval) {
+#endif
+ retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
+ if (retval < 0) {
+ send_sig(SIGKILL, current, 0);
+ goto out;
+ }
+#ifdef CONFIG_KUSER_HELPERS_SELECTIVE_DISABLE
}
+#endif
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
+ kfree(elf_phdata);
+
install_exec_creds(bprm);
retval = create_elf_tables(bprm, &loc->elf_ex,
load_addr, interp_load_addr);
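
For context on what the loader change above scans for: a 32-bit binary that still needs the kuser helpers is expected to carry a PT_NOTE segment whose owner string is "Android" and whose type is ANDROID_KUSER_HELPER_TYPE (0x3). A hedged sketch of emitting such a note from C; the section name and the descriptor word are illustrative, since the check above only inspects n_namesz, n_type, and the owner string:

	#include <elf.h>

	struct android_kuser_note {
		Elf32_Nhdr hdr;
		char       owner[8];	/* "Android" plus NUL, already 4-byte aligned */
		Elf32_Word desc;	/* ANDROID_KUSER_HELPER_ON */
	};

	__attribute__((used, aligned(4), section(".note.android.kuser")))
	static const struct android_kuser_note kuser_note = {
		.hdr   = { .n_namesz = sizeof("Android"), .n_descsz = 4, .n_type = 0x3 },
		.owner = "Android",
		.desc  = 0x1,
	};
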
diff --git a/fs/coredump.c b/fs/coredump.c
index 1d402ce..c0fba3f 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -644,7 +644,7 @@
goto close_fail;
if (!cprm.file->f_op || !cprm.file->f_op->write)
goto close_fail;
- if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
+ if (do_truncate2(cprm.file->f_path.mnt, cprm.file->f_path.dentry, 0, 0, cprm.file))
goto close_fail;
}
diff --git a/fs/exec.c b/fs/exec.c
index cb7f31c..03182da 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1100,7 +1100,7 @@
void would_dump(struct linux_binprm *bprm, struct file *file)
{
- if (inode_permission(file_inode(file), MAY_READ) < 0)
+ if (inode_permission2(file->f_path.mnt, file_inode(file), MAY_READ) < 0)
bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
}
EXPORT_SYMBOL(would_dump);
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index 2b49293..a5ba775 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -34,6 +34,7 @@
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
+#include <linux/namei.h>
#include "ext4_extents.h"
#include "xattr.h"
@@ -485,6 +486,9 @@
struct ext4_crypt_info *ci;
int dir_has_key, cached_with_key;
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
dir = dget_parent(dentry);
if (!ext4_encrypted_inode(d_inode(dir))) {
dput(dir);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 28481d3..8f8de66 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3440,6 +3440,11 @@
unsigned blocksize;
struct inode *inode = mapping->host;
+ /* If we are processing an encrypted inode during orphan list
+ * handling, the key may not be available; skip zeroing this block. */
+ if (ext4_encrypted_inode(inode) && !ext4_has_encryption_key(inode))
+ return 0;
+
blocksize = inode->i_sb->s_blocksize;
length = blocksize - (offset & (blocksize - 1));
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index d8ac61d..5a1bb82 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -127,6 +127,7 @@
}
return fs;
}
+EXPORT_SYMBOL_GPL(copy_fs_struct);
int unshare_fs_struct(void)
{
diff --git a/fs/inode.c b/fs/inode.c
index 1eab5de..e7a0547 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1634,12 +1634,12 @@
}
EXPORT_SYMBOL(should_remove_suid);
-static int __remove_suid(struct dentry *dentry, int kill)
+static int __remove_suid(struct vfsmount *mnt, struct dentry *dentry, int kill)
{
struct iattr newattrs;
newattrs.ia_valid = ATTR_FORCE | kill;
- return notify_change(dentry, &newattrs);
+ return notify_change2(mnt, dentry, &newattrs);
}
int file_remove_suid(struct file *file)
@@ -1662,7 +1662,7 @@
if (killpriv)
error = security_inode_killpriv(dentry);
if (!error && killsuid)
- error = __remove_suid(dentry, killsuid);
+ error = __remove_suid(file->f_path.mnt, dentry, killsuid);
if (!error && (inode->i_sb->s_flags & MS_NOSEC))
inode->i_flags |= S_NOSEC;
diff --git a/fs/internal.h b/fs/internal.h
index 6812158..8c89460 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -83,9 +83,11 @@
* super.c
*/
extern int do_remount_sb(struct super_block *, int, void *, int);
+extern int do_remount_sb2(struct vfsmount *, struct super_block *, int,
+ void *, int);
extern bool grab_super_passive(struct super_block *sb);
extern struct dentry *mount_fs(struct file_system_type *,
- int, const char *, void *);
+ int, const char *, struct vfsmount *, void *);
extern struct super_block *user_get_super(dev_t);
/*
diff --git a/fs/namei.c b/fs/namei.c
index 750f569..5778b51 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -356,9 +356,11 @@
* flag in inode->i_opflags, that says "this has not special
* permission function, use the fast case".
*/
-static inline int do_inode_permission(struct inode *inode, int mask)
+static inline int do_inode_permission(struct vfsmount *mnt, struct inode *inode, int mask)
{
if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
+ if (likely(mnt && inode->i_op->permission2))
+ return inode->i_op->permission2(mnt, inode, mask);
if (likely(inode->i_op->permission))
return inode->i_op->permission(inode, mask);
@@ -382,7 +384,7 @@
* This does not check for a read-only file system. You probably want
* inode_permission().
*/
-int __inode_permission(struct inode *inode, int mask)
+int __inode_permission2(struct vfsmount *mnt, struct inode *inode, int mask)
{
int retval;
@@ -394,7 +396,7 @@
return -EACCES;
}
- retval = do_inode_permission(inode, mask);
+ retval = do_inode_permission(mnt, inode, mask);
if (retval)
return retval;
@@ -404,6 +406,13 @@
return security_inode_permission(inode, mask);
}
+EXPORT_SYMBOL(__inode_permission2);
+
+int __inode_permission(struct inode *inode, int mask)
+{
+ return __inode_permission2(NULL, inode, mask);
+}
+EXPORT_SYMBOL(__inode_permission);
/**
* sb_permission - Check superblock-level permissions
@@ -437,15 +446,22 @@
*
* When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
*/
-int inode_permission(struct inode *inode, int mask)
+int inode_permission2(struct vfsmount *mnt, struct inode *inode, int mask)
{
int retval;
retval = sb_permission(inode->i_sb, inode, mask);
if (retval)
return retval;
- return __inode_permission(inode, mask);
+ return __inode_permission2(mnt, inode, mask);
}
+EXPORT_SYMBOL(inode_permission2);
+
+int inode_permission(struct inode *inode, int mask)
+{
+ return inode_permission2(NULL, inode, mask);
+}
+EXPORT_SYMBOL(inode_permission);
/**
* path_get - get a reference to a path
@@ -1460,13 +1476,13 @@
static inline int may_lookup(struct nameidata *nd)
{
if (nd->flags & LOOKUP_RCU) {
- int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
+ int err = inode_permission2(nd->path.mnt, nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
if (err != -ECHILD)
return err;
if (unlazy_walk(nd, NULL))
return -ECHILD;
}
- return inode_permission(nd->inode, MAY_EXEC);
+ return inode_permission2(nd->path.mnt, nd->inode, MAY_EXEC);
}
static inline int handle_dots(struct nameidata *nd, int type)
@@ -1840,10 +1856,11 @@
nd->depth = 0;
if (flags & LOOKUP_ROOT) {
struct inode *inode = nd->root.dentry->d_inode;
+ struct vfsmount *mnt = nd->root.mnt;
if (*name) {
if (!can_lookup(inode))
return -ENOTDIR;
- retval = inode_permission(inode, MAY_EXEC);
+ retval = inode_permission2(mnt, inode, MAY_EXEC);
if (retval)
return retval;
}
@@ -2085,6 +2102,7 @@
/**
* lookup_one_len - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
+ * @mnt: mount we are looking up on
* @base: base directory to lookup from
* @len: maximum length @len should be interpreted to
*
@@ -2093,7 +2111,7 @@
* nameidata argument is passed to the filesystem methods and a filesystem
* using this helper needs to be prepared for that.
*/
-struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
+struct dentry *lookup_one_len2(const char *name, struct vfsmount *mnt, struct dentry *base, int len)
{
struct qstr this;
unsigned int c;
@@ -2127,12 +2145,19 @@
return ERR_PTR(err);
}
- err = inode_permission(base->d_inode, MAY_EXEC);
+ err = inode_permission2(mnt, base->d_inode, MAY_EXEC);
if (err)
return ERR_PTR(err);
return __lookup_hash(&this, base, 0);
}
+EXPORT_SYMBOL(lookup_one_len2);
+
+struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
+{
+ return lookup_one_len2(name, NULL, base, len);
+}
+EXPORT_SYMBOL(lookup_one_len);
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
struct path *path, int *empty)
@@ -2222,7 +2247,7 @@
* 10. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
-static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
+static int may_delete(struct vfsmount *mnt, struct inode *dir,struct dentry *victim,int isdir)
{
int error;
@@ -2232,7 +2257,7 @@
BUG_ON(victim->d_parent->d_inode != dir);
audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
- error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ error = inode_permission2(mnt, dir, MAY_WRITE | MAY_EXEC);
if (error)
return error;
if (IS_APPEND(dir))
@@ -2262,14 +2287,14 @@
* 3. We should have write and exec permissions on dir
* 4. We can't do it if dir is immutable (done in permission())
*/
-static inline int may_create(struct inode *dir, struct dentry *child)
+static inline int may_create(struct vfsmount *mnt, struct inode *dir, struct dentry *child)
{
audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
if (child->d_inode)
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
- return inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ return inode_permission2(mnt, dir, MAY_WRITE | MAY_EXEC);
}
/*
@@ -2314,10 +2339,10 @@
}
}
-int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- bool want_excl)
+int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry,
+ umode_t mode, bool want_excl)
{
- int error = may_create(dir, dentry);
+ int error = may_create(mnt, dir, dentry);
if (error)
return error;
@@ -2338,10 +2363,19 @@
fsnotify_create(dir, dentry);
return error;
}
+EXPORT_SYMBOL(vfs_create2);
+
+int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool want_excl)
+{
+ return vfs_create2(NULL, dir, dentry, mode, want_excl);
+}
+EXPORT_SYMBOL(vfs_create);
static int may_open(struct path *path, int acc_mode, int flag)
{
struct dentry *dentry = path->dentry;
+ struct vfsmount *mnt = path->mnt;
struct inode *inode = dentry->d_inode;
int error;
@@ -2370,7 +2404,7 @@
break;
}
- error = inode_permission(inode, acc_mode);
+ error = inode_permission2(mnt, inode, acc_mode);
if (error)
return error;
@@ -2405,7 +2439,7 @@
if (!error)
error = security_path_truncate(path);
if (!error) {
- error = do_truncate(path->dentry, 0,
+ error = do_truncate2(path->mnt, path->dentry, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
filp);
}
@@ -2426,7 +2460,7 @@
if (error)
return error;
- error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
+ error = inode_permission2(dir->mnt, dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
if (error)
return error;
@@ -2614,6 +2648,7 @@
bool got_write, int *opened)
{
struct dentry *dir = nd->path.dentry;
+ struct vfsmount *mnt = nd->path.mnt;
struct inode *dir_inode = dir->d_inode;
struct dentry *dentry;
int error;
@@ -2661,7 +2696,7 @@
error = security_path_mknod(&nd->path, dentry, mode, 0);
if (error)
goto out_dput;
- error = vfs_create(dir->d_inode, dentry, mode,
+ error = vfs_create2(mnt, dir->d_inode, dentry, mode,
nd->flags & LOOKUP_EXCL);
if (error)
goto out_dput;
@@ -3122,9 +3157,9 @@
}
EXPORT_SYMBOL(user_path_create);
-int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
- int error = may_create(dir, dentry);
+ int error = may_create(mnt, dir, dentry);
if (error)
return error;
@@ -3155,6 +3190,13 @@
return 0;
}
+EXPORT_SYMBOL(vfs_mknod2);
+
+int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
+{
+ return vfs_mknod2(NULL, dir, dentry, mode, dev);
+}
+EXPORT_SYMBOL(vfs_mknod);
static int may_mknod(umode_t mode)
{
@@ -3196,10 +3238,10 @@
goto out;
switch (mode & S_IFMT) {
case 0: case S_IFREG:
- error = vfs_create(path.dentry->d_inode,dentry,mode,true);
+ error = vfs_create2(path.mnt, path.dentry->d_inode,dentry,mode,true);
break;
case S_IFCHR: case S_IFBLK:
- error = vfs_mknod(path.dentry->d_inode,dentry,mode,
+ error = vfs_mknod2(path.mnt, path.dentry->d_inode,dentry,mode,
new_decode_dev(dev));
break;
case S_IFIFO: case S_IFSOCK:
@@ -3220,9 +3262,9 @@
return sys_mknodat(AT_FDCWD, filename, mode, dev);
}
-int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+int vfs_mkdir2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, umode_t mode)
{
- int error = may_create(dir, dentry);
+ int error = may_create(mnt, dir, dentry);
unsigned max_links = dir->i_sb->s_max_links;
if (error)
@@ -3244,6 +3286,13 @@
fsnotify_mkdir(dir, dentry);
return error;
}
+EXPORT_SYMBOL(vfs_mkdir2);
+
+int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ return vfs_mkdir2(NULL, dir, dentry, mode);
+}
+EXPORT_SYMBOL(vfs_mkdir);
SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
{
@@ -3261,7 +3310,7 @@
mode &= ~current_umask();
error = security_path_mkdir(&path, dentry, mode);
if (!error)
- error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+ error = vfs_mkdir2(path.mnt, path.dentry->d_inode, dentry, mode);
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
@@ -3299,9 +3348,9 @@
spin_unlock(&dentry->d_lock);
}
-int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+int vfs_rmdir2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry)
{
- int error = may_delete(dir, dentry, 1);
+ int error = may_delete(mnt, dir, dentry, 1);
if (error)
return error;
@@ -3335,6 +3384,13 @@
d_delete(dentry);
return error;
}
+EXPORT_SYMBOL(vfs_rmdir2);
+
+int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ return vfs_rmdir2(NULL, dir, dentry);
+}
+EXPORT_SYMBOL(vfs_rmdir);
static long do_rmdir(int dfd, const char __user *pathname)
{
@@ -3377,7 +3433,7 @@
error = security_path_rmdir(&nd.path, dentry);
if (error)
goto exit3;
- error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
+ error = vfs_rmdir2(nd.path.mnt, nd.path.dentry->d_inode, dentry);
exit3:
dput(dentry);
exit2:
@@ -3398,9 +3454,9 @@
return do_rmdir(AT_FDCWD, pathname);
}
-int vfs_unlink(struct inode *dir, struct dentry *dentry)
+int vfs_unlink2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry)
{
- int error = may_delete(dir, dentry, 0);
+ int error = may_delete(mnt, dir, dentry, 0);
if (error)
return error;
@@ -3429,6 +3485,13 @@
return error;
}
+EXPORT_SYMBOL(vfs_unlink2);
+
+int vfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ return vfs_unlink2(NULL, dir, dentry);
+}
+EXPORT_SYMBOL(vfs_unlink);
/*
* Make sure that the actual truncation of the file will occur outside its
@@ -3472,7 +3535,7 @@
error = security_path_unlink(&nd.path, dentry);
if (error)
goto exit2;
- error = vfs_unlink(nd.path.dentry->d_inode, dentry);
+ error = vfs_unlink2(nd.path.mnt, nd.path.dentry->d_inode, dentry);
exit2:
dput(dentry);
}
@@ -3512,9 +3575,9 @@
return do_unlinkat(AT_FDCWD, pathname);
}
-int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+int vfs_symlink2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, const char *oldname)
{
- int error = may_create(dir, dentry);
+ int error = may_create(mnt, dir, dentry);
if (error)
return error;
@@ -3531,6 +3594,13 @@
fsnotify_create(dir, dentry);
return error;
}
+EXPORT_SYMBOL(vfs_symlink2);
+
+int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+{
+ return vfs_symlink2(NULL, dir, dentry, oldname);
+}
+EXPORT_SYMBOL(vfs_symlink);
SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
int, newdfd, const char __user *, newname)
@@ -3552,7 +3622,7 @@
error = security_path_symlink(&path, dentry, from->name);
if (!error)
- error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
+ error = vfs_symlink2(path.mnt, path.dentry->d_inode, dentry, from->name);
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
@@ -3568,7 +3638,7 @@
return sys_symlinkat(oldname, AT_FDCWD, newname);
}
-int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
+int vfs_link2(struct vfsmount *mnt, struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
{
struct inode *inode = old_dentry->d_inode;
unsigned max_links = dir->i_sb->s_max_links;
@@ -3577,7 +3647,7 @@
if (!inode)
return -ENOENT;
- error = may_create(dir, new_dentry);
+ error = may_create(mnt, dir, new_dentry);
if (error)
return error;
@@ -3611,6 +3681,13 @@
fsnotify_link(dir, inode, new_dentry);
return error;
}
+EXPORT_SYMBOL(vfs_link2);
+
+int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
+{
+ return vfs_link2(NULL, old_dentry, dir, new_dentry);
+}
+EXPORT_SYMBOL(vfs_link);
/*
* Hardlinks are often used in delicate situations. We avoid
@@ -3664,7 +3741,7 @@
error = security_path_link(old_path.dentry, &new_path, new_dentry);
if (error)
goto out_dput;
- error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
+ error = vfs_link2(old_path.mnt, old_path.dentry, new_path.dentry->d_inode, new_dentry);
out_dput:
done_path_create(&new_path, new_dentry);
if (retry_estale(error, how)) {
@@ -3710,8 +3787,9 @@
* ->i_mutex on parents, which works but leads to some truly excessive
* locking].
*/
-static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
+static int vfs_rename_dir(struct vfsmount *mnt,
+ struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
{
int error = 0;
struct inode *target = new_dentry->d_inode;
@@ -3722,7 +3800,7 @@
* we'll need to flip '..'.
*/
if (new_dir != old_dir) {
- error = inode_permission(old_dentry->d_inode, MAY_WRITE);
+ error = inode_permission2(mnt, old_dentry->d_inode, MAY_WRITE);
if (error)
return error;
}
@@ -3797,7 +3875,8 @@
return error;
}
-int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+int vfs_rename2(struct vfsmount *mnt,
+ struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
int error;
@@ -3807,14 +3886,14 @@
if (old_dentry->d_inode == new_dentry->d_inode)
return 0;
- error = may_delete(old_dir, old_dentry, is_dir);
+ error = may_delete(mnt, old_dir, old_dentry, is_dir);
if (error)
return error;
if (!new_dentry->d_inode)
- error = may_create(new_dir, new_dentry);
+ error = may_create(mnt, new_dir, new_dentry);
else
- error = may_delete(new_dir, new_dentry, is_dir);
+ error = may_delete(mnt, new_dir, new_dentry, is_dir);
if (error)
return error;
@@ -3824,7 +3903,7 @@
old_name = fsnotify_oldname_init(old_dentry->d_name.name);
if (is_dir)
- error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
+ error = vfs_rename_dir(mnt, old_dir,old_dentry,new_dir,new_dentry);
else
error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
if (!error)
@@ -3834,6 +3913,14 @@
return error;
}
+EXPORT_SYMBOL(vfs_rename2);
+
+int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ return vfs_rename2(NULL, old_dir, old_dentry, new_dir, new_dentry);
+}
+EXPORT_SYMBOL(vfs_rename);
SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname)
@@ -3916,7 +4003,7 @@
&newnd.path, new_dentry);
if (error)
goto exit5;
- error = vfs_rename(old_dir->d_inode, old_dentry,
+ error = vfs_rename2(oldnd.path.mnt, old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry);
exit5:
dput(new_dentry);
@@ -4090,7 +4177,6 @@
EXPORT_SYMBOL(follow_up);
EXPORT_SYMBOL(get_write_access); /* nfsd */
EXPORT_SYMBOL(lock_rename);
-EXPORT_SYMBOL(lookup_one_len);
EXPORT_SYMBOL(page_follow_link_light);
EXPORT_SYMBOL(page_put_link);
EXPORT_SYMBOL(page_readlink);
@@ -4099,18 +4185,9 @@
EXPORT_SYMBOL(page_symlink_inode_operations);
EXPORT_SYMBOL(kern_path);
EXPORT_SYMBOL(vfs_path_lookup);
-EXPORT_SYMBOL(inode_permission);
EXPORT_SYMBOL(unlock_rename);
-EXPORT_SYMBOL(vfs_create);
EXPORT_SYMBOL(vfs_follow_link);
-EXPORT_SYMBOL(vfs_link);
-EXPORT_SYMBOL(vfs_mkdir);
-EXPORT_SYMBOL(vfs_mknod);
EXPORT_SYMBOL(generic_permission);
EXPORT_SYMBOL(vfs_readlink);
-EXPORT_SYMBOL(vfs_rename);
-EXPORT_SYMBOL(vfs_rmdir);
-EXPORT_SYMBOL(vfs_symlink);
-EXPORT_SYMBOL(vfs_unlink);
EXPORT_SYMBOL(dentry_unhash);
EXPORT_SYMBOL(generic_readlink);
diff --git a/fs/namespace.c b/fs/namespace.c
index d0244c8..b015932 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -541,6 +541,7 @@
static void free_vfsmnt(struct mount *mnt)
{
+ kfree(mnt->mnt.data);
kfree(mnt->mnt_devname);
mnt_free_id(mnt);
#ifdef CONFIG_SMP
@@ -784,11 +785,21 @@
if (!mnt)
return ERR_PTR(-ENOMEM);
+ mnt->mnt.data = NULL;
+ if (type->alloc_mnt_data) {
+ mnt->mnt.data = type->alloc_mnt_data();
+ if (!mnt->mnt.data) {
+ mnt_free_id(mnt);
+ free_vfsmnt(mnt);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
if (flags & MS_KERNMOUNT)
mnt->mnt.mnt_flags = MNT_INTERNAL;
- root = mount_fs(type, flags, name, data);
+ root = mount_fs(type, flags, name, &mnt->mnt, data);
if (IS_ERR(root)) {
+ kfree(mnt->mnt.data);
free_vfsmnt(mnt);
return ERR_CAST(root);
}
@@ -815,6 +826,14 @@
if (!mnt)
return ERR_PTR(-ENOMEM);
+ if (sb->s_op->clone_mnt_data) {
+ mnt->mnt.data = sb->s_op->clone_mnt_data(old->mnt.data);
+ if (!mnt->mnt.data) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ }
+
if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
mnt->mnt_group_id = 0; /* not a peer of original */
else
@@ -826,7 +845,7 @@
goto out_free;
}
- mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
+ mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
/* Don't allow unprivileged users to change mount flags */
if (flag & CL_UNPRIVILEGED) {
mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
@@ -878,6 +897,7 @@
return mnt;
out_free:
+ kfree(mnt->mnt.data);
free_vfsmnt(mnt);
return ERR_PTR(err);
}
@@ -1578,16 +1598,14 @@
err = invent_group_ids(source_mnt, true);
if (err)
goto out;
- }
- err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
- if (err)
- goto out_cleanup_ids;
-
- br_write_lock(&vfsmount_lock);
-
- if (IS_MNT_SHARED(dest_mnt)) {
+ err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
+ br_write_lock(&vfsmount_lock);
+ if (err)
+ goto out_cleanup_ids;
for (p = source_mnt; p; p = next_mnt(p, source_mnt))
set_mnt_shared(p);
+ } else {
+ br_write_lock(&vfsmount_lock);
}
if (parent_path) {
detach_mnt(source_mnt, parent_path);
@@ -1607,8 +1625,12 @@
return 0;
out_cleanup_ids:
- if (IS_MNT_SHARED(dest_mnt))
- cleanup_group_ids(source_mnt, NULL);
+ while (!list_empty(&tree_list)) {
+ child = list_first_entry(&tree_list, struct mount, mnt_hash);
+ umount_tree(child, 0);
+ }
+ br_write_unlock(&vfsmount_lock);
+ cleanup_group_ids(source_mnt, NULL);
out:
return err;
}
@@ -1849,8 +1871,14 @@
err = change_mount_flags(path->mnt, flags);
else if (!capable(CAP_SYS_ADMIN))
err = -EPERM;
- else
- err = do_remount_sb(sb, flags, data, 0);
+ else {
+ err = do_remount_sb2(path->mnt, sb, flags, data, 0);
+ namespace_lock();
+ br_write_lock(&vfsmount_lock);
+ propagate_remount(mnt);
+ br_write_unlock(&vfsmount_lock);
+ namespace_unlock();
+ }
if (!err) {
br_write_lock(&vfsmount_lock);
mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
@@ -1975,7 +2003,7 @@
struct mount *parent;
int err;
- mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);
+ mnt_flags &= ~MNT_INTERNAL_FLAGS;
mp = lock_mount(path);
if (IS_ERR(mp))
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 9be6b41..92d31fa 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -490,7 +490,7 @@
}
/* you can only watch an inode if you have read permissions on it */
- ret = inode_permission(path->dentry->d_inode, MAY_READ);
+ ret = inode_permission2(path->mnt, path->dentry->d_inode, MAY_READ);
if (ret)
path_put(path);
out:
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index fce2b85..941e59d 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -349,7 +349,7 @@
if (error)
return error;
/* you can only watch an inode if you have read permissions on it */
- error = inode_permission(path->dentry->d_inode, MAY_READ);
+ error = inode_permission2(path->mnt, path->dentry->d_inode, MAY_READ);
if (error)
path_put(path);
return error;
diff --git a/fs/open.c b/fs/open.c
index b1fd2c1..eec8b7f 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -34,8 +34,8 @@
#include "internal.h"
-int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
- struct file *filp)
+int do_truncate2(struct vfsmount *mnt, struct dentry *dentry, loff_t length,
+ unsigned int time_attrs, struct file *filp)
{
int ret;
struct iattr newattrs;
@@ -57,17 +57,24 @@
newattrs.ia_valid |= ret | ATTR_FORCE;
mutex_lock(&dentry->d_inode->i_mutex);
- ret = notify_change(dentry, &newattrs);
+ ret = notify_change2(mnt, dentry, &newattrs);
mutex_unlock(&dentry->d_inode->i_mutex);
return ret;
}
+int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
+ struct file *filp)
+{
+ return do_truncate2(NULL, dentry, length, time_attrs, filp);
+}
long vfs_truncate(struct path *path, loff_t length)
{
struct inode *inode;
+ struct vfsmount *mnt;
long error;
inode = path->dentry->d_inode;
+ mnt = path->mnt;
/* For directories it's -EISDIR, for other non-regulars - -EINVAL */
if (S_ISDIR(inode->i_mode))
@@ -79,7 +86,7 @@
if (error)
goto out;
- error = inode_permission(inode, MAY_WRITE);
+ error = inode_permission2(mnt, inode, MAY_WRITE);
if (error)
goto mnt_drop_write_and_out;
@@ -103,7 +110,7 @@
if (!error)
error = security_path_truncate(path);
if (!error)
- error = do_truncate(path->dentry, length, 0, NULL);
+ error = do_truncate2(mnt, path->dentry, length, 0, NULL);
put_write_and_out:
put_write_access(inode);
@@ -152,6 +159,7 @@
{
struct inode *inode;
struct dentry *dentry;
+ struct vfsmount *mnt;
struct fd f;
int error;
@@ -168,6 +176,7 @@
small = 0;
dentry = f.file->f_path.dentry;
+ mnt = f.file->f_path.mnt;
inode = dentry->d_inode;
error = -EINVAL;
if (!S_ISREG(inode->i_mode) || !(f.file->f_mode & FMODE_WRITE))
@@ -187,7 +196,7 @@
if (!error)
error = security_path_truncate(&f.file->f_path);
if (!error)
- error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
+ error = do_truncate2(mnt, dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
sb_end_write(inode->i_sb);
out_putf:
fdput(f);
@@ -302,6 +311,7 @@
struct cred *override_cred;
struct path path;
struct inode *inode;
+ struct vfsmount *mnt;
int res;
unsigned int lookup_flags = LOOKUP_FOLLOW;
@@ -332,6 +342,7 @@
goto out;
inode = path.dentry->d_inode;
+ mnt = path.mnt;
if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) {
/*
@@ -343,7 +354,7 @@
goto out_path_release;
}
- res = inode_permission(inode, mode | MAY_ACCESS);
+ res = inode_permission2(mnt, inode, mode | MAY_ACCESS);
/* SuS v2 requires we report a read only fs too */
if (res || !(mode & S_IWOTH) || special_file(inode->i_mode))
goto out_path_release;
@@ -387,7 +398,7 @@
if (error)
goto out;
- error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
+ error = inode_permission2(path.mnt, path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
if (error)
goto dput_and_out;
@@ -407,6 +418,7 @@
{
struct fd f = fdget_raw(fd);
struct inode *inode;
+ struct vfsmount *mnt;
int error = -EBADF;
error = -EBADF;
@@ -414,12 +426,13 @@
goto out;
inode = file_inode(f.file);
+ mnt = f.file->f_path.mnt;
error = -ENOTDIR;
if (!S_ISDIR(inode->i_mode))
goto out_putf;
- error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
+ error = inode_permission2(mnt, inode, MAY_EXEC | MAY_CHDIR);
if (!error)
set_fs_pwd(current->fs, &f.file->f_path);
out_putf:
@@ -438,7 +451,7 @@
if (error)
goto out;
- error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
+ error = inode_permission2(path.mnt, path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
if (error)
goto dput_and_out;
@@ -476,7 +489,7 @@
goto out_unlock;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- error = notify_change(path->dentry, &newattrs);
+ error = notify_change2(path->mnt, path->dentry, &newattrs);
out_unlock:
mutex_unlock(&inode->i_mutex);
mnt_drop_write(path->mnt);
@@ -550,7 +563,7 @@
mutex_lock(&inode->i_mutex);
error = security_path_chown(path, uid, gid);
if (!error)
- error = notify_change(path->dentry, &newattrs);
+ error = notify_change2(path->mnt, path->dentry, &newattrs);
mutex_unlock(&inode->i_mutex);
return error;
diff --git a/fs/pnode.c b/fs/pnode.c
index 9af0df1..e8d7d68 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -164,46 +164,94 @@
}
}
-/*
- * return the source mount to be used for cloning
- *
- * @dest the current destination mount
- * @last_dest the last seen destination mount
- * @last_src the last seen source mount
- * @type return CL_SLAVE if the new mount has to be
- * cloned as a slave.
- */
-static struct mount *get_source(struct mount *dest,
- struct mount *last_dest,
- struct mount *last_src,
- int *type)
+static struct mount *next_group(struct mount *m, struct mount *origin)
{
- struct mount *p_last_src = NULL;
- struct mount *p_last_dest = NULL;
-
- while (last_dest != dest->mnt_master) {
- p_last_dest = last_dest;
- p_last_src = last_src;
- last_dest = last_dest->mnt_master;
- last_src = last_src->mnt_master;
- }
-
- if (p_last_dest) {
- do {
- p_last_dest = next_peer(p_last_dest);
- } while (IS_MNT_NEW(p_last_dest));
- /* is that a peer of the earlier? */
- if (dest == p_last_dest) {
- *type = CL_MAKE_SHARED;
- return p_last_src;
+ while (1) {
+ while (1) {
+ struct mount *next;
+ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+ return first_slave(m);
+ next = next_peer(m);
+ if (m->mnt_group_id == origin->mnt_group_id) {
+ if (next == origin)
+ return NULL;
+ } else if (m->mnt_slave.next != &next->mnt_slave)
+ break;
+ m = next;
}
+ /* m is the last peer */
+ while (1) {
+ struct mount *master = m->mnt_master;
+ if (m->mnt_slave.next != &master->mnt_slave_list)
+ return next_slave(m);
+ m = next_peer(master);
+ if (master->mnt_group_id == origin->mnt_group_id)
+ break;
+ if (master->mnt_slave.next == &m->mnt_slave)
+ break;
+ m = master;
+ }
+ if (m == origin)
+ return NULL;
}
- /* slave of the earlier, then */
- *type = CL_SLAVE;
- /* beginning of peer group among the slaves? */
- if (IS_MNT_SHARED(dest))
- *type |= CL_MAKE_SHARED;
- return last_src;
+}
+
+/* all accesses are serialized by namespace_sem */
+static struct user_namespace *user_ns;
+static struct mount *last_dest, *last_source, *dest_master;
+static struct mountpoint *mp;
+static struct list_head *list;
+
+static int propagate_one(struct mount *m)
+{
+ struct mount *child;
+ int type;
+ /* skip ones added by this propagate_mnt() */
+ if (IS_MNT_NEW(m))
+ return 0;
+ /* skip if mountpoint isn't covered by it */
+ if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
+ return 0;
+ if (m->mnt_group_id == last_dest->mnt_group_id) {
+ type = CL_MAKE_SHARED;
+ } else {
+ struct mount *n, *p;
+ for (n = m; ; n = p) {
+ p = n->mnt_master;
+ if (p == dest_master || IS_MNT_MARKED(p)) {
+ while (last_dest->mnt_master != p) {
+ last_source = last_source->mnt_master;
+ last_dest = last_source->mnt_parent;
+ }
+ if (n->mnt_group_id != last_dest->mnt_group_id) {
+ last_source = last_source->mnt_master;
+ last_dest = last_source->mnt_parent;
+ }
+ break;
+ }
+ }
+ type = CL_SLAVE;
+ /* beginning of peer group among the slaves? */
+ if (IS_MNT_SHARED(m))
+ type |= CL_MAKE_SHARED;
+ }
+
+ /* Notice when we are propagating across user namespaces */
+ if (m->mnt_ns->user_ns != user_ns)
+ type |= CL_UNPRIVILEGED;
+ child = copy_tree(last_source, last_source->mnt.mnt_root, type);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ mnt_set_mountpoint(m, mp, child);
+ last_dest = m;
+ last_source = child;
+ if (m->mnt_master != dest_master) {
+ br_write_lock(&vfsmount_lock);
+ SET_MNT_MARK(m->mnt_master);
+ br_write_unlock(&vfsmount_lock);
+ }
+ list_add_tail(&child->mnt_hash, list);
+ return 0;
}
/*
@@ -222,52 +270,46 @@
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
struct mount *source_mnt, struct list_head *tree_list)
{
- struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
- struct mount *m, *child;
+ struct mount *m, *n;
int ret = 0;
- struct mount *prev_dest_mnt = dest_mnt;
- struct mount *prev_src_mnt = source_mnt;
- LIST_HEAD(tmp_list);
- for (m = propagation_next(dest_mnt, dest_mnt); m;
- m = propagation_next(m, dest_mnt)) {
- int type;
- struct mount *source;
+ /*
+ * we don't want to bother passing tons of arguments to
+ * propagate_one(); everything is serialized by namespace_sem,
+ * so globals will do just fine.
+ */
+ user_ns = current->nsproxy->mnt_ns->user_ns;
+ last_dest = dest_mnt;
+ last_source = source_mnt;
+ mp = dest_mp;
+ list = tree_list;
+ dest_master = dest_mnt->mnt_master;
- if (IS_MNT_NEW(m))
- continue;
-
- source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);
-
- /* Notice when we are propagating across user namespaces */
- if (m->mnt_ns->user_ns != user_ns)
- type |= CL_UNPRIVILEGED;
-
- child = copy_tree(source, source->mnt.mnt_root, type);
- if (IS_ERR(child)) {
- ret = PTR_ERR(child);
- list_splice(tree_list, tmp_list.prev);
+ /* all peers of dest_mnt, except dest_mnt itself */
+ for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
+ ret = propagate_one(n);
+ if (ret)
goto out;
- }
+ }
- if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
- mnt_set_mountpoint(m, dest_mp, child);
- list_add_tail(&child->mnt_hash, tree_list);
- } else {
- /*
- * This can happen if the parent mount was bind mounted
- * on some subdirectory of a shared/slave mount.
- */
- list_add_tail(&child->mnt_hash, &tmp_list);
- }
- prev_dest_mnt = m;
- prev_src_mnt = child;
+ /* all slave groups */
+ for (m = next_group(dest_mnt, dest_mnt); m;
+ m = next_group(m, dest_mnt)) {
+ /* everything in that slave group */
+ n = m;
+ do {
+ ret = propagate_one(n);
+ if (ret)
+ goto out;
+ n = next_peer(n);
+ } while (n != m);
}
out:
br_write_lock(&vfsmount_lock);
- while (!list_empty(&tmp_list)) {
- child = list_first_entry(&tmp_list, struct mount, mnt_hash);
- umount_tree(child, 0);
+ list_for_each_entry(n, tree_list, mnt_hash) {
+ m = n->mnt_parent;
+ if (m->mnt_master != dest_mnt->mnt_master)
+ CLEAR_MNT_MARK(m->mnt_master);
}
br_write_unlock(&vfsmount_lock);
return ret;
@@ -359,3 +401,37 @@
__propagate_umount(mnt);
return 0;
}
+
+/*
+ * Iterates over all slaves, and slaves of slaves.
+ */
+static struct mount *next_descendent(struct mount *root, struct mount *cur)
+{
+ if (!IS_MNT_NEW(cur) && !list_empty(&cur->mnt_slave_list))
+ return first_slave(cur);
+ do {
+ struct mount *master = cur->mnt_master;
+
+ if (!master || cur->mnt_slave.next != &master->mnt_slave_list) {
+ struct mount *next = next_slave(cur);
+
+ return (next == root) ? NULL : next;
+ }
+ cur = master;
+ } while (cur != root);
+ return NULL;
+}
+
+void propagate_remount(struct mount *mnt)
+{
+ struct mount *m = mnt;
+ struct super_block *sb = mnt->mnt.mnt_sb;
+
+ if (sb->s_op->copy_mnt_data) {
+ m = next_descendent(mnt, m);
+ while (m) {
+ sb->s_op->copy_mnt_data(m->mnt.data, mnt->mnt.data);
+ m = next_descendent(mnt, m);
+ }
+ }
+}
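
Taken together with the fs/namespace.c changes earlier in this patch, a filesystem that keeps per-mount option data supplies three hooks: alloc_mnt_data on its file_system_type, plus clone_mnt_data and copy_mnt_data on its super_operations so that a remount is pushed out to slave mounts by propagate_remount() above. sdcardfs is the consumer in this tree; the sketch below uses an invented option structure purely to show the expected signatures:

	#include <linux/fs.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct examplefs_mnt_opts {		/* illustrative per-mount state */
		unsigned int gid;
		unsigned int mask;
	};

	/* file_system_type .alloc_mnt_data: called when a new mount is created */
	static void *examplefs_alloc_mnt_data(void)
	{
		return kzalloc(sizeof(struct examplefs_mnt_opts), GFP_KERNEL);
	}

	/* super_operations .clone_mnt_data: called when a mount is cloned */
	static void *examplefs_clone_mnt_data(void *data)
	{
		return kmemdup(data, sizeof(struct examplefs_mnt_opts), GFP_KERNEL);
	}

	/* super_operations .copy_mnt_data: called by propagate_remount() per descendant */
	static void examplefs_copy_mnt_data(void *dst, void *src)
	{
		memcpy(dst, src, sizeof(struct examplefs_mnt_opts));
	}
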
diff --git a/fs/pnode.h b/fs/pnode.h
index b091445..e16e597 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -16,6 +16,9 @@
#define IS_MNT_NEW(m) (!(m)->mnt_ns)
#define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
#define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
+#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
+#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
+#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
#define CL_EXPIRE 0x01
#define CL_SLAVE 0x02
@@ -36,6 +39,7 @@
struct list_head *);
int propagate_umount(struct list_head *);
int propagate_mount_busy(struct mount *, int);
+void propagate_remount(struct mount *);
void mnt_release_group_id(struct mount *);
int get_dominating_id(struct mount *mnt, const struct path *root);
unsigned int mnt_get_count(struct mount *mnt);
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 6ed9580..c1b1232 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -312,7 +312,8 @@
static inline void task_cap(struct seq_file *m, struct task_struct *p)
{
const struct cred *cred;
- kernel_cap_t cap_inheritable, cap_permitted, cap_effective, cap_bset;
+ kernel_cap_t cap_inheritable, cap_permitted, cap_effective,
+ cap_bset, cap_ambient;
rcu_read_lock();
cred = __task_cred(p);
@@ -320,12 +321,14 @@
cap_permitted = cred->cap_permitted;
cap_effective = cred->cap_effective;
cap_bset = cred->cap_bset;
+ cap_ambient = cred->cap_ambient;
rcu_read_unlock();
render_cap_t(m, "CapInh:\t", &cap_inheritable);
render_cap_t(m, "CapPrm:\t", &cap_permitted);
render_cap_t(m, "CapEff:\t", &cap_effective);
render_cap_t(m, "CapBnd:\t", &cap_bset);
+ render_cap_t(m, "CapAmb:\t", &cap_ambient);
}
static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 7ce45b9..8c2514a 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3245,6 +3245,44 @@
}
/*
+ * proc_tid_comm_permission is a special permission function exclusively
+ * used for the node /proc/<pid>/task/<tid>/comm.
+ * It bypasses generic permission checks in the case where a task of the same
+ * task group attempts to access the node.
+ * The rational behind this is that glibc and bionic access this node for
+ * cross thread naming (pthread_set/getname_np(!self)). However, if
+ * PR_SET_DUMPABLE gets set to 0 this node among others becomes uid=0 gid=0,
+ * which locks out the cross thread naming implementation.
+ * This function makes sure that the node is always accessible for members of
+ * same thread group.
+ */
+static int proc_tid_comm_permission(struct inode *inode, int mask)
+{
+ bool is_same_tgroup;
+ struct task_struct *task;
+
+ task = get_proc_task(inode);
+ if (!task)
+ return -ESRCH;
+ is_same_tgroup = same_thread_group(current, task);
+ put_task_struct(task);
+
+ if (likely(is_same_tgroup && !(mask & MAY_EXEC))) {
+ /* This file (/proc/<pid>/task/<tid>/comm) can always be
+ * read or written by the members of the corresponding
+ * thread group.
+ */
+ return 0;
+ }
+
+ return generic_permission(inode, mask);
+}
+
+static const struct inode_operations proc_tid_comm_inode_operations = {
+ .permission = proc_tid_comm_permission,
+};
+
+/*
* Tasks
*/
static const struct pid_entry tid_base_stuff[] = {
@@ -3265,7 +3303,9 @@
#ifdef CONFIG_TASK_CPUFREQ_STATS
REG("cpufreq_stats", S_IRUGO|S_IWUSR, proc_pid_cpufreq_stats_operations),
#endif
- REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
+ NOD("comm", S_IFREG|S_IRUGO|S_IWUSR,
+ &proc_tid_comm_inode_operations,
+ &proc_pid_set_comm_operations, {}),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
INF("syscall", S_IRUGO, proc_pid_syscall),
#endif
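
The node this unlocks is what bionic and glibc use for cross-thread naming: pthread_setname_np()/pthread_getname_np() on a thread other than the caller are implemented by writing and reading /proc/self/task/<tid>/comm. Without the permission hook above, PR_SET_DUMPABLE=0 would turn that file into an inaccessible root-owned node. A hedged userspace sketch of the path being preserved:

	#define _GNU_SOURCE
	#include <pthread.h>
	#include <sys/prctl.h>
	#include <unistd.h>

	static void *worker(void *arg)
	{
		(void)arg;
		sleep(1);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		char name[16];

		prctl(PR_SET_DUMPABLE, 0);	/* makes /proc/<pid>/* root-owned */
		pthread_create(&t, NULL, worker, NULL);

		/* Both calls go through /proc/self/task/<tid>/comm under the hood. */
		pthread_setname_np(t, "worker-0");
		pthread_getname_np(t, name, sizeof(name));

		pthread_join(t, NULL);
		return 0;
	}
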
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 5fe34c3..9243966 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -112,7 +112,9 @@
if (err)
goto out;
show_mnt_opts(m, mnt);
- if (sb->s_op->show_options)
+ if (sb->s_op->show_options2)
+ err = sb->s_op->show_options2(mnt, m, mnt_path.dentry);
+ else if (sb->s_op->show_options)
err = sb->s_op->show_options(m, mnt_path.dentry);
seq_puts(m, " 0 0\n");
out:
@@ -173,7 +175,9 @@
err = show_sb_opts(m, sb);
if (err)
goto out;
- if (sb->s_op->show_options)
+ if (sb->s_op->show_options2) {
+ err = sb->s_op->show_options2(mnt, m, mnt->mnt_root);
+ } else if (sb->s_op->show_options)
err = sb->s_op->show_options(m, mnt->mnt_root);
seq_putc(m, '\n');
out:
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index b2fa36f..c8f3bcb 100755
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -34,6 +34,8 @@
struct dentry *parent_lower_dentry = NULL;
struct dentry *lower_cur_parent_dentry = NULL;
struct dentry *lower_dentry = NULL;
+ struct inode *inode;
+ struct sdcardfs_inode_data *data;
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -46,7 +48,8 @@
spin_unlock(&dentry->d_lock);
/* check uninitialized obb_dentry and
- * whether the base obbpath has been changed or not */
+ * whether the base obbpath has been changed or not
+ */
if (is_obbpath_invalid(dentry)) {
d_drop(dentry);
return 0;
@@ -59,6 +62,14 @@
lower_dentry = lower_path.dentry;
lower_cur_parent_dentry = dget_parent(lower_dentry);
+ if ((lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) {
+ err = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
+ if (err == 0) {
+ d_drop(dentry);
+ goto out;
+ }
+ }
+
spin_lock(&lower_dentry->d_lock);
if (d_unhashed(lower_dentry)) {
spin_unlock(&lower_dentry->d_lock);
@@ -76,17 +87,13 @@
if (dentry < lower_dentry) {
spin_lock(&dentry->d_lock);
- spin_lock(&lower_dentry->d_lock);
+ spin_lock_nested(&lower_dentry->d_lock, DENTRY_D_LOCK_NESTED);
} else {
spin_lock(&lower_dentry->d_lock);
- spin_lock(&dentry->d_lock);
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
}
- if (dentry->d_name.len != lower_dentry->d_name.len) {
- __d_drop(dentry);
- err = 0;
- } else if (strncasecmp(dentry->d_name.name, lower_dentry->d_name.name,
- dentry->d_name.len) != 0) {
+ if (!qstr_case_eq(&dentry->d_name, &lower_dentry->d_name)) {
__d_drop(dentry);
err = 0;
}
@@ -98,6 +105,21 @@
spin_unlock(&dentry->d_lock);
spin_unlock(&lower_dentry->d_lock);
}
+ if (!err)
+ goto out;
+
+ /* If our top's inode is gone, we may be out of date */
+ inode = igrab(dentry->d_inode);
+ if (inode) {
+ data = top_data_get(SDCARDFS_I(inode));
+ if (!data || data->abandoned) {
+ d_drop(dentry);
+ err = 0;
+ }
+ if (data)
+ data_put(data);
+ iput(inode);
+ }
out:
dput(parent_dentry);
@@ -110,12 +132,10 @@
static void sdcardfs_d_release(struct dentry *dentry)
{
/* release and reset the lower paths */
- if(has_graft_path(dentry)) {
+ if (has_graft_path(dentry))
sdcardfs_put_reset_orig_path(dentry);
- }
sdcardfs_put_reset_lower_path(dentry);
free_dentry_private_data(dentry);
- return;
}
static int sdcardfs_hash_ci(const struct dentry *dentry,
@@ -132,12 +152,10 @@
unsigned long hash;
name = qstr->name;
- //len = vfat_striptail_len(qstr);
len = qstr->len;
hash = init_name_hash();
while (len--)
- //hash = partial_name_hash(nls_tolower(t, *name++), hash);
hash = partial_name_hash(tolower(*name++), hash);
qstr->hash = end_name_hash(hash);
@@ -152,35 +170,25 @@
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name)
{
- /* This function is copy of vfat_cmpi */
- // FIXME Should we support national language?
- //struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
- //unsigned int alen, blen;
+ /* FIXME Should we support national language? */
- /* A filename cannot end in '.' or we treat it like it has none */
- /*
- alen = vfat_striptail_len(name);
- blen = __vfat_striptail_len(len, str);
- if (alen == blen) {
- if (nls_strnicmp(t, name->name, str, alen) == 0)
- return 0;
- }
- */
if (name->len == len) {
- if (strncasecmp(name->name, str, len) == 0)
+ if (str_n_case_eq(name->name, str, len))
return 0;
}
return 1;
}
-static void sdcardfs_canonical_path(const struct path *path, struct path *actual_path) {
+static void sdcardfs_canonical_path(const struct path *path,
+ struct path *actual_path)
+{
sdcardfs_get_real_lower(path->dentry, actual_path);
}
const struct dentry_operations sdcardfs_ci_dops = {
.d_revalidate = sdcardfs_d_revalidate,
.d_release = sdcardfs_d_release,
- .d_hash = sdcardfs_hash_ci,
+ .d_hash = sdcardfs_hash_ci,
.d_compare = sdcardfs_cmp_ci,
.d_canonical_path = sdcardfs_canonical_path,
};
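qstr_case_eq() and str_n_case_eq() replace the open-coded length checks plus strncasecmp() above; they are declared in sdcardfs.h rather than in this file. Presumably they amount to something like the following sketch (shown only as an assumption, together with the QSTR_LITERAL helper used later in derived_perm.c):

/* assumed helpers from sdcardfs.h */
static inline bool str_n_case_eq(const char *s1, const char *s2, size_t len)
{
	return strncasecmp(s1, s2, len) == 0;
}

static inline bool qstr_case_eq(const struct qstr *q1, const struct qstr *q2)
{
	return q1->len == q2->len && str_n_case_eq(q1->name, q2->name, q1->len);
}

#define QSTR_LITERAL(string) QSTR_INIT(string, sizeof(string) - 1)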
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 9de45bc..8a42e01 100755
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -26,100 +26,295 @@
struct sdcardfs_inode_info *pi = SDCARDFS_I(parent);
struct sdcardfs_inode_info *ci = SDCARDFS_I(child);
- ci->perm = PERM_INHERIT;
- ci->userid = pi->userid;
- ci->d_uid = pi->d_uid;
- ci->under_android = pi->under_android;
+ ci->data->perm = PERM_INHERIT;
+ ci->data->userid = pi->data->userid;
+ ci->data->d_uid = pi->data->d_uid;
+ ci->data->under_android = pi->data->under_android;
+ ci->data->under_cache = pi->data->under_cache;
+ ci->data->under_obb = pi->data->under_obb;
+ set_top(ci, pi->top_data);
}
/* helper function for derived state */
-void setup_derived_state(struct inode *inode, perm_t perm,
- userid_t userid, uid_t uid, bool under_android)
+void setup_derived_state(struct inode *inode, perm_t perm, userid_t userid,
+ uid_t uid, bool under_android,
+ struct sdcardfs_inode_data *top)
{
struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
- info->perm = perm;
- info->userid = userid;
- info->d_uid = uid;
- info->under_android = under_android;
+ info->data->perm = perm;
+ info->data->userid = userid;
+ info->data->d_uid = uid;
+ info->data->under_android = under_android;
+ info->data->under_cache = false;
+ info->data->under_obb = false;
+ set_top(info, top);
}
-/* While renaming, there is a point where we want the path from dentry, but the name from newdentry */
-void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, struct dentry *newdentry)
+/* While renaming, there is a point where we want the path from dentry,
+ * but the name from newdentry
+ */
+void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
+ const struct qstr *name)
{
- struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
struct sdcardfs_inode_info *info = SDCARDFS_I(dentry->d_inode);
- struct sdcardfs_inode_info *parent_info= SDCARDFS_I(parent->d_inode);
+ struct sdcardfs_inode_data *parent_data =
+ SDCARDFS_I(parent->d_inode)->data;
appid_t appid;
+ unsigned long user_num;
+ int err;
+ struct qstr q_Android = QSTR_LITERAL("Android");
+ struct qstr q_data = QSTR_LITERAL("data");
+ struct qstr q_obb = QSTR_LITERAL("obb");
+ struct qstr q_media = QSTR_LITERAL("media");
+ struct qstr q_cache = QSTR_LITERAL("cache");
/* By default, each inode inherits from its parent.
* the properties are maintained on its private fields
* because the inode attributes will be modified with that of
* its lower inode.
- * The derived state will be updated on the last
- * stage of each system call by fix_derived_permission(inode).
+ * These values are used by our custom permission call instead
+ * of using the inode permissions.
*/
inherit_derived_state(parent->d_inode, dentry->d_inode);
+ /* Files don't get special labels */
+ if (!S_ISDIR(dentry->d_inode->i_mode))
+ return;
/* Derive custom permissions based on parent and current node */
- switch (parent_info->perm) {
- case PERM_INHERIT:
- /* Already inherited above */
- break;
- case PERM_PRE_ROOT:
- /* Legacy internal layout places users at top level */
- info->perm = PERM_ROOT;
- info->userid = simple_strtoul(newdentry->d_name.name, NULL, 10);
- break;
- case PERM_ROOT:
- /* Assume masked off by default. */
- if (!strcasecmp(newdentry->d_name.name, "Android")) {
- /* App-specific directories inside; let anyone traverse */
- info->perm = PERM_ANDROID;
- info->under_android = true;
- }
- break;
- case PERM_ANDROID:
- if (!strcasecmp(newdentry->d_name.name, "data")) {
- /* App-specific directories inside; let anyone traverse */
- info->perm = PERM_ANDROID_DATA;
- } else if (!strcasecmp(newdentry->d_name.name, "obb")) {
- /* App-specific directories inside; let anyone traverse */
- info->perm = PERM_ANDROID_OBB;
- /* Single OBB directory is always shared */
- } else if (!strcasecmp(newdentry->d_name.name, "media")) {
- /* App-specific directories inside; let anyone traverse */
- info->perm = PERM_ANDROID_MEDIA;
- }
- break;
- case PERM_ANDROID_DATA:
- case PERM_ANDROID_OBB:
- case PERM_ANDROID_MEDIA:
- appid = get_appid(sbi->pkgl_id, newdentry->d_name.name);
- if (appid != 0) {
- info->d_uid = multiuser_get_uid(parent_info->userid, appid);
- }
- break;
+ switch (parent_data->perm) {
+ case PERM_INHERIT:
+ case PERM_ANDROID_PACKAGE_CACHE:
+ /* Already inherited above */
+ break;
+ case PERM_PRE_ROOT:
+ /* Legacy internal layout places users at top level */
+ info->data->perm = PERM_ROOT;
+ err = kstrtoul(name->name, 10, &user_num);
+ if (err)
+ info->data->userid = 0;
+ else
+ info->data->userid = user_num;
+ set_top(info, info->data);
+ break;
+ case PERM_ROOT:
+ /* Assume masked off by default. */
+ if (qstr_case_eq(name, &q_Android)) {
+ /* App-specific directories inside; let anyone traverse */
+ info->data->perm = PERM_ANDROID;
+ info->data->under_android = true;
+ set_top(info, info->data);
+ }
+ break;
+ case PERM_ANDROID:
+ if (qstr_case_eq(name, &q_data)) {
+ /* App-specific directories inside; let anyone traverse */
+ info->data->perm = PERM_ANDROID_DATA;
+ set_top(info, info->data);
+ } else if (qstr_case_eq(name, &q_obb)) {
+ /* App-specific directories inside; let anyone traverse */
+ info->data->perm = PERM_ANDROID_OBB;
+ info->data->under_obb = true;
+ set_top(info, info->data);
+ /* Single OBB directory is always shared */
+ } else if (qstr_case_eq(name, &q_media)) {
+ /* App-specific directories inside; let anyone traverse */
+ info->data->perm = PERM_ANDROID_MEDIA;
+ set_top(info, info->data);
+ }
+ break;
+ case PERM_ANDROID_OBB:
+ case PERM_ANDROID_DATA:
+ case PERM_ANDROID_MEDIA:
+ info->data->perm = PERM_ANDROID_PACKAGE;
+ appid = get_appid(name->name);
+ if (appid != 0 && !is_excluded(name->name, parent_data->userid))
+ info->data->d_uid =
+ multiuser_get_uid(parent_data->userid, appid);
+ set_top(info, info->data);
+ break;
+ case PERM_ANDROID_PACKAGE:
+ if (qstr_case_eq(name, &q_cache)) {
+ info->data->perm = PERM_ANDROID_PACKAGE_CACHE;
+ info->data->under_cache = true;
+ }
+ break;
}
}
void get_derived_permission(struct dentry *parent, struct dentry *dentry)
{
- get_derived_permission_new(parent, dentry, dentry);
+ get_derived_permission_new(parent, dentry, &dentry->d_name);
}
-void get_derive_permissions_recursive(struct dentry *parent) {
- struct dentry *dentry;
- list_for_each_entry(dentry, &parent->d_subdirs, d_u.d_child) {
- if (dentry && dentry->d_inode) {
- mutex_lock(&dentry->d_inode->i_mutex);
- get_derived_permission(parent, dentry);
- fix_derived_permission(dentry->d_inode);
- get_derive_permissions_recursive(dentry);
- mutex_unlock(&dentry->d_inode->i_mutex);
+static appid_t get_type(const char *name)
+{
+ const char *ext = strrchr(name, '.');
+ appid_t id;
+
+ if (ext && ext[0]) {
+ ext = &ext[1];
+ id = get_ext_gid(ext);
+ return id?:AID_MEDIA_RW;
+ }
+ return AID_MEDIA_RW;
+}
+
+void fixup_lower_ownership(struct dentry *dentry, const char *name)
+{
+ struct path path;
+ struct inode *inode;
+ int error;
+ struct sdcardfs_inode_info *info;
+ struct sdcardfs_inode_data *info_d;
+ struct sdcardfs_inode_data *info_top;
+ perm_t perm;
+ struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+ uid_t uid = sbi->options.fs_low_uid;
+ gid_t gid = sbi->options.fs_low_gid;
+ struct iattr newattrs;
+
+ info = SDCARDFS_I(dentry->d_inode);
+ info_d = info->data;
+ perm = info_d->perm;
+ if (info_d->under_obb) {
+ perm = PERM_ANDROID_OBB;
+ } else if (info_d->under_cache) {
+ perm = PERM_ANDROID_PACKAGE_CACHE;
+ } else if (perm == PERM_INHERIT) {
+ info_top = top_data_get(info);
+ perm = info_top->perm;
+ data_put(info_top);
+ }
+
+ switch (perm) {
+ case PERM_ROOT:
+ case PERM_ANDROID:
+ case PERM_ANDROID_DATA:
+ case PERM_ANDROID_MEDIA:
+ case PERM_ANDROID_PACKAGE:
+ case PERM_ANDROID_PACKAGE_CACHE:
+ uid = multiuser_get_uid(info_d->userid, uid);
+ break;
+ case PERM_ANDROID_OBB:
+ uid = AID_MEDIA_OBB;
+ break;
+ case PERM_PRE_ROOT:
+ default:
+ break;
+ }
+ switch (perm) {
+ case PERM_ROOT:
+ case PERM_ANDROID:
+ case PERM_ANDROID_DATA:
+ case PERM_ANDROID_MEDIA:
+ if (S_ISDIR(dentry->d_inode->i_mode))
+ gid = multiuser_get_uid(info_d->userid, AID_MEDIA_RW);
+ else
+ gid = multiuser_get_uid(info_d->userid, get_type(name));
+ break;
+ case PERM_ANDROID_OBB:
+ gid = AID_MEDIA_OBB;
+ break;
+ case PERM_ANDROID_PACKAGE:
+ if (uid_is_app(info_d->d_uid))
+ gid = multiuser_get_ext_gid(info_d->d_uid);
+ else
+ gid = multiuser_get_uid(info_d->userid, AID_MEDIA_RW);
+ break;
+ case PERM_ANDROID_PACKAGE_CACHE:
+ if (uid_is_app(info_d->d_uid))
+ gid = multiuser_get_ext_cache_gid(info_d->d_uid);
+ else
+ gid = multiuser_get_uid(info_d->userid, AID_MEDIA_RW);
+ break;
+ case PERM_PRE_ROOT:
+ default:
+ break;
+ }
+
+ sdcardfs_get_lower_path(dentry, &path);
+ inode = path.dentry->d_inode;
+ if (path.dentry->d_inode->i_gid != gid || path.dentry->d_inode->i_uid != uid) {
+ newattrs.ia_valid = ATTR_GID | ATTR_UID | ATTR_FORCE;
+ newattrs.ia_uid = make_kuid(current_user_ns(), uid);
+ newattrs.ia_gid = make_kgid(current_user_ns(), gid);
+ if (!S_ISDIR(inode->i_mode))
+ newattrs.ia_valid |=
+ ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+ mutex_lock(&inode->i_mutex);
+ error = security_path_chown(&path, newattrs.ia_uid, newattrs.ia_gid);
+ if (!error)
+ error = notify_change2(path.mnt, path.dentry, &newattrs);
+ mutex_unlock(&inode->i_mutex);
+ if (error)
+ pr_debug("sdcardfs: Failed to touch up lower fs gid/uid for %s\n", name);
+ }
+ sdcardfs_put_lower_path(dentry, &path);
+}
+
+static int descendant_may_need_fixup(struct sdcardfs_inode_data *data,
+ struct limit_search *limit)
+{
+ if (data->perm == PERM_ROOT)
+ return (limit->flags & BY_USERID) ?
+ data->userid == limit->userid : 1;
+ if (data->perm == PERM_PRE_ROOT || data->perm == PERM_ANDROID)
+ return 1;
+ return 0;
+}
+
+static int needs_fixup(perm_t perm)
+{
+ if (perm == PERM_ANDROID_DATA || perm == PERM_ANDROID_OBB
+ || perm == PERM_ANDROID_MEDIA)
+ return 1;
+ return 0;
+}
+
+static void __fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit, int depth)
+{
+ struct dentry *child;
+ struct sdcardfs_inode_info *info;
+
+ /*
+ * All paths will terminate their recursion on hitting PERM_ANDROID_OBB,
+ * PERM_ANDROID_MEDIA, or PERM_ANDROID_DATA. This happens at a depth of
+ * at most 3.
+ */
+ WARN(depth > 3, "%s: Max expected depth exceeded!\n", __func__);
+ spin_lock_nested(&dentry->d_lock, depth);
+ if (!dentry->d_inode) {
+ spin_unlock(&dentry->d_lock);
+ return;
+ }
+ info = SDCARDFS_I(dentry->d_inode);
+
+ if (needs_fixup(info->data->perm)) {
+ list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
+ spin_lock_nested(&child->d_lock, depth + 1);
+ if (!(limit->flags & BY_NAME) || qstr_case_eq(&child->d_name, &limit->name)) {
+ if (child->d_inode) {
+ get_derived_permission(dentry, child);
+ fixup_tmp_permissions(child->d_inode);
+ spin_unlock(&child->d_lock);
+ break;
+ }
+ }
+ spin_unlock(&child->d_lock);
+ }
+ } else if (descendant_may_need_fixup(info->data, limit)) {
+ list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
+ __fixup_perms_recursive(child, limit, depth + 1);
}
}
+ spin_unlock(&dentry->d_lock);
+}
+
+void fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit)
+{
+ __fixup_perms_recursive(dentry, limit, 0);
}
/* main function for updating derived permission */
@@ -127,41 +322,38 @@
{
struct dentry *parent;
- if(!dentry || !dentry->d_inode) {
- printk(KERN_ERR "sdcardfs: %s: invalid dentry\n", __func__);
+ if (!dentry || !dentry->d_inode) {
+ pr_err("sdcardfs: %s: invalid dentry\n", __func__);
return;
}
/* FIXME:
* 1. need to check whether the dentry is updated or not
* 2. remove the root dentry update
*/
- mutex_lock(&dentry->d_inode->i_mutex);
- if(IS_ROOT(dentry)) {
- //setup_default_pre_root_state(dentry->d_inode);
- } else {
+ if (!IS_ROOT(dentry)) {
parent = dget_parent(dentry);
- if(parent) {
+ if (parent) {
get_derived_permission(parent, dentry);
dput(parent);
}
}
- fix_derived_permission(dentry->d_inode);
- mutex_unlock(&dentry->d_inode->i_mutex);
+ fixup_tmp_permissions(dentry->d_inode);
}
int need_graft_path(struct dentry *dentry)
{
int ret = 0;
struct dentry *parent = dget_parent(dentry);
- struct sdcardfs_inode_info *parent_info= SDCARDFS_I(parent->d_inode);
+ struct sdcardfs_inode_info *parent_info = SDCARDFS_I(parent->d_inode);
struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+ struct qstr obb = QSTR_LITERAL("obb");
- if(parent_info->perm == PERM_ANDROID &&
- !strcasecmp(dentry->d_name.name, "obb")) {
+ if (parent_info->data->perm == PERM_ANDROID &&
+ qstr_case_eq(&dentry->d_name, &obb)) {
/* /Android/obb is the base obbpath of DERIVED_UNIFIED */
- if(!(sbi->options.multiuser == false
- && parent_info->userid == 0)) {
+ if (!(sbi->options.multiuser == false
+ && parent_info->data->userid == 0)) {
ret = 1;
}
}
@@ -175,36 +367,40 @@
struct sdcardfs_dentry_info *di = SDCARDFS_D(dent);
struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dent->d_sb);
char *path_buf, *obbpath_s;
+ int need_put = 0;
+ struct path lower_path;
/* check the base obbpath has been changed.
* this routine can check an uninitialized obb dentry as well.
- * regarding the uninitialized obb, refer to the sdcardfs_mkdir() */
+ * regarding the uninitialized obb, refer to the sdcardfs_mkdir()
+ */
spin_lock(&di->lock);
- if(di->orig_path.dentry) {
- if(!di->lower_path.dentry) {
+ if (di->orig_path.dentry) {
+ if (!di->lower_path.dentry) {
ret = 1;
} else {
path_get(&di->lower_path);
- //lower_parent = lock_parent(lower_path->dentry);
path_buf = kmalloc(PATH_MAX, GFP_ATOMIC);
- if(!path_buf) {
+ if (!path_buf) {
ret = 1;
- printk(KERN_ERR "sdcardfs: fail to allocate path_buf in %s.\n", __func__);
+ pr_err("sdcardfs: fail to allocate path_buf in %s.\n", __func__);
} else {
obbpath_s = d_path(&di->lower_path, path_buf, PATH_MAX);
if (d_unhashed(di->lower_path.dentry) ||
- strcasecmp(sbi->obbpath_s, obbpath_s)) {
+ !str_case_eq(sbi->obbpath_s, obbpath_s)) {
ret = 1;
}
kfree(path_buf);
}
- //unlock_dir(lower_parent);
- path_put(&di->lower_path);
+ pathcpy(&lower_path, &di->lower_path);
+ need_put = 1;
}
}
spin_unlock(&di->lock);
+ if (need_put)
+ path_put(&lower_path);
return ret;
}
@@ -212,17 +408,18 @@
{
int ret = 0;
struct dentry *parent = dget_parent(dentry);
- struct sdcardfs_inode_info *parent_info= SDCARDFS_I(parent->d_inode);
+ struct sdcardfs_inode_info *parent_info = SDCARDFS_I(parent->d_inode);
struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+ struct qstr q_obb = QSTR_LITERAL("obb");
spin_lock(&SDCARDFS_D(dentry)->lock);
if (sbi->options.multiuser) {
- if(parent_info->perm == PERM_PRE_ROOT &&
- !strcasecmp(dentry->d_name.name, "obb")) {
+ if (parent_info->data->perm == PERM_PRE_ROOT &&
+ qstr_case_eq(&dentry->d_name, &q_obb)) {
ret = 1;
}
- } else if (parent_info->perm == PERM_ANDROID &&
- !strcasecmp(dentry->d_name.name, "obb")) {
+ } else if (parent_info->data->perm == PERM_ANDROID &&
+ qstr_case_eq(&dentry->d_name, &q_obb)) {
ret = 1;
}
spin_unlock(&SDCARDFS_D(dentry)->lock);
@@ -232,7 +429,8 @@
/* The lower_path will be stored to the dentry's orig_path
* and the base obbpath will be copyed to the lower_path variable.
* if an error returned, there's no change in the lower_path
- * returns: -ERRNO if error (0: no error) */
+ * returns: -ERRNO if error (0: no error)
+ */
int setup_obb_dentry(struct dentry *dentry, struct path *lower_path)
{
int err = 0;
@@ -241,23 +439,24 @@
/* A local obb dentry must have its own orig_path to support rmdir
* and mkdir of itself. Usually, we expect that the sbi->obbpath
- * is avaiable on this stage. */
+ * is available at this stage.
+ */
sdcardfs_set_orig_path(dentry, lower_path);
err = kern_path(sbi->obbpath_s,
LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &obbpath);
- if(!err) {
+ if (!err) {
/* the obbpath base has been found */
- printk(KERN_INFO "sdcardfs: the sbi->obbpath is found\n");
pathcpy(lower_path, &obbpath);
} else {
/* if the sbi->obbpath is not available, we can optionally
* setup the lower_path with its orig_path.
* but, the current implementation just returns an error
* because the sdcard daemon also regards this case as
- * a lookup fail. */
- printk(KERN_INFO "sdcardfs: the sbi->obbpath is not available\n");
+ * a lookup failure.
+ */
+ pr_info("sdcardfs: the sbi->obbpath is not available\n");
}
return err;
}
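fixup_perms_recursive() is driven by a struct limit_search (defined in sdcardfs.h, not shown here) so a caller can restrict the walk to one user or one directory name. A hedged sketch of how the packages-list side might invoke it; the field names are inferred from the BY_USERID/BY_NAME checks above and are assumptions:

/* illustrative caller, assuming limit_search carries flags/userid/name */
struct limit_search limit = {
	.flags = BY_NAME | BY_USERID,
	.userid = userid,
	.name = QSTR_INIT(pkg_name, strlen(pkg_name)),
};

fixup_perms_recursive(sb->s_root, &limit);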
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index 369051e..8943bef 100755
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -65,7 +65,7 @@
/* check disk space */
if (!check_min_free_space(dentry, count, 0)) {
- printk(KERN_INFO "No minimum free space.\n");
+ pr_err("No minimum free space.\n");
return -ENOSPC;
}
@@ -113,6 +113,10 @@
if (lower_file->f_op->unlocked_ioctl)
err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
+ /* some ioctls can change inode attributes (EXT2_IOC_SETFLAGS) */
+ if (!err)
+ sdcardfs_copy_and_fix_attrs(file_inode(file),
+ file_inode(lower_file));
out:
return err;
}
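The new copy-after-ioctl call means attribute changes performed through lower-filesystem ioctls become visible on the sdcardfs inode immediately. Illustrative user-space use (whether the lower filesystem honours these flags is outside this patch):

#include <sys/ioctl.h>
#include <linux/fs.h>

static int set_append_only(int fd)
{
	int flags;

	if (ioctl(fd, FS_IOC_GETFLAGS, &flags))	/* forwarded to the lower file */
		return -1;
	flags |= FS_APPEND_FL;
	/* on success, sdcardfs_copy_and_fix_attrs() resyncs the upper inode */
	return ioctl(fd, FS_IOC_SETFLAGS, &flags);
}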
@@ -160,8 +164,7 @@
lower_file = sdcardfs_lower_file(file);
if (willwrite && !lower_file->f_mapping->a_ops->writepage) {
err = -EINVAL;
- printk(KERN_ERR "sdcardfs: lower file system does not "
- "support writeable mmap\n");
+ pr_err("sdcardfs: lower file system does not support writeable mmap\n");
goto out;
}
@@ -173,16 +176,10 @@
if (!SDCARDFS_F(file)->lower_vm_ops) {
err = lower_file->f_op->mmap(lower_file, vma);
if (err) {
- printk(KERN_ERR "sdcardfs: lower mmap failed %d\n", err);
+ pr_err("sdcardfs: lower mmap failed %d\n", err);
goto out;
}
saved_vm_ops = vma->vm_ops; /* save: came from lower ->mmap */
- err = do_munmap(current->mm, vma->vm_start,
- vma->vm_end - vma->vm_start);
- if (err) {
- printk(KERN_ERR "sdcardfs: do_munmap failed %d\n", err);
- goto out;
- }
}
/*
@@ -195,6 +192,9 @@
file->f_mapping->a_ops = &sdcardfs_aops; /* set our aops */
if (!SDCARDFS_F(file)->lower_vm_ops) /* save for our ->fault */
SDCARDFS_F(file)->lower_vm_ops = saved_vm_ops;
+ vma->vm_private_data = file;
+ get_file(lower_file);
+ vma->vm_file = lower_file;
out:
return err;
@@ -216,16 +216,13 @@
goto out_err;
}
- if(!check_caller_access_to_name(parent->d_inode, dentry->d_name.name)) {
- printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
- " dentry: %s, task:%s\n",
- __func__, dentry->d_name.name, current->comm);
+ if (!check_caller_access_to_name(parent->d_inode, &dentry->d_name)) {
err = -EACCES;
goto out_err;
}
/* save current_cred and override it */
- OVERRIDE_CRED(sbi, saved_cred);
+ OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(inode));
file->private_data =
kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL);
@@ -251,9 +248,8 @@
if (err)
kfree(SDCARDFS_F(file));
- else {
+ else
sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode));
- }
out_revert_cred:
REVERT_CRED(saved_cred);
@@ -321,6 +317,29 @@
return err;
}
+/*
+ * Sdcardfs cannot use generic_file_llseek as ->llseek, because it would
+ * only set the offset of the upper file. So we have to implement our
+ * own method to set both the upper and lower file offsets
+ * consistently.
+ */
+static loff_t sdcardfs_file_llseek(struct file *file, loff_t offset, int whence)
+{
+ int err;
+ struct file *lower_file;
+
+ err = generic_file_llseek(file, offset, whence);
+ if (err < 0)
+ goto out;
+
+ lower_file = sdcardfs_lower_file(file);
+ err = generic_file_llseek(lower_file, offset, whence);
+
+out:
+ return err;
+}
+
+
const struct file_operations sdcardfs_main_fops = {
.llseek = generic_file_llseek,
.read = sdcardfs_read,
@@ -339,7 +358,7 @@
/* trimmed directory options */
const struct file_operations sdcardfs_dir_fops = {
- .llseek = generic_file_llseek,
+ .llseek = sdcardfs_file_llseek,
.read = generic_read_dir,
.iterate = sdcardfs_readdir,
.unlocked_ioctl = sdcardfs_unlocked_ioctl,
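sdcardfs_file_llseek keeps the upper and lower directory offsets in step, which is what makes seekdir()/rewinddir() on an sdcardfs directory behave: iteration is driven by the lower file's position, so resetting only the upper offset would make a rewound directory appear empty. An illustrative user-space sequence:

#include <dirent.h>

static void scan_twice(const char *path)
{
	DIR *d = opendir(path);

	if (!d)
		return;
	while (readdir(d))
		;		/* lower dir f_pos advances along with ours */
	rewinddir(d);		/* lseek(0) now also rewinds the lower file */
	while (readdir(d))
		;		/* second pass sees the entries again */
	closedir(d);
}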
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 0f4aa17..f4d6c08 100755
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -19,18 +19,26 @@
*/
#include "sdcardfs.h"
+#include <linux/fs_struct.h>
+#include <linux/ratelimit.h>
/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
-const struct cred * override_fsids(struct sdcardfs_sb_info* sbi)
+const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
+ struct sdcardfs_inode_data *data)
{
- struct cred * cred;
- const struct cred * old_cred;
+ struct cred *cred;
+ const struct cred *old_cred;
+ uid_t uid;
cred = prepare_creds();
if (!cred)
return NULL;
- cred->fsuid = sbi->options.fs_low_uid;
+ if (data->under_obb)
+ uid = AID_MEDIA_OBB;
+ else
+ uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
+ cred->fsuid = uid;
cred->fsgid = sbi->options.fs_low_gid;
old_cred = override_creds(cred);
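override_fsids() now takes the inode's sdcardfs_inode_data so the overridden fsuid can name the owning user (or AID_MEDIA_OBB under obb). The OVERRIDE_CRED()/REVERT_CRED() macros used throughout the callers are defined in sdcardfs.h; presumably they wrap this pair roughly as follows (a sketch, not the literal macro text):

/* assumed shape of the sdcardfs.h helpers */
#define OVERRIDE_CRED(sdcfs_sbi, saved_cred, info)			\
	do {								\
		(saved_cred) = override_fsids((sdcfs_sbi), (info)->data); \
		if (!(saved_cred))					\
			return -ENOMEM;					\
	} while (0)

#define REVERT_CRED(saved_cred)	revert_fsids(saved_cred)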
@@ -39,9 +47,9 @@
}
/* Do not directly use this function, use REVERT_CRED() instead. */
-void revert_fsids(const struct cred * old_cred)
+void revert_fsids(const struct cred *old_cred)
{
- const struct cred * cur_cred;
+ const struct cred *cur_cred;
cur_cred = current->cred;
revert_creds(old_cred);
@@ -53,38 +61,54 @@
{
int err = 0;
struct dentry *lower_dentry;
+ struct vfsmount *lower_dentry_mnt;
struct dentry *lower_parent_dentry = NULL;
struct path lower_path;
const struct cred *saved_cred = NULL;
+ struct fs_struct *saved_fs;
+ struct fs_struct *copied_fs;
- if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
- printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
- " dentry: %s, task:%s\n",
- __func__, dentry->d_name.name, current->comm);
+ if (!check_caller_access_to_name(dir, &dentry->d_name)) {
err = -EACCES;
goto out_eacces;
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+ OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
+ lower_dentry_mnt = lower_path.mnt;
lower_parent_dentry = lock_parent(lower_dentry);
/* set last 16bytes of mode field to 0664 */
mode = (mode & S_IFMT) | 00664;
- err = vfs_create(lower_parent_dentry->d_inode, lower_dentry, mode, want_excl);
+
+ /* temporarily change umask for lower fs write */
+ saved_fs = current->fs;
+ copied_fs = copy_fs_struct(current->fs);
+ if (!copied_fs) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+ current->fs = copied_fs;
+ current->fs->umask = 0;
+ err = vfs_create2(lower_dentry_mnt, lower_parent_dentry->d_inode, lower_dentry, mode, want_excl);
if (err)
goto out;
- err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, SDCARDFS_I(dir)->userid);
+ err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path,
+ SDCARDFS_I(dir)->data->userid);
if (err)
goto out;
fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
fsstack_copy_inode_size(dir, lower_parent_dentry->d_inode);
+ fixup_lower_ownership(dentry, dentry->d_name.name);
out:
+ current->fs = saved_fs;
+ free_fs_struct(copied_fs);
+out_unlock:
unlock_dir(lower_parent_dentry);
sdcardfs_put_lower_path(dentry, &lower_path);
REVERT_CRED(saved_cred);
@@ -138,28 +162,27 @@
{
int err;
struct dentry *lower_dentry;
+ struct vfsmount *lower_mnt;
struct inode *lower_dir_inode = sdcardfs_lower_inode(dir);
struct dentry *lower_dir_dentry;
struct path lower_path;
const struct cred *saved_cred = NULL;
- if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
- printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
- " dentry: %s, task:%s\n",
- __func__, dentry->d_name.name, current->comm);
+ if (!check_caller_access_to_name(dir, &dentry->d_name)) {
err = -EACCES;
goto out_eacces;
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+ OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
+ lower_mnt = lower_path.mnt;
dget(lower_dentry);
lower_dir_dentry = lock_parent(lower_dentry);
- err = vfs_unlink(lower_dir_inode, lower_dentry);
+ err = vfs_unlink2(lower_mnt, lower_dir_inode, lower_dentry);
/*
* Note: unlinking on top of NFS can cause silly-renamed files.
@@ -219,14 +242,15 @@
}
#endif
-static int touch(char *abs_path, mode_t mode) {
+static int touch(char *abs_path, mode_t mode)
+{
struct file *filp = filp_open(abs_path, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, mode);
+
if (IS_ERR(filp)) {
if (PTR_ERR(filp) == -EEXIST) {
return 0;
- }
- else {
- printk(KERN_ERR "sdcardfs: failed to open(%s): %ld\n",
+ } else {
+ pr_err("sdcardfs: failed to open(%s): %ld\n",
abs_path, PTR_ERR(filp));
return PTR_ERR(filp);
}
@@ -240,31 +264,29 @@
int err = 0;
int make_nomedia_in_obb = 0;
struct dentry *lower_dentry;
+ struct vfsmount *lower_mnt;
struct dentry *lower_parent_dentry = NULL;
struct path lower_path;
struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
const struct cred *saved_cred = NULL;
- struct sdcardfs_inode_info *pi = SDCARDFS_I(dir);
- char *page_buf;
- char *nomedia_dir_name;
- char *nomedia_fullpath;
- int fullpath_namelen;
+ struct sdcardfs_inode_data *pd = SDCARDFS_I(dir)->data;
int touch_err = 0;
+ struct fs_struct *saved_fs;
+ struct fs_struct *copied_fs;
+ struct qstr q_obb = QSTR_LITERAL("obb");
+ struct qstr q_data = QSTR_LITERAL("data");
- if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
- printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
- " dentry: %s, task:%s\n",
- __func__, dentry->d_name.name, current->comm);
+ if (!check_caller_access_to_name(dir, &dentry->d_name)) {
err = -EACCES;
goto out_eacces;
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+ OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
/* check disk space */
if (!check_min_free_space(dentry, 0, 1)) {
- printk(KERN_INFO "sdcardfs: No minimum free space.\n");
+ pr_err("sdcardfs: No minimum free space.\n");
err = -ENOSPC;
goto out_revert;
}
@@ -272,87 +294,84 @@
/* the lower_dentry is negative here */
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
+ lower_mnt = lower_path.mnt;
lower_parent_dentry = lock_parent(lower_dentry);
/* set last 16bytes of mode field to 0775 */
mode = (mode & S_IFMT) | 00775;
- err = vfs_mkdir(lower_parent_dentry->d_inode, lower_dentry, mode);
- if (err)
+ /* temporarily change umask for lower fs write */
+ saved_fs = current->fs;
+ copied_fs = copy_fs_struct(current->fs);
+ if (!copied_fs) {
+ err = -ENOMEM;
+ unlock_dir(lower_parent_dentry);
+ goto out_unlock;
+ }
+ current->fs = copied_fs;
+ current->fs->umask = 0;
+ err = vfs_mkdir2(lower_mnt, lower_parent_dentry->d_inode, lower_dentry, mode);
+
+ if (err) {
+ unlock_dir(lower_parent_dentry);
goto out;
+ }
/* if it is a local obb dentry, setup it with the base obbpath */
- if(need_graft_path(dentry)) {
+ if (need_graft_path(dentry)) {
err = setup_obb_dentry(dentry, &lower_path);
- if(err) {
+ if (err) {
/* if the sbi->obbpath is not available, the lower_path won't be
* changed by setup_obb_dentry() but the lower path is saved to
* its orig_path. this dentry will be revalidated later.
- * but now, the lower_path should be NULL */
+ * but now, the lower_path should be NULL
+ */
sdcardfs_put_reset_lower_path(dentry);
/* the newly created lower path which saved to its orig_path or
* the lower_path is the base obbpath.
- * therefore, an additional path_get is required */
+ * therefore, an additional path_get is required
+ */
path_get(&lower_path);
} else
make_nomedia_in_obb = 1;
}
- err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, pi->userid);
- if (err)
+ err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, pd->userid);
+ if (err) {
+ unlock_dir(lower_parent_dentry);
goto out;
+ }
fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
fsstack_copy_inode_size(dir, lower_parent_dentry->d_inode);
/* update number of links on parent directory */
set_nlink(dir, sdcardfs_lower_inode(dir)->i_nlink);
-
- if ((!sbi->options.multiuser) && (!strcasecmp(dentry->d_name.name, "obb"))
- && (pi->perm == PERM_ANDROID) && (pi->userid == 0))
+ fixup_lower_ownership(dentry, dentry->d_name.name);
+ unlock_dir(lower_parent_dentry);
+ if ((!sbi->options.multiuser) && (qstr_case_eq(&dentry->d_name, &q_obb))
+ && (pd->perm == PERM_ANDROID) && (pd->userid == 0))
make_nomedia_in_obb = 1;
/* When creating /Android/data and /Android/obb, mark them as .nomedia */
if (make_nomedia_in_obb ||
- ((pi->perm == PERM_ANDROID) && (!strcasecmp(dentry->d_name.name, "data")))) {
-
- page_buf = (char *)__get_free_page(GFP_KERNEL);
- if (!page_buf) {
- printk(KERN_ERR "sdcardfs: failed to allocate page buf\n");
- goto out;
- }
-
- nomedia_dir_name = d_absolute_path(&lower_path, page_buf, PAGE_SIZE);
- if (IS_ERR(nomedia_dir_name)) {
- free_page((unsigned long)page_buf);
- printk(KERN_ERR "sdcardfs: failed to get .nomedia dir name\n");
- goto out;
- }
-
- fullpath_namelen = page_buf + PAGE_SIZE - nomedia_dir_name - 1;
- fullpath_namelen += strlen("/.nomedia");
- nomedia_fullpath = kzalloc(fullpath_namelen + 1, GFP_KERNEL);
- if (!nomedia_fullpath) {
- free_page((unsigned long)page_buf);
- printk(KERN_ERR "sdcardfs: failed to allocate .nomedia fullpath buf\n");
- goto out;
- }
-
- strcpy(nomedia_fullpath, nomedia_dir_name);
- free_page((unsigned long)page_buf);
- strcat(nomedia_fullpath, "/.nomedia");
- touch_err = touch(nomedia_fullpath, 0664);
+ ((pd->perm == PERM_ANDROID)
+ && (qstr_case_eq(&dentry->d_name, &q_data)))) {
+ REVERT_CRED(saved_cred);
+ OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dentry->d_inode));
+ set_fs_pwd(current->fs, &lower_path);
+ touch_err = touch(".nomedia", 0664);
if (touch_err) {
- printk(KERN_ERR "sdcardfs: failed to touch(%s): %d\n",
- nomedia_fullpath, touch_err);
- kfree(nomedia_fullpath);
+ pr_err("sdcardfs: failed to create .nomedia in %s: %d\n",
+ lower_path.dentry->d_name.name, touch_err);
goto out;
}
- kfree(nomedia_fullpath);
}
out:
- unlock_dir(lower_parent_dentry);
+ current->fs = saved_fs;
+ free_fs_struct(copied_fs);
+out_unlock:
sdcardfs_put_lower_path(dentry, &lower_path);
out_revert:
REVERT_CRED(saved_cred);
@@ -364,29 +383,29 @@
{
struct dentry *lower_dentry;
struct dentry *lower_dir_dentry;
+ struct vfsmount *lower_mnt;
int err;
struct path lower_path;
const struct cred *saved_cred = NULL;
- if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
- printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
- " dentry: %s, task:%s\n",
- __func__, dentry->d_name.name, current->comm);
+ if (!check_caller_access_to_name(dir, &dentry->d_name)) {
err = -EACCES;
goto out_eacces;
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+ OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
/* sdcardfs_get_real_lower(): in case of remove an user's obb dentry
- * the dentry on the original path should be deleted. */
+ * the dentry on the original path should be deleted.
+ */
sdcardfs_get_real_lower(dentry, &lower_path);
lower_dentry = lower_path.dentry;
+ lower_mnt = lower_path.mnt;
lower_dir_dentry = lock_parent(lower_dentry);
- err = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
+ err = vfs_rmdir2(lower_mnt, lower_dir_dentry->d_inode, lower_dentry);
if (err)
goto out;
@@ -450,27 +469,25 @@
struct dentry *lower_new_dentry = NULL;
struct dentry *lower_old_dir_dentry = NULL;
struct dentry *lower_new_dir_dentry = NULL;
+ struct vfsmount *lower_mnt = NULL;
struct dentry *trap = NULL;
- struct dentry *new_parent = NULL;
struct path lower_old_path, lower_new_path;
const struct cred *saved_cred = NULL;
- if(!check_caller_access_to_name(old_dir, old_dentry->d_name.name) ||
- !check_caller_access_to_name(new_dir, new_dentry->d_name.name)) {
- printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
- " new_dentry: %s, task:%s\n",
- __func__, new_dentry->d_name.name, current->comm);
+ if (!check_caller_access_to_name(old_dir, &old_dentry->d_name) ||
+ !check_caller_access_to_name(new_dir, &new_dentry->d_name)) {
err = -EACCES;
goto out_eacces;
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred);
+ OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred, SDCARDFS_I(new_dir));
sdcardfs_get_real_lower(old_dentry, &lower_old_path);
sdcardfs_get_lower_path(new_dentry, &lower_new_path);
lower_old_dentry = lower_old_path.dentry;
lower_new_dentry = lower_new_path.dentry;
+ lower_mnt = lower_old_path.mnt;
lower_old_dir_dentry = dget_parent(lower_old_dentry);
lower_new_dir_dentry = dget_parent(lower_new_dentry);
@@ -486,7 +503,8 @@
goto out;
}
- err = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
+ err = vfs_rename2(lower_mnt,
+ lower_old_dir_dentry->d_inode, lower_old_dentry,
lower_new_dir_dentry->d_inode, lower_new_dentry);
if (err)
goto out;
@@ -498,25 +516,11 @@
if (new_dir != old_dir) {
sdcardfs_copy_and_fix_attrs(old_dir, lower_old_dir_dentry->d_inode);
fsstack_copy_inode_size(old_dir, lower_old_dir_dentry->d_inode);
-
- /* update the derived permission of the old_dentry
- * with its new parent
- */
- new_parent = dget_parent(new_dentry);
- if(new_parent) {
- if(old_dentry->d_inode) {
- update_derived_permission_lock(old_dentry);
- }
- dput(new_parent);
- }
}
- /* At this point, not all dentry information has been moved, so
- * we pass along new_dentry for the name.*/
- mutex_lock(&old_dentry->d_inode->i_mutex);
- get_derived_permission_new(new_dentry->d_parent, old_dentry, new_dentry);
- fix_derived_permission(old_dentry->d_inode);
- get_derive_permissions_recursive(old_dentry);
- mutex_unlock(&old_dentry->d_inode->i_mutex);
+ get_derived_permission_new(new_dentry->d_parent, old_dentry, &new_dentry->d_name);
+ fixup_tmp_permissions(old_dentry->d_inode);
+ fixup_lower_ownership(old_dentry, new_dentry->d_name.name);
+ d_invalidate(old_dentry); /* Can't fixup ownership recursively :( */
out:
unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
dput(lower_old_dir_dentry);
@@ -598,16 +602,61 @@
}
#endif
-static int sdcardfs_permission(struct inode *inode, int mask)
+static int sdcardfs_permission_wrn(struct inode *inode, int mask)
+{
+ WARN_RATELIMIT(1, "sdcardfs does not support permission. Use permission2.\n");
+ return -EINVAL;
+}
+
+void copy_attrs(struct inode *dest, const struct inode *src)
+{
+ dest->i_mode = src->i_mode;
+ dest->i_uid = src->i_uid;
+ dest->i_gid = src->i_gid;
+ dest->i_rdev = src->i_rdev;
+ dest->i_atime = src->i_atime;
+ dest->i_mtime = src->i_mtime;
+ dest->i_ctime = src->i_ctime;
+ dest->i_blkbits = src->i_blkbits;
+ dest->i_flags = src->i_flags;
+#ifdef CONFIG_FS_POSIX_ACL
+ dest->i_acl = src->i_acl;
+#endif
+#ifdef CONFIG_SECURITY
+ dest->i_security = src->i_security;
+#endif
+}
+
+static int sdcardfs_permission(struct vfsmount *mnt, struct inode *inode, int mask)
{
int err;
+ struct inode tmp;
+ struct sdcardfs_inode_data *top = top_data_get(SDCARDFS_I(inode));
+
+ if (!top)
+ return -EINVAL;
/*
* Permission check on sdcardfs inode.
* Calling process should have AID_SDCARD_RW permission
+ * Since generic_permission only needs i_mode, i_uid,
+ * i_gid, and i_sb, we can create a fake inode to pass
+ * this information down in.
+ *
+ * The underlying code may attempt to take locks in some
+ * cases for features we're not using, but if that changes,
+ * locks must be dealt with to avoid undefined behavior.
*/
- err = generic_permission(inode, mask);
-
+ copy_attrs(&tmp, inode);
+ tmp.i_uid = make_kuid(&init_user_ns, top->d_uid);
+ tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, top));
+ tmp.i_mode = (inode->i_mode & S_IFMT)
+ | get_mode(mnt, SDCARDFS_I(inode), top);
+ data_put(top);
+ tmp.i_sb = inode->i_sb;
+ if (IS_POSIXACL(inode))
+ pr_warn("%s: This may be undefined behavior...\n", __func__);
+ err = generic_permission(&tmp, mask);
/* XXX
* Original sdcardfs code calls inode_permission(lower_inode,.. )
* for checking inode permission. But doing such things here seems
@@ -624,6 +673,7 @@
* we check it with AID_MEDIA_RW permission
*/
struct inode *lower_inode;
+
OVERRIDE_CRED(SDCARDFS_SB(inode->sb));
lower_inode = sdcardfs_lower_inode(inode);
@@ -636,82 +686,85 @@
}
-static int sdcardfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
- struct kstat *stat)
+static int sdcardfs_setattr_wrn(struct dentry *dentry, struct iattr *ia)
{
- struct dentry *lower_dentry;
- struct inode *inode;
- struct inode *lower_inode;
- struct path lower_path;
- struct dentry *parent;
-
- parent = dget_parent(dentry);
- if(!check_caller_access_to_name(parent->d_inode, dentry->d_name.name)) {
- printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
- " dentry: %s, task:%s\n",
- __func__, dentry->d_name.name, current->comm);
- dput(parent);
- return -EACCES;
- }
- dput(parent);
-
- inode = dentry->d_inode;
-
- sdcardfs_get_lower_path(dentry, &lower_path);
- lower_dentry = lower_path.dentry;
- lower_inode = sdcardfs_lower_inode(inode);
-
-
- sdcardfs_copy_and_fix_attrs(inode, lower_inode);
- fsstack_copy_inode_size(inode, lower_inode);
-
-
- generic_fillattr(inode, stat);
- sdcardfs_put_lower_path(dentry, &lower_path);
- return 0;
+ WARN_RATELIMIT(1, "sdcardfs does not support setattr. Use setattr2.\n");
+ return -EINVAL;
}
-static int sdcardfs_setattr(struct dentry *dentry, struct iattr *ia)
+static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct iattr *ia)
{
int err = 0;
struct dentry *lower_dentry;
+ struct vfsmount *lower_mnt;
struct inode *inode;
struct inode *lower_inode;
struct path lower_path;
struct iattr lower_ia;
struct dentry *parent;
+ struct inode tmp;
+ struct sdcardfs_inode_data *top;
+ const struct cred *saved_cred = NULL;
inode = dentry->d_inode;
+ top = top_data_get(SDCARDFS_I(inode));
+
+ if (!top)
+ return -EINVAL;
+
+ /*
+ * Permission check on sdcardfs inode.
+ * Calling process should have AID_SDCARD_RW permission
+ * Since generic_permission only needs i_mode, i_uid,
+ * i_gid, and i_sb, we can create a fake inode to pass
+ * this information down in.
+ *
+ * The underlying code may attempt to take locks in some
+ * cases for features we're not using, but if that changes,
+ * locks must be dealt with to avoid undefined behavior.
+ *
+ */
+ copy_attrs(&tmp, inode);
+ tmp.i_uid = make_kuid(&init_user_ns, top->d_uid);
+ tmp.i_gid = make_kgid(&init_user_ns, get_gid(mnt, top));
+ tmp.i_mode = (inode->i_mode & S_IFMT)
+ | get_mode(mnt, SDCARDFS_I(inode), top);
+ tmp.i_size = i_size_read(inode);
+ data_put(top);
+ tmp.i_sb = inode->i_sb;
/*
* Check if user has permission to change inode. We don't check if
* this user can change the lower inode: that should happen when
* calling notify_change on the lower inode.
*/
- err = inode_change_ok(inode, ia);
+ /* prepare our own lower struct iattr (with the lower file) */
+ memcpy(&lower_ia, ia, sizeof(lower_ia));
+ /* Allow touch(1) to update timestamps. A previous permission check ensures
+ * we have write access. Changes to mode, owner, and group are ignored.
+ */
+ ia->ia_valid |= ATTR_FORCE;
+ err = inode_change_ok(&tmp, ia);
- /* no vfs_XXX operations required, cred overriding will be skipped. wj*/
if (!err) {
/* check the Android group ID */
parent = dget_parent(dentry);
- if(!check_caller_access_to_name(parent->d_inode, dentry->d_name.name)) {
- printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
- " dentry: %s, task:%s\n",
- __func__, dentry->d_name.name, current->comm);
+ if (!check_caller_access_to_name(parent->d_inode, &dentry->d_name))
err = -EACCES;
- }
dput(parent);
}
if (err)
goto out_err;
+ /* save current_cred and override it */
+ OVERRIDE_CRED(SDCARDFS_SB(dentry->d_sb), saved_cred, SDCARDFS_I(inode));
+
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
+ lower_mnt = lower_path.mnt;
lower_inode = sdcardfs_lower_inode(inode);
- /* prepare our own lower struct iattr (with the lower file) */
- memcpy(&lower_ia, ia, sizeof(lower_ia));
if (ia->ia_valid & ATTR_FILE)
lower_ia.ia_file = sdcardfs_lower_file(ia->ia_file);
@@ -728,7 +781,7 @@
if (current->mm)
down_write(¤t->mm->mmap_sem);
if (ia->ia_valid & ATTR_SIZE) {
- err = inode_newsize_ok(inode, ia->ia_size);
+ err = inode_newsize_ok(&tmp, ia->ia_size);
if (err) {
if (current->mm)
up_write(¤t->mm->mmap_sem);
@@ -751,7 +804,8 @@
* tries to open(), unlink(), then ftruncate() a file.
*/
mutex_lock(&lower_dentry->d_inode->i_mutex);
- err = notify_change(lower_dentry, &lower_ia); /* note: lower_ia */
+ err = notify_change2(lower_mnt, lower_dentry, &lower_ia); /* note: lower_ia */
+
mutex_unlock(&lower_dentry->d_inode->i_mutex);
if (current->mm)
up_write(¤t->mm->mmap_sem);
@@ -769,13 +823,68 @@
out:
sdcardfs_put_lower_path(dentry, &lower_path);
+ REVERT_CRED(saved_cred);
out_err:
return err;
}
+static int sdcardfs_fillattr(struct vfsmount *mnt,
+ struct inode *inode, struct kstat *stat)
+{
+ struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
+ struct sdcardfs_inode_data *top = top_data_get(info);
+
+ if (!top)
+ return -EINVAL;
+
+ stat->dev = inode->i_sb->s_dev;
+ stat->ino = inode->i_ino;
+ stat->mode = (inode->i_mode & S_IFMT) | get_mode(mnt, info, top);
+ stat->nlink = inode->i_nlink;
+ stat->uid = make_kuid(&init_user_ns, top->d_uid);
+ stat->gid = make_kgid(&init_user_ns, get_gid(mnt, top));
+ stat->rdev = inode->i_rdev;
+ stat->size = i_size_read(inode);
+ stat->atime = inode->i_atime;
+ stat->mtime = inode->i_mtime;
+ stat->ctime = inode->i_ctime;
+ stat->blksize = (1 << inode->i_blkbits);
+ stat->blocks = inode->i_blocks;
+ data_put(top);
+ return 0;
+}
+
+static int sdcardfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ struct kstat lower_stat;
+ struct path lower_path;
+ struct dentry *parent;
+ int err;
+
+ parent = dget_parent(dentry);
+ if (!check_caller_access_to_name(parent->d_inode, &dentry->d_name)) {
+ dput(parent);
+ return -EACCES;
+ }
+ dput(parent);
+
+ sdcardfs_get_lower_path(dentry, &lower_path);
+ err = vfs_getattr(&lower_path, &lower_stat);
+ if (err)
+ goto out;
+ sdcardfs_copy_and_fix_attrs(dentry->d_inode,
+ lower_path.dentry->d_inode);
+ err = sdcardfs_fillattr(mnt, dentry->d_inode, stat);
+ stat->blocks = lower_stat.blocks;
+out:
+ sdcardfs_put_lower_path(dentry, &lower_path);
+ return err;
+}
+
const struct inode_operations sdcardfs_symlink_iops = {
- .permission = sdcardfs_permission,
- .setattr = sdcardfs_setattr,
+ .permission2 = sdcardfs_permission,
+ .setattr2 = sdcardfs_setattr,
/* XXX Following operations are implemented,
* but FUSE(sdcard) or FAT does not support them
* These methods are *NOT* perfectly tested.
@@ -788,14 +897,14 @@
const struct inode_operations sdcardfs_dir_iops = {
.create = sdcardfs_create,
.lookup = sdcardfs_lookup,
-#if 0
- .permission = sdcardfs_permission,
-#endif
+ .permission = sdcardfs_permission_wrn,
+ .permission2 = sdcardfs_permission,
.unlink = sdcardfs_unlink,
.mkdir = sdcardfs_mkdir,
.rmdir = sdcardfs_rmdir,
.rename = sdcardfs_rename,
- .setattr = sdcardfs_setattr,
+ .setattr = sdcardfs_setattr_wrn,
+ .setattr2 = sdcardfs_setattr,
.getattr = sdcardfs_getattr,
/* XXX Following operations are implemented,
* but FUSE(sdcard) or FAT does not support them
@@ -807,7 +916,9 @@
};
const struct inode_operations sdcardfs_main_iops = {
- .permission = sdcardfs_permission,
- .setattr = sdcardfs_setattr,
+ .permission = sdcardfs_permission_wrn,
+ .permission2 = sdcardfs_permission,
+ .setattr = sdcardfs_setattr_wrn,
+ .setattr2 = sdcardfs_setattr,
.getattr = sdcardfs_getattr,
};
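The permission2/setattr2 hooks come from the vfsmount-aware VFS changes carried elsewhere in this tree; the plain permission/setattr entries are wired to rate-limited warning stubs so any caller that bypasses the mount-aware path is noticed. Roughly, the core is expected to dispatch like this (a sketch of that plumbing, not code from this diff):

/* assumed dispatch in the VFS permission path */
if (inode->i_op->permission2)
	err = inode->i_op->permission2(mnt, inode, mask);
else if (inode->i_op->permission)
	err = inode->i_op->permission(inode, mask);
else
	err = generic_permission(inode, mask);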
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index a01b06a..369b94e 100755
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -36,8 +36,7 @@
void sdcardfs_destroy_dentry_cache(void)
{
- if (sdcardfs_dentry_cachep)
- kmem_cache_destroy(sdcardfs_dentry_cachep);
+ kmem_cache_destroy(sdcardfs_dentry_cachep);
}
void free_dentry_private_data(struct dentry *dentry)
@@ -72,7 +71,8 @@
static int sdcardfs_inode_test(struct inode *inode, void *candidate_data/*void *candidate_lower_inode*/)
{
struct inode *current_lower_inode = sdcardfs_lower_inode(inode);
- userid_t current_userid = SDCARDFS_I(inode)->userid;
+ userid_t current_userid = SDCARDFS_I(inode)->data->userid;
+
if (current_lower_inode == ((struct inode_data *)candidate_data)->lower_inode &&
current_userid == ((struct inode_data *)candidate_data)->id)
return 1; /* found a match */
@@ -91,7 +91,9 @@
struct sdcardfs_inode_info *info;
struct inode_data data;
struct inode *inode; /* the new inode to return */
- int err;
+
+ if (!igrab(lower_inode))
+ return ERR_PTR(-ESTALE);
data.id = id;
data.lower_inode = lower_inode;
@@ -102,26 +104,23 @@
* instead.
*/
lower_inode->i_ino, /* hashval */
- sdcardfs_inode_test, /* inode comparison function */
+ sdcardfs_inode_test, /* inode comparison function */
sdcardfs_inode_set, /* inode init function */
&data); /* data passed to test+set fxns */
if (!inode) {
- err = -EACCES;
iput(lower_inode);
- return ERR_PTR(err);
+ return ERR_PTR(-ENOMEM);
}
- /* if found a cached inode, then just return it */
- if (!(inode->i_state & I_NEW))
+ /* if found a cached inode, then just return it (after iput) */
+ if (!(inode->i_state & I_NEW)) {
+ iput(lower_inode);
return inode;
+ }
/* initialize new inode */
info = SDCARDFS_I(inode);
inode->i_ino = lower_inode->i_ino;
- if (!igrab(lower_inode)) {
- err = -ESTALE;
- return ERR_PTR(err);
- }
sdcardfs_set_lower_inode(inode, lower_inode);
inode->i_version++;
@@ -164,27 +163,25 @@
}
/*
- * Connect a sdcardfs inode dentry/inode with several lower ones. This is
- * the classic stackable file system "vnode interposition" action.
- *
- * @dentry: sdcardfs's dentry which interposes on lower one
- * @sb: sdcardfs's super_block
- * @lower_path: the lower path (caller does path_get/put)
+ * Helper interpose routine, called directly by ->lookup to handle
+ * spliced dentries.
*/
-int sdcardfs_interpose(struct dentry *dentry, struct super_block *sb,
- struct path *lower_path, userid_t id)
+static struct dentry *__sdcardfs_interpose(struct dentry *dentry,
+ struct super_block *sb,
+ struct path *lower_path,
+ userid_t id)
{
- int err = 0;
struct inode *inode;
struct inode *lower_inode;
struct super_block *lower_sb;
+ struct dentry *ret_dentry;
lower_inode = lower_path->dentry->d_inode;
lower_sb = sdcardfs_lower_super(sb);
/* check that the lower file system didn't cross a mount point */
if (lower_inode->i_sb != lower_sb) {
- err = -EXDEV;
+ ret_dentry = ERR_PTR(-EXDEV);
goto out;
}
@@ -196,14 +193,54 @@
/* inherit lower inode number for sdcardfs's inode */
inode = sdcardfs_iget(sb, lower_inode, id);
if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
+ ret_dentry = ERR_CAST(inode);
goto out;
}
- d_add(dentry, inode);
+ ret_dentry = d_splice_alias(inode, dentry);
+ dentry = ret_dentry ?: dentry;
update_derived_permission_lock(dentry);
out:
- return err;
+ return ret_dentry;
+}
+
+/*
+ * Connect an sdcardfs inode dentry/inode with several lower ones. This is
+ * the classic stackable file system "vnode interposition" action.
+ *
+ * @dentry: sdcardfs's dentry which interposes on lower one
+ * @sb: sdcardfs's super_block
+ * @lower_path: the lower path (caller does path_get/put)
+ */
+int sdcardfs_interpose(struct dentry *dentry, struct super_block *sb,
+ struct path *lower_path, userid_t id)
+{
+ struct dentry *ret_dentry;
+
+ ret_dentry = __sdcardfs_interpose(dentry, sb, lower_path, id);
+ return PTR_ERR(ret_dentry);
+}
+
+struct sdcardfs_name_data {
+ struct dir_context ctx;
+ const struct qstr *to_find;
+ char *name;
+ bool found;
+};
+
+static int sdcardfs_name_match(void *__buf, const char *name, int namelen,
+ loff_t offset, u64 ino, unsigned int d_type)
+{
+ struct sdcardfs_name_data *buf = (struct sdcardfs_name_data *) __buf;
+ struct qstr candidate = QSTR_INIT(name, namelen);
+
+ if (qstr_case_eq(buf->to_find, &candidate)) {
+ memcpy(buf->name, name, namelen);
+ buf->name[namelen] = 0;
+ buf->found = true;
+ return 1;
+ }
+ return 0;
}
/*
@@ -219,9 +256,10 @@
struct vfsmount *lower_dir_mnt;
struct dentry *lower_dir_dentry = NULL;
struct dentry *lower_dentry;
- const char *name;
+ const struct qstr *name;
struct path lower_path;
- struct qstr this;
+ struct qstr dname;
+ struct dentry *ret_dentry = NULL;
struct sdcardfs_sb_info *sbi;
sbi = SDCARDFS_SB(dentry->d_sb);
@@ -231,47 +269,90 @@
if (IS_ROOT(dentry))
goto out;
- name = dentry->d_name.name;
+ name = &dentry->d_name;
/* now start the actual lookup procedure */
lower_dir_dentry = lower_parent_path->dentry;
lower_dir_mnt = lower_parent_path->mnt;
/* Use vfs_path_lookup to check if the dentry exists or not */
- err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, name, 0,
+ err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, name->name, 0,
&lower_path);
+ /* check for other cases */
+ if (err == -ENOENT) {
+ struct file *file;
+ const struct cred *cred = current_cred();
+
+ struct sdcardfs_name_data buffer = {
+ .ctx.actor = sdcardfs_name_match,
+ .to_find = name,
+ .name = __getname(),
+ .found = false,
+ };
+
+ if (!buffer.name) {
+ err = -ENOMEM;
+ goto out;
+ }
+ file = dentry_open(lower_parent_path, O_RDONLY, cred);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ goto put_name;
+ }
+ err = iterate_dir(file, &buffer.ctx);
+ fput(file);
+ if (err)
+ goto put_name;
+
+ if (buffer.found)
+ err = vfs_path_lookup(lower_dir_dentry,
+ lower_dir_mnt,
+ buffer.name, 0,
+ &lower_path);
+ else
+ err = -ENOENT;
+put_name:
+ __putname(buffer.name);
+ }
/* no error: handle positive dentries */
if (!err) {
/* check if the dentry is an obb dentry
* if true, the lower_inode must be replaced with
- * the inode of the graft path */
+ * the inode of the graft path
+ */
- if(need_graft_path(dentry)) {
+ if (need_graft_path(dentry)) {
/* setup_obb_dentry()
- * The lower_path will be stored to the dentry's orig_path
+ * The lower_path will be stored to the dentry's orig_path
* and the base obbpath will be copyed to the lower_path variable.
* if an error returned, there's no change in the lower_path
- * returns: -ERRNO if error (0: no error) */
+ * returns: -ERRNO if error (0: no error)
+ */
err = setup_obb_dentry(dentry, &lower_path);
- if(err) {
+ if (err) {
/* if the sbi->obbpath is not available, we can optionally
* setup the lower_path with its orig_path.
* but, the current implementation just returns an error
* because the sdcard daemon also regards this case as
- * a lookup fail. */
- printk(KERN_INFO "sdcardfs: base obbpath is not available\n");
+ * a lookup failure.
+ */
+ pr_info("sdcardfs: base obbpath is not available\n");
sdcardfs_put_reset_orig_path(dentry);
goto out;
}
}
sdcardfs_set_lower_path(dentry, &lower_path);
- err = sdcardfs_interpose(dentry, dentry->d_sb, &lower_path, id);
- if (err) /* path_put underlying path on error */
+ ret_dentry =
+ __sdcardfs_interpose(dentry, dentry->d_sb, &lower_path, id);
+ if (IS_ERR(ret_dentry)) {
+ err = PTR_ERR(ret_dentry);
+ /* path_put underlying path on error */
sdcardfs_put_reset_lower_path(dentry);
+ }
goto out;
}
@@ -283,21 +364,24 @@
goto out;
/* instatiate a new negative dentry */
- this.name = name;
- this.len = strlen(name);
- this.hash = full_name_hash(this.name, this.len);
- lower_dentry = d_lookup(lower_dir_dentry, &this);
- if (lower_dentry)
- goto setup_lower;
+ dname.name = name->name;
+ dname.len = name->len;
- lower_dentry = d_alloc(lower_dir_dentry, &this);
+ /* See if the low-level filesystem might want
+ * to use its own hash
+ */
+ lower_dentry = d_hash_and_lookup(lower_dir_dentry, &dname);
+ if (IS_ERR(lower_dentry))
+ return lower_dentry;
if (!lower_dentry) {
- err = -ENOMEM;
+ /* We called vfs_path_lookup earlier, and did not get a negative
+ * dentry then. Don't confuse the lower filesystem by forcing
+ * one on it now...
+ */
+ err = -ENOENT;
goto out;
}
- d_add(lower_dentry, NULL); /* instantiate and hash */
-setup_lower:
lower_path.dentry = lower_dentry;
lower_path.mnt = mntget(lower_dir_mnt);
sdcardfs_set_lower_path(dentry, &lower_path);
@@ -311,14 +395,16 @@
err = 0;
out:
- return ERR_PTR(err);
+ if (err)
+ return ERR_PTR(err);
+ return ret_dentry;
}
/*
* On success:
- * fills dentry object appropriate values and returns NULL.
+ * fills the dentry object with appropriate values and returns NULL.
* On fail (== error)
- * returns error ptr
+ * returns error ptr
*
* @dir : Parent inode. It is locked (dir->i_mutex)
* @dentry : Target dentry to lookup. we should set each of fields.
@@ -335,16 +421,13 @@
parent = dget_parent(dentry);
- if(!check_caller_access_to_name(parent->d_inode, dentry->d_name.name)) {
+ if (!check_caller_access_to_name(parent->d_inode, &dentry->d_name)) {
ret = ERR_PTR(-EACCES);
- printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
- " dentry: %s, task:%s\n",
- __func__, dentry->d_name.name, current->comm);
goto out_err;
- }
+ }
/* save current_cred and override it */
- OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred);
+ OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
sdcardfs_get_lower_path(parent, &lower_parent_path);
@@ -355,21 +438,19 @@
goto out;
}
- ret = __sdcardfs_lookup(dentry, flags, &lower_parent_path, SDCARDFS_I(dir)->userid);
+ ret = __sdcardfs_lookup(dentry, flags, &lower_parent_path,
+ SDCARDFS_I(dir)->data->userid);
if (IS_ERR(ret))
- {
goto out;
- }
if (ret)
dentry = ret;
if (dentry->d_inode) {
fsstack_copy_attr_times(dentry->d_inode,
sdcardfs_lower_inode(dentry->d_inode));
- /* get drived permission */
- mutex_lock(&dentry->d_inode->i_mutex);
+ /* get derived permission */
get_derived_permission(parent, dentry);
- fix_derived_permission(dentry->d_inode);
- mutex_unlock(&dentry->d_inode->i_mutex);
+ fixup_tmp_permissions(dentry->d_inode);
+ fixup_lower_ownership(dentry, dentry->d_name.name);
}
/* update parent directory's atime */
fsstack_copy_attr_atime(parent->d_inode,
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 8b51a12..129d98e 100755
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -28,9 +28,8 @@
Opt_fsgid,
Opt_gid,
Opt_debug,
- Opt_lower_fs,
Opt_mask,
- Opt_multiuser, // May need?
+ Opt_multiuser,
Opt_userid,
Opt_reserved_mb,
Opt_err,
@@ -49,7 +48,8 @@
};
static int parse_options(struct super_block *sb, char *options, int silent,
- int *debug, struct sdcardfs_mount_options *opts)
+ int *debug, struct sdcardfs_vfsmount_options *vfsopts,
+ struct sdcardfs_mount_options *opts)
{
char *p;
substring_t args[MAX_OPT_ARGS];
@@ -58,10 +58,10 @@
/* by default, we use AID_MEDIA_RW as uid, gid */
opts->fs_low_uid = AID_MEDIA_RW;
opts->fs_low_gid = AID_MEDIA_RW;
- opts->mask = 0;
+ vfsopts->mask = 0;
opts->multiuser = false;
opts->fs_user_id = 0;
- opts->gid = 0;
+ vfsopts->gid = 0;
/* by default, 0MB is reserved */
opts->reserved_mb = 0;
@@ -72,6 +72,7 @@
while ((p = strsep(&options, ",")) != NULL) {
int token;
+
if (!*p)
continue;
@@ -94,7 +95,7 @@
case Opt_gid:
if (match_int(&args[0], &option))
return 0;
- opts->gid = option;
+ vfsopts->gid = option;
break;
case Opt_userid:
if (match_int(&args[0], &option))
@@ -104,7 +105,7 @@
case Opt_mask:
if (match_int(&args[0], &option))
return 0;
- opts->mask = option;
+ vfsopts->mask = option;
break;
case Opt_multiuser:
opts->multiuser = true;
@@ -116,25 +117,81 @@
break;
/* unknown option */
default:
- if (!silent) {
- printk( KERN_ERR "Unrecognized mount option \"%s\" "
- "or missing value", p);
- }
+ if (!silent)
+ pr_err("Unrecognized mount option \"%s\" or missing value", p);
return -EINVAL;
}
}
if (*debug) {
- printk( KERN_INFO "sdcardfs : options - debug:%d\n", *debug);
- printk( KERN_INFO "sdcardfs : options - uid:%d\n",
+ pr_info("sdcardfs : options - debug:%d\n", *debug);
+ pr_info("sdcardfs : options - uid:%d\n",
opts->fs_low_uid);
- printk( KERN_INFO "sdcardfs : options - gid:%d\n",
+ pr_info("sdcardfs : options - gid:%d\n",
opts->fs_low_gid);
}
return 0;
}
+int parse_options_remount(struct super_block *sb, char *options, int silent,
+ struct sdcardfs_vfsmount_options *vfsopts)
+{
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ int option;
+ int debug = 0;
+
+ if (!options)
+ return 0;
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, sdcardfs_tokens, args);
+
+ switch (token) {
+ case Opt_debug:
+ debug = 1;
+ break;
+ case Opt_gid:
+ if (match_int(&args[0], &option))
+ return 0;
+ vfsopts->gid = option;
+
+ break;
+ case Opt_mask:
+ if (match_int(&args[0], &option))
+ return 0;
+ vfsopts->mask = option;
+ break;
+ case Opt_multiuser:
+ case Opt_userid:
+ case Opt_fsuid:
+ case Opt_fsgid:
+ case Opt_reserved_mb:
+ pr_warn("Option \"%s\" can't be changed during remount\n", p);
+ break;
+ /* unknown option */
+ default:
+ if (!silent)
+ pr_err("Unrecognized mount option \"%s\" or missing value", p);
+ return -EINVAL;
+ }
+ }
+
+ if (debug) {
+ pr_info("sdcardfs : options - debug:%d\n", debug);
+ pr_info("sdcardfs : options - gid:%d\n", vfsopts->gid);
+ pr_info("sdcardfs : options - mask:%d\n", vfsopts->mask);
+ }
+
+ return 0;
+}
+
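Only gid, mask, and debug survive a remount in the parser above; the other options warn and keep their mount-time values. As a rough, hypothetical illustration (not part of this patch, and assuming the view is already mounted at the path shown), a userspace remount that tightens the mask could look like:

	/* Hypothetical sketch: adjust a per-mount view after the fact. */
	#include <sys/mount.h>

	int tighten_view(void)
	{
		/* Only gid= and mask= are honored here; fsuid/fsgid/userid/
		 * multiuser/reserved_mb hit the "can't be changed during
		 * remount" warning instead. */
		return mount("none", "/mnt/runtime/default/emulated", "sdcardfs",
			     MS_REMOUNT, "gid=9997,mask=6");
	}
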
#if 0
/*
* our custom d_alloc_root work-alike
@@ -164,57 +221,58 @@
#endif
DEFINE_MUTEX(sdcardfs_super_list_lock);
-LIST_HEAD(sdcardfs_super_list);
EXPORT_SYMBOL_GPL(sdcardfs_super_list_lock);
+LIST_HEAD(sdcardfs_super_list);
EXPORT_SYMBOL_GPL(sdcardfs_super_list);
/*
* There is no need to lock the sdcardfs_super_info's rwsem as there is no
* way anyone can have a reference to the superblock at this point in time.
*/
-static int sdcardfs_read_super(struct super_block *sb, const char *dev_name,
- void *raw_data, int silent)
+static int sdcardfs_read_super(struct vfsmount *mnt, struct super_block *sb,
+ const char *dev_name, void *raw_data, int silent)
{
int err = 0;
int debug;
struct super_block *lower_sb;
struct path lower_path;
struct sdcardfs_sb_info *sb_info;
+ struct sdcardfs_vfsmount_options *mnt_opt = mnt->data;
struct inode *inode;
- printk(KERN_INFO "sdcardfs version 2.0\n");
+ pr_info("sdcardfs version 2.0\n");
if (!dev_name) {
- printk(KERN_ERR
- "sdcardfs: read_super: missing dev_name argument\n");
+ pr_err("sdcardfs: read_super: missing dev_name argument\n");
err = -EINVAL;
goto out;
}
- printk(KERN_INFO "sdcardfs: dev_name -> %s\n", dev_name);
- printk(KERN_INFO "sdcardfs: options -> %s\n", (char *)raw_data);
+ pr_info("sdcardfs: dev_name -> %s\n", dev_name);
+ pr_info("sdcardfs: options -> %s\n", (char *)raw_data);
+ pr_info("sdcardfs: mnt -> %p\n", mnt);
/* parse lower path */
err = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
&lower_path);
if (err) {
- printk(KERN_ERR "sdcardfs: error accessing lower directory '%s'\n", dev_name);
+ pr_err("sdcardfs: error accessing lower directory '%s'\n", dev_name);
goto out;
}
/* allocate superblock private data */
sb->s_fs_info = kzalloc(sizeof(struct sdcardfs_sb_info), GFP_KERNEL);
if (!SDCARDFS_SB(sb)) {
- printk(KERN_CRIT "sdcardfs: read_super: out of memory\n");
+ pr_crit("sdcardfs: read_super: out of memory\n");
err = -ENOMEM;
goto out_free;
}
sb_info = sb->s_fs_info;
/* parse options */
- err = parse_options(sb, raw_data, silent, &debug, &sb_info->options);
+ err = parse_options(sb, raw_data, silent, &debug, mnt_opt, &sb_info->options);
if (err) {
- printk(KERN_ERR "sdcardfs: invalid options\n");
+ pr_err("sdcardfs: invalid options\n");
goto out_freesbi;
}
@@ -274,23 +332,24 @@
/* setup permission policy */
sb_info->obbpath_s = kzalloc(PATH_MAX, GFP_KERNEL);
mutex_lock(&sdcardfs_super_list_lock);
- if(sb_info->options.multiuser) {
- setup_derived_state(sb->s_root->d_inode, PERM_PRE_ROOT, sb_info->options.fs_user_id, AID_ROOT, false);
+ if (sb_info->options.multiuser) {
+ setup_derived_state(sb->s_root->d_inode, PERM_PRE_ROOT,
+ sb_info->options.fs_user_id, AID_ROOT,
+ false, SDCARDFS_I(sb->s_root->d_inode)->data);
snprintf(sb_info->obbpath_s, PATH_MAX, "%s/obb", dev_name);
- /*err = prepare_dir(sb_info->obbpath_s,
- sb_info->options.fs_low_uid,
- sb_info->options.fs_low_gid, 00755);*/
} else {
- setup_derived_state(sb->s_root->d_inode, PERM_ROOT, sb_info->options.fs_low_uid, AID_ROOT, false);
+ setup_derived_state(sb->s_root->d_inode, PERM_ROOT,
+ sb_info->options.fs_user_id, AID_ROOT,
+ false, SDCARDFS_I(sb->s_root->d_inode)->data);
snprintf(sb_info->obbpath_s, PATH_MAX, "%s/Android/obb", dev_name);
}
- fix_derived_permission(sb->s_root->d_inode);
+ fixup_tmp_permissions(sb->s_root->d_inode);
sb_info->sb = sb;
list_add(&sb_info->list, &sdcardfs_super_list);
mutex_unlock(&sdcardfs_super_list_lock);
if (!silent)
- printk(KERN_INFO "sdcardfs: mounted on top of %s type %s\n",
+ pr_info("sdcardfs: mounted on top of %s type %s\n",
dev_name, lower_sb->s_type->name);
goto out; /* all is well */
@@ -313,9 +372,11 @@
}
/* A feature which supports mount_nodev() with options */
-static struct dentry *mount_nodev_with_options(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, const char *, void *, int))
+static struct dentry *mount_nodev_with_options(struct vfsmount *mnt,
+ struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data,
+ int (*fill_super)(struct vfsmount *, struct super_block *,
+ const char *, void *, int))
{
int error;
@@ -326,7 +387,7 @@
s->s_flags = flags;
- error = fill_super(s, dev_name, data, flags & MS_SILENT ? 1 : 0);
+ error = fill_super(mnt, s, dev_name, data, flags & MS_SILENT ? 1 : 0);
if (error) {
deactivate_locked_super(s);
return ERR_PTR(error);
@@ -335,19 +396,34 @@
return dget(s->s_root);
}
-struct dentry *sdcardfs_mount(struct file_system_type *fs_type, int flags,
+static struct dentry *sdcardfs_mount(struct vfsmount *mnt,
+ struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
/*
* dev_name is a lower_path_name,
* raw_data is a option string.
*/
- return mount_nodev_with_options(fs_type, flags, dev_name,
- raw_data, sdcardfs_read_super);
+ return mount_nodev_with_options(mnt, fs_type, flags, dev_name,
+ raw_data, sdcardfs_read_super);
}
-void sdcardfs_kill_sb(struct super_block *sb) {
+static struct dentry *sdcardfs_mount_wrn(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data)
+{
+ WARN(1, "sdcardfs does not support mount. Use mount2.\n");
+ return ERR_PTR(-EINVAL);
+}
+
+void *sdcardfs_alloc_mnt_data(void)
+{
+ return kmalloc(sizeof(struct sdcardfs_vfsmount_options), GFP_KERNEL);
+}
+
+void sdcardfs_kill_sb(struct super_block *sb)
+{
struct sdcardfs_sb_info *sbi;
+
if (sb->s_magic == SDCARDFS_SUPER_MAGIC) {
sbi = SDCARDFS_SB(sb);
mutex_lock(&sdcardfs_super_list_lock);
@@ -360,10 +436,13 @@
static struct file_system_type sdcardfs_fs_type = {
.owner = THIS_MODULE,
.name = SDCARDFS_NAME,
- .mount = sdcardfs_mount,
+ .mount = sdcardfs_mount_wrn,
+ .mount2 = sdcardfs_mount,
+ .alloc_mnt_data = sdcardfs_alloc_mnt_data,
.kill_sb = sdcardfs_kill_sb,
.fs_flags = 0,
};
+MODULE_ALIAS_FS(SDCARDFS_NAME);
static int __init init_sdcardfs_fs(void)
{
@@ -399,10 +478,15 @@
pr_info("Completed sdcardfs module unload\n");
}
-MODULE_AUTHOR("Erez Zadok, Filesystems and Storage Lab, Stony Brook University"
- " (http://www.fsl.cs.sunysb.edu/)");
-MODULE_DESCRIPTION("Wrapfs " SDCARDFS_VERSION
- " (http://wrapfs.filesystems.org/)");
+/* Original wrapfs authors */
+MODULE_AUTHOR("Erez Zadok, Filesystems and Storage Lab, Stony Brook University (http://www.fsl.cs.sunysb.edu/)");
+
+/* Original sdcardfs authors */
+MODULE_AUTHOR("Woojoong Lee, Daeho Jeong, Kitae Lee, Yeongjin Gil System Memory Lab., Samsung Electronics");
+
+/* Current maintainer */
+MODULE_AUTHOR("Daniel Rosenberg, Google");
+MODULE_DESCRIPTION("Sdcardfs " SDCARDFS_VERSION);
MODULE_LICENSE("GPL");
module_init(init_sdcardfs_fs);
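The switch from .mount to .mount2 plus alloc_mnt_data is what moves gid and mask out of the superblock and into per-vfsmount data, so different views of the same backing directory can carry different visibility settings. A hedged sketch of an initial mount request from userspace; paths, IDs, and option values are illustrative only, not taken from this patch:

	/* Hypothetical: one sdcardfs view over an emulated-storage backing dir. */
	#include <sys/mount.h>

	int mount_default_view(void)
	{
		return mount("/data/media", "/mnt/runtime/default/emulated",
			     "sdcardfs", MS_NOSUID | MS_NODEV | MS_NOEXEC,
			     "fsuid=1023,fsgid=1023,gid=9997,mask=6,userid=0,multiuser");
	}
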
diff --git a/fs/sdcardfs/mmap.c b/fs/sdcardfs/mmap.c
index c807d7f..96759ca 100755
--- a/fs/sdcardfs/mmap.c
+++ b/fs/sdcardfs/mmap.c
@@ -23,28 +23,46 @@
static int sdcardfs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
int err;
- struct file *file, *lower_file;
+ struct file *file;
const struct vm_operations_struct *lower_vm_ops;
- struct vm_area_struct lower_vma;
- memcpy(&lower_vma, vma, sizeof(struct vm_area_struct));
- file = lower_vma.vm_file;
+ file = (struct file *)vma->vm_private_data;
lower_vm_ops = SDCARDFS_F(file)->lower_vm_ops;
BUG_ON(!lower_vm_ops);
- lower_file = sdcardfs_lower_file(file);
- /*
- * XXX: vm_ops->fault may be called in parallel. Because we have to
- * resort to temporarily changing the vma->vm_file to point to the
- * lower file, a concurrent invocation of sdcardfs_fault could see a
- * different value. In this workaround, we keep a different copy of
- * the vma structure in our stack, so we never expose a different
- * value of the vma->vm_file called to us, even temporarily. A
- * better fix would be to change the calling semantics of ->fault to
- * take an explicit file pointer.
- */
- lower_vma.vm_file = lower_file;
- err = lower_vm_ops->fault(&lower_vma, vmf);
+ err = lower_vm_ops->fault(vma, vmf);
+ return err;
+}
+
+static void sdcardfs_vm_open(struct vm_area_struct *vma)
+{
+ struct file *file = (struct file *)vma->vm_private_data;
+
+ get_file(file);
+}
+
+static void sdcardfs_vm_close(struct vm_area_struct *vma)
+{
+ struct file *file = (struct file *)vma->vm_private_data;
+
+ fput(file);
+}
+
+static int sdcardfs_page_mkwrite(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ int err = 0;
+ struct file *file;
+ const struct vm_operations_struct *lower_vm_ops;
+
+ file = (struct file *)vma->vm_private_data;
+ lower_vm_ops = SDCARDFS_F(file)->lower_vm_ops;
+ BUG_ON(!lower_vm_ops);
+ if (!lower_vm_ops->page_mkwrite)
+ goto out;
+
+ err = lower_vm_ops->page_mkwrite(vma, vmf);
+out:
return err;
}
@@ -53,30 +71,20 @@
unsigned long nr_segs)
{
/*
- * This function returns zero on purpose in order to support direct IO.
- * __dentry_open checks a_ops->direct_IO and returns EINVAL if it is null.
- *
- * However, this function won't be called by certain file operations
- * including generic fs functions. * reads and writes are delivered to
- * the lower file systems and the direct IOs will be handled by them.
- *
- * NOTE: exceptionally, on the recent kernels (since Linux 3.8.x),
- * swap_writepage invokes this function directly.
+ * This function should never be called directly. We need it
+ * to exist, to get past a check in open_check_o_direct(),
+ * which is called from do_last().
*/
- printk(KERN_INFO "%s, operation is not supported\n", __func__);
- return 0;
+ return -EINVAL;
}
-/*
- * XXX: the default address_space_ops for sdcardfs is empty. We cannot set
- * our inode->i_mapping->a_ops to NULL because too many code paths expect
- * the a_ops vector to be non-NULL.
- */
const struct address_space_operations sdcardfs_aops = {
- /* empty on purpose */
.direct_IO = sdcardfs_direct_IO,
};
const struct vm_operations_struct sdcardfs_vm_ops = {
.fault = sdcardfs_fault,
+ .page_mkwrite = sdcardfs_page_mkwrite,
+ .open = sdcardfs_vm_open,
+ .close = sdcardfs_vm_close,
};
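The fault and page_mkwrite handlers above now recover the sdcardfs file from vma->vm_private_data instead of cloning the whole vma, and the new open/close hooks pin that file for as long as any duplicate of the vma (after fork(), for instance) is still around. A minimal, hedged sketch of how an mmap handler is expected to pair with these vm_ops, assuming the usual stacking-filesystem pattern; the real sdcardfs_mmap() lives outside this hunk:

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct file *lower_file = sdcardfs_lower_file(file);
		int err;

		/* Let the lower filesystem install its own vm_ops first... */
		err = lower_file->f_op->mmap(lower_file, vma);
		if (err)
			return err;

		/* ...remember them so sdcardfs_fault()/page_mkwrite() can
		 * forward... */
		SDCARDFS_F(file)->lower_vm_ops = vma->vm_ops;

		/* ...and stash the upper file for the wrappers above.  The
		 * extra reference is dropped by sdcardfs_vm_close() when the
		 * original vma goes away. */
		get_file(file);
		vma->vm_private_data = file;
		vma->vm_ops = &sdcardfs_vm_ops;
		return 0;
	}
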
diff --git a/fs/sdcardfs/multiuser.h b/fs/sdcardfs/multiuser.h
index 923ba10..85341e7 100755
--- a/fs/sdcardfs/multiuser.h
+++ b/fs/sdcardfs/multiuser.h
@@ -18,20 +18,36 @@
* General Public License.
*/
-#define MULTIUSER_APP_PER_USER_RANGE 100000
+#define AID_USER_OFFSET 100000 /* offset for uid ranges for each user */
+#define AID_APP_START 10000 /* first app user */
+#define AID_APP_END 19999 /* last app user */
+#define AID_CACHE_GID_START 20000 /* start of gids for apps to mark cached data */
+#define AID_EXT_GID_START 30000 /* start of gids for apps to mark external data */
+#define AID_EXT_CACHE_GID_START 40000 /* start of gids for apps to mark external cached data */
+#define AID_EXT_CACHE_GID_END 49999 /* end of gids for apps to mark external cached data */
+#define AID_SHARED_GID_START 50000 /* start of gids for apps in each user to share */
typedef uid_t userid_t;
typedef uid_t appid_t;
-static inline userid_t multiuser_get_user_id(uid_t uid) {
- return uid / MULTIUSER_APP_PER_USER_RANGE;
+static inline uid_t multiuser_get_uid(userid_t user_id, appid_t app_id)
+{
+ return (user_id * AID_USER_OFFSET) + (app_id % AID_USER_OFFSET);
}
-static inline appid_t multiuser_get_app_id(uid_t uid) {
- return uid % MULTIUSER_APP_PER_USER_RANGE;
+static inline bool uid_is_app(uid_t uid)
+{
+ appid_t appid = uid % AID_USER_OFFSET;
+
+ return appid >= AID_APP_START && appid <= AID_APP_END;
}
-static inline uid_t multiuser_get_uid(userid_t userId, appid_t appId) {
- return userId * MULTIUSER_APP_PER_USER_RANGE + (appId % MULTIUSER_APP_PER_USER_RANGE);
+static inline gid_t multiuser_get_ext_cache_gid(uid_t uid)
+{
+ return uid - AID_APP_START + AID_EXT_CACHE_GID_START;
}
+static inline gid_t multiuser_get_ext_gid(uid_t uid)
+{
+ return uid - AID_APP_START + AID_EXT_GID_START;
+}
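To make the ranges above concrete: uid_is_app() looks only at uid % AID_USER_OFFSET, so for an app id of 10123 it is true both for the user-0 uid multiuser_get_uid(0, 10123) = 10123 and for the user-10 uid multiuser_get_uid(10, 10123) = 10 * 100000 + 10123 = 1010123. For a user-0 app uid of 10123, the derived storage groups are multiuser_get_ext_gid(10123) = 10123 - 10000 + 30000 = 30123 and multiuser_get_ext_cache_gid(10123) = 10123 - 10000 + 40000 = 40123, i.e. each app id maps onto matching slots in the external and external-cache GID ranges.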
diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c
index 0cf9c34..f120bcc 100755
--- a/fs/sdcardfs/packagelist.c
+++ b/fs/sdcardfs/packagelist.c
@@ -20,8 +20,10 @@
#include "sdcardfs.h"
#include <linux/hashtable.h>
+#include <linux/ctype.h>
#include <linux/delay.h>
-
+#include <linux/radix-tree.h>
+#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -29,389 +31,840 @@
#include <linux/configfs.h>
-#define STRING_BUF_SIZE (512)
-
struct hashtable_entry {
struct hlist_node hlist;
- void *key;
- unsigned int value;
+ struct hlist_node dlist; /* for deletion cleanup */
+ struct qstr key;
+ atomic_t value;
};
-struct sb_list {
- struct super_block *sb;
- struct list_head list;
-};
+static DEFINE_HASHTABLE(package_to_appid, 8);
+static DEFINE_HASHTABLE(package_to_userid, 8);
+static DEFINE_HASHTABLE(ext_to_groupid, 8);
-struct packagelist_data {
- DECLARE_HASHTABLE(package_to_appid,8);
- struct mutex hashtable_lock;
-
-};
-
-static struct packagelist_data *pkgl_data_all;
static struct kmem_cache *hashtable_entry_cachep;
-static unsigned int str_hash(const char *key) {
- int i;
- unsigned int h = strlen(key);
- char *data = (char *)key;
+static unsigned int full_name_case_hash(const unsigned char *name, unsigned int len)
+{
+ unsigned long hash = init_name_hash();
- for (i = 0; i < strlen(key); i++) {
- h = h * 31 + *data;
- data++;
- }
- return h;
+ while (len--)
+ hash = partial_name_hash(tolower(*name++), hash);
+ return end_name_hash(hash);
}
-appid_t get_appid(void *pkgl_id, const char *app_name)
+static inline void qstr_init(struct qstr *q, const char *name)
{
- struct packagelist_data *pkgl_dat = pkgl_data_all;
+ q->name = name;
+ q->len = strlen(q->name);
+ q->hash = full_name_case_hash(q->name, q->len);
+}
+
+static inline int qstr_copy(const struct qstr *src, struct qstr *dest)
+{
+ dest->name = kstrdup(src->name, GFP_KERNEL);
+ dest->hash_len = src->hash_len;
+ return !!dest->name;
+}
+
+
+static appid_t __get_appid(const struct qstr *key)
+{
struct hashtable_entry *hash_cur;
- unsigned int hash = str_hash(app_name);
+ unsigned int hash = key->hash;
appid_t ret_id;
- mutex_lock(&pkgl_dat->hashtable_lock);
- hash_for_each_possible(pkgl_dat->package_to_appid, hash_cur, hlist, hash) {
- if (!strcasecmp(app_name, hash_cur->key)) {
- ret_id = (appid_t)hash_cur->value;
- mutex_unlock(&pkgl_dat->hashtable_lock);
+ rcu_read_lock();
+ hash_for_each_possible_rcu(package_to_appid, hash_cur, hlist, hash) {
+ if (qstr_case_eq(key, &hash_cur->key)) {
+ ret_id = atomic_read(&hash_cur->value);
+ rcu_read_unlock();
return ret_id;
}
}
- mutex_unlock(&pkgl_dat->hashtable_lock);
+ rcu_read_unlock();
return 0;
}
+appid_t get_appid(const char *key)
+{
+ struct qstr q;
+
+ qstr_init(&q, key);
+ return __get_appid(&q);
+}
+
+static appid_t __get_ext_gid(const struct qstr *key)
+{
+ struct hashtable_entry *hash_cur;
+ unsigned int hash = key->hash;
+ appid_t ret_id;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(ext_to_groupid, hash_cur, hlist, hash) {
+ if (qstr_case_eq(key, &hash_cur->key)) {
+ ret_id = atomic_read(&hash_cur->value);
+ rcu_read_unlock();
+ return ret_id;
+ }
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+appid_t get_ext_gid(const char *key)
+{
+ struct qstr q;
+
+ qstr_init(&q, key);
+ return __get_ext_gid(&q);
+}
+
+static appid_t __is_excluded(const struct qstr *app_name, userid_t user)
+{
+ struct hashtable_entry *hash_cur;
+ unsigned int hash = app_name->hash;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+ if (atomic_read(&hash_cur->value) == user &&
+ qstr_case_eq(app_name, &hash_cur->key)) {
+ rcu_read_unlock();
+ return 1;
+ }
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+appid_t is_excluded(const char *key, userid_t user)
+{
+ struct qstr q;
+
+ qstr_init(&q, key);
+ return __is_excluded(&q, user);
+}
+
/* Kernel has already enforced everything we returned through
* derive_permissions_locked(), so this is used to lock down access
- * even further, such as enforcing that apps hold sdcard_rw. */
-int check_caller_access_to_name(struct inode *parent_node, const char* name) {
+ * even further, such as enforcing that apps hold sdcard_rw.
+ */
+int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name)
+{
+ struct qstr q_autorun = QSTR_LITERAL("autorun.inf");
+ struct qstr q__android_secure = QSTR_LITERAL(".android_secure");
+ struct qstr q_android_secure = QSTR_LITERAL("android_secure");
/* Always block security-sensitive files at root */
- if (parent_node && SDCARDFS_I(parent_node)->perm == PERM_ROOT) {
- if (!strcasecmp(name, "autorun.inf")
- || !strcasecmp(name, ".android_secure")
- || !strcasecmp(name, "android_secure")) {
+ if (parent_node && SDCARDFS_I(parent_node)->data->perm == PERM_ROOT) {
+ if (qstr_case_eq(name, &q_autorun)
+ || qstr_case_eq(name, &q__android_secure)
+ || qstr_case_eq(name, &q_android_secure)) {
return 0;
}
}
/* Root always has access; access for any other UIDs should always
- * be controlled through packages.list. */
- if (current_fsuid() == 0) {
+ * be controlled through packages.list.
+ */
+ if (current_fsuid() == 0)
return 1;
- }
/* No extra permissions to enforce */
return 1;
}
/* This function is used when file opening. The open flags must be
- * checked before calling check_caller_access_to_name() */
-int open_flags_to_access_mode(int open_flags) {
- if((open_flags & O_ACCMODE) == O_RDONLY) {
- return 0; /* R_OK */
- } else if ((open_flags & O_ACCMODE) == O_WRONLY) {
- return 1; /* W_OK */
- } else {
- /* Probably O_RDRW, but treat as default to be safe */
- return 1; /* R_OK | W_OK */
- }
-}
-
-static int insert_str_to_int_lock(struct packagelist_data *pkgl_dat, char *key,
- unsigned int value)
+ * checked before calling check_caller_access_to_name()
+ */
+int open_flags_to_access_mode(int open_flags)
{
- struct hashtable_entry *hash_cur;
- struct hashtable_entry *new_entry;
- unsigned int hash = str_hash(key);
-
- hash_for_each_possible(pkgl_dat->package_to_appid, hash_cur, hlist, hash) {
- if (!strcasecmp(key, hash_cur->key)) {
- hash_cur->value = value;
- return 0;
- }
- }
- new_entry = kmem_cache_alloc(hashtable_entry_cachep, GFP_KERNEL);
- if (!new_entry)
- return -ENOMEM;
- new_entry->key = kstrdup(key, GFP_KERNEL);
- new_entry->value = value;
- hash_add(pkgl_dat->package_to_appid, &new_entry->hlist, hash);
- return 0;
+ if ((open_flags & O_ACCMODE) == O_RDONLY)
+ return 0; /* R_OK */
+ if ((open_flags & O_ACCMODE) == O_WRONLY)
+ return 1; /* W_OK */
+ /* Probably O_RDWR, but treat as default to be safe */
+ return 1; /* R_OK | W_OK */
}
-static void fixup_perms(struct super_block *sb) {
- if (sb && sb->s_magic == SDCARDFS_SUPER_MAGIC) {
- mutex_lock(&sb->s_root->d_inode->i_mutex);
- get_derive_permissions_recursive(sb->s_root);
- mutex_unlock(&sb->s_root->d_inode->i_mutex);
- }
-}
+static struct hashtable_entry *alloc_hashtable_entry(const struct qstr *key,
+ appid_t value)
+{
+ struct hashtable_entry *ret = kmem_cache_alloc(hashtable_entry_cachep,
+ GFP_KERNEL);
+ if (!ret)
+ return NULL;
+ INIT_HLIST_NODE(&ret->dlist);
+ INIT_HLIST_NODE(&ret->hlist);
-static int insert_str_to_int(struct packagelist_data *pkgl_dat, char *key,
- unsigned int value) {
- int ret;
- struct sdcardfs_sb_info *sbinfo;
- mutex_lock(&sdcardfs_super_list_lock);
- mutex_lock(&pkgl_dat->hashtable_lock);
- ret = insert_str_to_int_lock(pkgl_dat, key, value);
- mutex_unlock(&pkgl_dat->hashtable_lock);
-
- list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
- if (sbinfo) {
- fixup_perms(sbinfo->sb);
- }
+ if (!qstr_copy(key, &ret->key)) {
+ kmem_cache_free(hashtable_entry_cachep, ret);
+ return NULL;
}
- mutex_unlock(&sdcardfs_super_list_lock);
+
+ atomic_set(&ret->value, value);
return ret;
}
-static void remove_str_to_int_lock(struct hashtable_entry *h_entry) {
- kfree(h_entry->key);
- hash_del(&h_entry->hlist);
- kmem_cache_free(hashtable_entry_cachep, h_entry);
+static int insert_packagelist_appid_entry_locked(const struct qstr *key, appid_t value)
+{
+ struct hashtable_entry *hash_cur;
+ struct hashtable_entry *new_entry;
+ unsigned int hash = key->hash;
+
+ hash_for_each_possible_rcu(package_to_appid, hash_cur, hlist, hash) {
+ if (qstr_case_eq(key, &hash_cur->key)) {
+ atomic_set(&hash_cur->value, value);
+ return 0;
+ }
+ }
+ new_entry = alloc_hashtable_entry(key, value);
+ if (!new_entry)
+ return -ENOMEM;
+ hash_add_rcu(package_to_appid, &new_entry->hlist, hash);
+ return 0;
}
-static void remove_str_to_int(struct packagelist_data *pkgl_dat, const char *key)
+static int insert_ext_gid_entry_locked(const struct qstr *key, appid_t value)
+{
+ struct hashtable_entry *hash_cur;
+ struct hashtable_entry *new_entry;
+ unsigned int hash = key->hash;
+
+ /* An extension can only belong to one gid */
+ hash_for_each_possible_rcu(ext_to_groupid, hash_cur, hlist, hash) {
+ if (qstr_case_eq(key, &hash_cur->key))
+ return -EINVAL;
+ }
+ new_entry = alloc_hashtable_entry(key, value);
+ if (!new_entry)
+ return -ENOMEM;
+ hash_add_rcu(ext_to_groupid, &new_entry->hlist, hash);
+ return 0;
+}
+
+static int insert_userid_exclude_entry_locked(const struct qstr *key, userid_t value)
+{
+ struct hashtable_entry *hash_cur;
+ struct hashtable_entry *new_entry;
+ unsigned int hash = key->hash;
+
+ /* Only insert if not already present */
+ hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+ if (atomic_read(&hash_cur->value) == value &&
+ qstr_case_eq(key, &hash_cur->key))
+ return 0;
+ }
+ new_entry = alloc_hashtable_entry(key, value);
+ if (!new_entry)
+ return -ENOMEM;
+ hash_add_rcu(package_to_userid, &new_entry->hlist, hash);
+ return 0;
+}
+
+static void fixup_all_perms_name(const struct qstr *key)
{
struct sdcardfs_sb_info *sbinfo;
- struct hashtable_entry *hash_cur;
- unsigned int hash = str_hash(key);
+ struct limit_search limit = {
+ .flags = BY_NAME,
+ .name = QSTR_INIT(key->name, key->len),
+ };
+ list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+ if (sbinfo_has_sdcard_magic(sbinfo))
+ fixup_perms_recursive(sbinfo->sb->s_root, &limit);
+ }
+}
+
+static void fixup_all_perms_name_userid(const struct qstr *key, userid_t userid)
+{
+ struct sdcardfs_sb_info *sbinfo;
+ struct limit_search limit = {
+ .flags = BY_NAME | BY_USERID,
+ .name = QSTR_INIT(key->name, key->len),
+ .userid = userid,
+ };
+ list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+ if (sbinfo_has_sdcard_magic(sbinfo))
+ fixup_perms_recursive(sbinfo->sb->s_root, &limit);
+ }
+}
+
+static void fixup_all_perms_userid(userid_t userid)
+{
+ struct sdcardfs_sb_info *sbinfo;
+ struct limit_search limit = {
+ .flags = BY_USERID,
+ .userid = userid,
+ };
+ list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+ if (sbinfo_has_sdcard_magic(sbinfo))
+ fixup_perms_recursive(sbinfo->sb->s_root, &limit);
+ }
+}
+
+static int insert_packagelist_entry(const struct qstr *key, appid_t value)
+{
+ int err;
+
mutex_lock(&sdcardfs_super_list_lock);
- mutex_lock(&pkgl_dat->hashtable_lock);
- hash_for_each_possible(pkgl_dat->package_to_appid, hash_cur, hlist, hash) {
- if (!strcasecmp(key, hash_cur->key)) {
- remove_str_to_int_lock(hash_cur);
+ err = insert_packagelist_appid_entry_locked(key, value);
+ if (!err)
+ fixup_all_perms_name(key);
+ mutex_unlock(&sdcardfs_super_list_lock);
+
+ return err;
+}
+
+static int insert_ext_gid_entry(const struct qstr *key, appid_t value)
+{
+ int err;
+
+ mutex_lock(&sdcardfs_super_list_lock);
+ err = insert_ext_gid_entry_locked(key, value);
+ mutex_unlock(&sdcardfs_super_list_lock);
+
+ return err;
+}
+
+static int insert_userid_exclude_entry(const struct qstr *key, userid_t value)
+{
+ int err;
+
+ mutex_lock(&sdcardfs_super_list_lock);
+ err = insert_userid_exclude_entry_locked(key, value);
+ if (!err)
+ fixup_all_perms_name_userid(key, value);
+ mutex_unlock(&sdcardfs_super_list_lock);
+
+ return err;
+}
+
+static void free_hashtable_entry(struct hashtable_entry *entry)
+{
+ kfree(entry->key.name);
+ kmem_cache_free(hashtable_entry_cachep, entry);
+}
+
+static void remove_packagelist_entry_locked(const struct qstr *key)
+{
+ struct hashtable_entry *hash_cur;
+ unsigned int hash = key->hash;
+ struct hlist_node *h_t;
+ HLIST_HEAD(free_list);
+
+ hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+ if (qstr_case_eq(key, &hash_cur->key)) {
+ hash_del_rcu(&hash_cur->hlist);
+ hlist_add_head(&hash_cur->dlist, &free_list);
+ }
+ }
+ hash_for_each_possible_rcu(package_to_appid, hash_cur, hlist, hash) {
+ if (qstr_case_eq(key, &hash_cur->key)) {
+ hash_del_rcu(&hash_cur->hlist);
+ hlist_add_head(&hash_cur->dlist, &free_list);
break;
}
}
- mutex_unlock(&pkgl_dat->hashtable_lock);
- list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
- if (sbinfo) {
- fixup_perms(sbinfo->sb);
- }
- }
- mutex_unlock(&sdcardfs_super_list_lock);
- return;
+ synchronize_rcu();
+ hlist_for_each_entry_safe(hash_cur, h_t, &free_list, dlist)
+ free_hashtable_entry(hash_cur);
}
-static void remove_all_hashentrys(struct packagelist_data *pkgl_dat)
+static void remove_packagelist_entry(const struct qstr *key)
+{
+ mutex_lock(&sdcardfs_super_list_lock);
+ remove_packagelist_entry_locked(key);
+ fixup_all_perms_name(key);
+ mutex_unlock(&sdcardfs_super_list_lock);
+}
+
+static void remove_ext_gid_entry_locked(const struct qstr *key, gid_t group)
+{
+ struct hashtable_entry *hash_cur;
+ unsigned int hash = key->hash;
+
+ hash_for_each_possible_rcu(ext_to_groupid, hash_cur, hlist, hash) {
+ if (qstr_case_eq(key, &hash_cur->key) && atomic_read(&hash_cur->value) == group) {
+ hash_del_rcu(&hash_cur->hlist);
+ synchronize_rcu();
+ free_hashtable_entry(hash_cur);
+ break;
+ }
+ }
+}
+
+static void remove_ext_gid_entry(const struct qstr *key, gid_t group)
+{
+ mutex_lock(&sdcardfs_super_list_lock);
+ remove_ext_gid_entry_locked(key, group);
+ mutex_unlock(&sdcardfs_super_list_lock);
+}
+
+static void remove_userid_all_entry_locked(userid_t userid)
{
struct hashtable_entry *hash_cur;
struct hlist_node *h_t;
+ HLIST_HEAD(free_list);
int i;
- mutex_lock(&pkgl_dat->hashtable_lock);
- hash_for_each_safe(pkgl_dat->package_to_appid, i, h_t, hash_cur, hlist)
- remove_str_to_int_lock(hash_cur);
- mutex_unlock(&pkgl_dat->hashtable_lock);
- hash_init(pkgl_dat->package_to_appid);
-}
-static struct packagelist_data * packagelist_create(void)
-{
- struct packagelist_data *pkgl_dat;
-
- pkgl_dat = kmalloc(sizeof(*pkgl_dat), GFP_KERNEL | __GFP_ZERO);
- if (!pkgl_dat) {
- printk(KERN_ERR "sdcardfs: Failed to create hash\n");
- return ERR_PTR(-ENOMEM);
+ hash_for_each_rcu(package_to_userid, i, hash_cur, hlist) {
+ if (atomic_read(&hash_cur->value) == userid) {
+ hash_del_rcu(&hash_cur->hlist);
+ hlist_add_head(&hash_cur->dlist, &free_list);
+ }
}
-
- mutex_init(&pkgl_dat->hashtable_lock);
- hash_init(pkgl_dat->package_to_appid);
-
- return pkgl_dat;
+ synchronize_rcu();
+ hlist_for_each_entry_safe(hash_cur, h_t, &free_list, dlist) {
+ free_hashtable_entry(hash_cur);
+ }
}
-static void packagelist_destroy(struct packagelist_data *pkgl_dat)
+static void remove_userid_all_entry(userid_t userid)
{
- remove_all_hashentrys(pkgl_dat);
- printk(KERN_INFO "sdcardfs: destroyed packagelist pkgld\n");
- kfree(pkgl_dat);
+ mutex_lock(&sdcardfs_super_list_lock);
+ remove_userid_all_entry_locked(userid);
+ fixup_all_perms_userid(userid);
+ mutex_unlock(&sdcardfs_super_list_lock);
}
-struct package_appid {
+static void remove_userid_exclude_entry_locked(const struct qstr *key, userid_t userid)
+{
+ struct hashtable_entry *hash_cur;
+ unsigned int hash = key->hash;
+
+ hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+ if (qstr_case_eq(key, &hash_cur->key) &&
+ atomic_read(&hash_cur->value) == userid) {
+ hash_del_rcu(&hash_cur->hlist);
+ synchronize_rcu();
+ free_hashtable_entry(hash_cur);
+ break;
+ }
+ }
+}
+
+static void remove_userid_exclude_entry(const struct qstr *key, userid_t userid)
+{
+ mutex_lock(&sdcardfs_super_list_lock);
+ remove_userid_exclude_entry_locked(key, userid);
+ fixup_all_perms_name_userid(key, userid);
+ mutex_unlock(&sdcardfs_super_list_lock);
+}
+
+static void packagelist_destroy(void)
+{
+ struct hashtable_entry *hash_cur;
+ struct hlist_node *h_t;
+ HLIST_HEAD(free_list);
+ int i;
+
+ mutex_lock(&sdcardfs_super_list_lock);
+ hash_for_each_rcu(package_to_appid, i, hash_cur, hlist) {
+ hash_del_rcu(&hash_cur->hlist);
+ hlist_add_head(&hash_cur->dlist, &free_list);
+ }
+ hash_for_each_rcu(package_to_userid, i, hash_cur, hlist) {
+ hash_del_rcu(&hash_cur->hlist);
+ hlist_add_head(&hash_cur->dlist, &free_list);
+ }
+ synchronize_rcu();
+ hlist_for_each_entry_safe(hash_cur, h_t, &free_list, dlist)
+ free_hashtable_entry(hash_cur);
+ mutex_unlock(&sdcardfs_super_list_lock);
+ pr_info("sdcardfs: destroyed packagelist pkgld\n");
+}
+
+struct package_details {
struct config_item item;
- int add_pid;
+ struct qstr name;
};
-static inline struct package_appid *to_package_appid(struct config_item *item)
+static inline struct package_details *to_package_details(struct config_item *item)
{
- return item ? container_of(item, struct package_appid, item) : NULL;
+ return item ? container_of(item, struct package_details, item) : NULL;
}
-static struct configfs_attribute package_appid_attr_add_pid = {
- .ca_owner = THIS_MODULE,
- .ca_name = "appid",
- .ca_mode = S_IRUGO | S_IWUGO,
-};
+CONFIGFS_ATTR_STRUCT(package_details);
+#define PACKAGE_DETAILS_ATTR(_name, _mode, _show, _store) \
+struct package_details_attribute package_details_attr_##_name = __CONFIGFS_ATTR(_name, _mode, _show, _store)
+#define PACKAGE_DETAILS_ATTRIBUTE(name) (&package_details_attr_##name.attr)
-static struct configfs_attribute *package_appid_attrs[] = {
- &package_appid_attr_add_pid,
- NULL,
-};
-
-static ssize_t package_appid_attr_show(struct config_item *item,
- struct configfs_attribute *attr,
+static ssize_t package_details_appid_show(struct package_details *package_details,
char *page)
{
- ssize_t count;
- count = sprintf(page, "%d\n", get_appid(pkgl_data_all, item->ci_name));
- return count;
+ return scnprintf(page, PAGE_SIZE, "%u\n", __get_appid(&package_details->name));
}
-static ssize_t package_appid_attr_store(struct config_item *item,
- struct configfs_attribute *attr,
+static ssize_t package_details_appid_store(struct package_details *package_details,
const char *page, size_t count)
{
- struct package_appid *package_appid = to_package_appid(item);
- unsigned long tmp;
- char *p = (char *) page;
+ unsigned int tmp;
int ret;
- tmp = simple_strtoul(p, &p, 10);
- if (!p || (*p && (*p != '\n')))
- return -EINVAL;
+ ret = kstrtouint(page, 10, &tmp);
+ if (ret)
+ return ret;
- if (tmp > INT_MAX)
- return -ERANGE;
- ret = insert_str_to_int(pkgl_data_all, item->ci_name, (unsigned int)tmp);
- package_appid->add_pid = tmp;
+ ret = insert_packagelist_entry(&package_details->name, tmp);
+
if (ret)
return ret;
return count;
}
-static void package_appid_release(struct config_item *item)
+static ssize_t package_details_excluded_userids_show(struct package_details *package_details,
+ char *page)
{
- printk(KERN_INFO "sdcardfs: removing %s\n", item->ci_dentry->d_name.name);
- /* item->ci_name is freed already, so we rely on the dentry */
- remove_str_to_int(pkgl_data_all, item->ci_dentry->d_name.name);
- kfree(to_package_appid(item));
+ struct hashtable_entry *hash_cur;
+ unsigned int hash = package_details->name.hash;
+ int count = 0;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+ if (qstr_case_eq(&package_details->name, &hash_cur->key))
+ count += scnprintf(page + count, PAGE_SIZE - count,
+ "%d ", atomic_read(&hash_cur->value));
+ }
+ rcu_read_unlock();
+ if (count)
+ count--;
+ count += scnprintf(page + count, PAGE_SIZE - count, "\n");
+ return count;
}
-static struct configfs_item_operations package_appid_item_ops = {
- .release = package_appid_release,
- .show_attribute = package_appid_attr_show,
- .store_attribute = package_appid_attr_store,
-};
-
-static struct config_item_type package_appid_type = {
- .ct_item_ops = &package_appid_item_ops,
- .ct_attrs = package_appid_attrs,
- .ct_owner = THIS_MODULE,
-};
-
-
-struct sdcardfs_packages {
- struct config_group group;
-};
-
-static inline struct sdcardfs_packages *to_sdcardfs_packages(struct config_item *item)
+static ssize_t package_details_excluded_userids_store(struct package_details *package_details,
+ const char *page, size_t count)
{
- return item ? container_of(to_config_group(item), struct sdcardfs_packages, group) : NULL;
+ unsigned int tmp;
+ int ret;
+
+ ret = kstrtouint(page, 10, &tmp);
+ if (ret)
+ return ret;
+
+ ret = insert_userid_exclude_entry(&package_details->name, tmp);
+
+ if (ret)
+ return ret;
+
+ return count;
}
-static struct config_item *sdcardfs_packages_make_item(struct config_group *group, const char *name)
+static ssize_t package_details_clear_userid_store(struct package_details *package_details,
+ const char *page, size_t count)
{
- struct package_appid *package_appid;
+ unsigned int tmp;
+ int ret;
- package_appid = kzalloc(sizeof(struct package_appid), GFP_KERNEL);
- if (!package_appid)
- return ERR_PTR(-ENOMEM);
-
- config_item_init_type_name(&package_appid->item, name,
- &package_appid_type);
-
- package_appid->add_pid = 0;
-
- return &package_appid->item;
+ ret = kstrtouint(page, 10, &tmp);
+ if (ret)
+ return ret;
+ remove_userid_exclude_entry(&package_details->name, tmp);
+ return count;
}
-static struct configfs_attribute sdcardfs_packages_attr_description = {
- .ca_owner = THIS_MODULE,
- .ca_name = "packages_gid.list",
- .ca_mode = S_IRUGO,
-};
+static void package_details_release(struct config_item *item)
+{
+ struct package_details *package_details = to_package_details(item);
-static struct configfs_attribute *sdcardfs_packages_attrs[] = {
- &sdcardfs_packages_attr_description,
+ pr_info("sdcardfs: removing %s\n", package_details->name.name);
+ remove_packagelist_entry(&package_details->name);
+ kfree(package_details->name.name);
+ kfree(package_details);
+}
+
+PACKAGE_DETAILS_ATTR(appid, S_IRUGO | S_IWUGO, package_details_appid_show, package_details_appid_store);
+PACKAGE_DETAILS_ATTR(excluded_userids, S_IRUGO | S_IWUGO,
+ package_details_excluded_userids_show, package_details_excluded_userids_store);
+PACKAGE_DETAILS_ATTR(clear_userid, S_IWUGO, NULL, package_details_clear_userid_store);
+
+static struct configfs_attribute *package_details_attrs[] = {
+ PACKAGE_DETAILS_ATTRIBUTE(appid),
+ PACKAGE_DETAILS_ATTRIBUTE(excluded_userids),
+ PACKAGE_DETAILS_ATTRIBUTE(clear_userid),
NULL,
};
-static ssize_t packages_attr_show(struct config_item *item,
- struct configfs_attribute *attr,
+CONFIGFS_ATTR_OPS(package_details);
+
+static struct configfs_item_operations package_details_item_ops = {
+ .release = package_details_release,
+ .show_attribute = package_details_attr_show,
+ .store_attribute = package_details_attr_store,
+};
+
+static struct config_item_type package_appid_type = {
+ .ct_item_ops = &package_details_item_ops,
+ .ct_attrs = package_details_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+struct extensions_value {
+ struct config_group group;
+ unsigned int num;
+};
+
+struct extension_details {
+ struct config_item item;
+ struct qstr name;
+ unsigned int num;
+};
+
+static inline struct extensions_value *to_extensions_value(struct config_item *item)
+{
+ return item ? container_of(to_config_group(item), struct extensions_value, group) : NULL;
+}
+
+static inline struct extension_details *to_extension_details(struct config_item *item)
+{
+ return item ? container_of(item, struct extension_details, item) : NULL;
+}
+
+static void extension_details_release(struct config_item *item)
+{
+ struct extension_details *extension_details = to_extension_details(item);
+
+ pr_info("sdcardfs: No longer mapping %s files to gid %d\n",
+ extension_details->name.name, extension_details->num);
+ remove_ext_gid_entry(&extension_details->name, extension_details->num);
+ kfree(extension_details->name.name);
+ kfree(extension_details);
+}
+
+static struct configfs_item_operations extension_details_item_ops = {
+ .release = extension_details_release,
+};
+
+static struct config_item_type extension_details_type = {
+ .ct_item_ops = &extension_details_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *extension_details_make_item(struct config_group *group, const char *name)
+{
+ struct extensions_value *extensions_value = to_extensions_value(&group->cg_item);
+ struct extension_details *extension_details = kzalloc(sizeof(struct extension_details), GFP_KERNEL);
+ const char *tmp;
+ int ret;
+
+ if (!extension_details)
+ return ERR_PTR(-ENOMEM);
+
+ tmp = kstrdup(name, GFP_KERNEL);
+ if (!tmp) {
+ kfree(extension_details);
+ return ERR_PTR(-ENOMEM);
+ }
+ qstr_init(&extension_details->name, tmp);
+ ret = insert_ext_gid_entry(&extension_details->name, extensions_value->num);
+
+ if (ret) {
+ kfree(extension_details->name.name);
+ kfree(extension_details);
+ return ERR_PTR(ret);
+ }
+ config_item_init_type_name(&extension_details->item, name, &extension_details_type);
+
+ return &extension_details->item;
+}
+
+static struct configfs_group_operations extensions_value_group_ops = {
+ .make_item = extension_details_make_item,
+};
+
+static struct config_item_type extensions_name_type = {
+ .ct_group_ops = &extensions_value_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *extensions_make_group(struct config_group *group, const char *name)
+{
+ struct extensions_value *extensions_value;
+ unsigned int tmp;
+ int ret;
+
+ extensions_value = kzalloc(sizeof(struct extensions_value), GFP_KERNEL);
+ if (!extensions_value)
+ return ERR_PTR(-ENOMEM);
+ ret = kstrtouint(name, 10, &tmp);
+ if (ret) {
+ kfree(extensions_value);
+ return ERR_PTR(ret);
+ }
+
+ extensions_value->num = tmp;
+ config_group_init_type_name(&extensions_value->group, name,
+ &extensions_name_type);
+ return &extensions_value->group;
+}
+
+static void extensions_drop_group(struct config_group *group, struct config_item *item)
+{
+ struct extensions_value *value = to_extensions_value(item);
+
+ pr_info("sdcardfs: No longer mapping any files to gid %d\n", value->num);
+ kfree(value);
+}
+
+static struct configfs_group_operations extensions_group_ops = {
+ .make_group = extensions_make_group,
+ .drop_item = extensions_drop_group,
+};
+
+static struct config_item_type extensions_type = {
+ .ct_group_ops = &extensions_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+struct config_group extension_group = {
+ .cg_item = {
+ .ci_namebuf = "extensions",
+ .ci_type = &extensions_type,
+ },
+};
+
+struct packages {
+ struct configfs_subsystem subsystem;
+};
+
+static inline struct packages *to_packages(struct config_item *item)
+{
+ return item ? container_of(to_configfs_subsystem(to_config_group(item)), struct packages, subsystem) : NULL;
+}
+
+CONFIGFS_ATTR_STRUCT(packages);
+#define PACKAGES_ATTR(_name, _mode, _show, _store) \
+struct packages_attribute packages_attr_##_name = __CONFIGFS_ATTR(_name, _mode, _show, _store)
+#define PACKAGES_ATTR_RO(_name, _show) \
+struct packages_attribute packages_attr_##_name = __CONFIGFS_ATTR_RO(_name, _show)
+
+static struct config_item *packages_make_item(struct config_group *group, const char *name)
+{
+ struct package_details *package_details;
+ const char *tmp;
+
+ package_details = kzalloc(sizeof(struct package_details), GFP_KERNEL);
+ if (!package_details)
+ return ERR_PTR(-ENOMEM);
+ tmp = kstrdup(name, GFP_KERNEL);
+ if (!tmp) {
+ kfree(package_details);
+ return ERR_PTR(-ENOMEM);
+ }
+ qstr_init(&package_details->name, tmp);
+ config_item_init_type_name(&package_details->item, name,
+ &package_appid_type);
+
+ return &package_details->item;
+}
+
+static ssize_t packages_list_show(struct packages *packages,
char *page)
{
- struct hashtable_entry *hash_cur;
- struct hlist_node *h_t;
+ struct hashtable_entry *hash_cur_app;
+ struct hashtable_entry *hash_cur_user;
int i;
int count = 0, written = 0;
- char errormsg[] = "<truncated>\n";
+ const char errormsg[] = "<truncated>\n";
+ unsigned int hash;
- mutex_lock(&pkgl_data_all->hashtable_lock);
- hash_for_each_safe(pkgl_data_all->package_to_appid, i, h_t, hash_cur, hlist) {
- written = scnprintf(page + count, PAGE_SIZE - sizeof(errormsg) - count, "%s %d\n", (char *)hash_cur->key, hash_cur->value);
- if (count + written == PAGE_SIZE - sizeof(errormsg)) {
+ rcu_read_lock();
+ hash_for_each_rcu(package_to_appid, i, hash_cur_app, hlist) {
+ written = scnprintf(page + count, PAGE_SIZE - sizeof(errormsg) - count, "%s %d\n",
+ hash_cur_app->key.name, atomic_read(&hash_cur_app->value));
+ hash = hash_cur_app->key.hash;
+ hash_for_each_possible_rcu(package_to_userid, hash_cur_user, hlist, hash) {
+ if (qstr_case_eq(&hash_cur_app->key, &hash_cur_user->key)) {
+ written += scnprintf(page + count + written - 1,
+ PAGE_SIZE - sizeof(errormsg) - count - written + 1,
+ " %d\n", atomic_read(&hash_cur_user->value)) - 1;
+ }
+ }
+ if (count + written == PAGE_SIZE - sizeof(errormsg) - 1) {
count += scnprintf(page + count, PAGE_SIZE - count, errormsg);
break;
}
count += written;
}
- mutex_unlock(&pkgl_data_all->hashtable_lock);
+ rcu_read_unlock();
return count;
}
-static void sdcardfs_packages_release(struct config_item *item)
+static ssize_t packages_remove_userid_store(struct packages *packages,
+ const char *page, size_t count)
{
+ unsigned int tmp;
+ int ret;
- printk(KERN_INFO "sdcardfs: destroyed something?\n");
- kfree(to_sdcardfs_packages(item));
+ ret = kstrtouint(page, 10, &tmp);
+ if (ret)
+ return ret;
+ remove_userid_all_entry(tmp);
+ return count;
}
-static struct configfs_item_operations sdcardfs_packages_item_ops = {
- .release = sdcardfs_packages_release,
- .show_attribute = packages_attr_show,
+struct packages_attribute packages_attr_packages_gid_list = __CONFIGFS_ATTR_RO(packages_gid.list, packages_list_show);
+PACKAGES_ATTR(remove_userid, S_IWUGO, NULL, packages_remove_userid_store);
+
+static struct configfs_attribute *packages_attrs[] = {
+ &packages_attr_packages_gid_list.attr,
+ &packages_attr_remove_userid.attr,
+ NULL,
+};
+
+CONFIGFS_ATTR_OPS(packages)
+static struct configfs_item_operations packages_item_ops = {
+ .show_attribute = packages_attr_show,
+ .store_attribute = packages_attr_store,
};
/*
* Note that, since no extra work is required on ->drop_item(),
* no ->drop_item() is provided.
*/
-static struct configfs_group_operations sdcardfs_packages_group_ops = {
- .make_item = sdcardfs_packages_make_item,
+static struct configfs_group_operations packages_group_ops = {
+ .make_item = packages_make_item,
};
-static struct config_item_type sdcardfs_packages_type = {
- .ct_item_ops = &sdcardfs_packages_item_ops,
- .ct_group_ops = &sdcardfs_packages_group_ops,
- .ct_attrs = sdcardfs_packages_attrs,
+static struct config_item_type packages_type = {
+ .ct_item_ops = &packages_item_ops,
+ .ct_group_ops = &packages_group_ops,
+ .ct_attrs = packages_attrs,
.ct_owner = THIS_MODULE,
};
-static struct configfs_subsystem sdcardfs_packages_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "sdcardfs",
- .ci_type = &sdcardfs_packages_type,
+struct config_group *sd_default_groups[] = {
+ &extension_group,
+ NULL,
+};
+
+static struct packages sdcardfs_packages = {
+ .subsystem = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "sdcardfs",
+ .ci_type = &packages_type,
+ },
+ .default_groups = sd_default_groups,
},
},
};
static int configfs_sdcardfs_init(void)
{
- int ret;
- struct configfs_subsystem *subsys = &sdcardfs_packages_subsys;
+ int ret, i;
+ struct configfs_subsystem *subsys = &sdcardfs_packages.subsystem;
+ for (i = 0; sd_default_groups[i]; i++)
+ config_group_init(sd_default_groups[i]);
config_group_init(&subsys->su_group);
mutex_init(&subsys->su_mutex);
ret = configfs_register_subsystem(subsys);
if (ret) {
- printk(KERN_ERR "Error %d while registering subsystem %s\n",
+ pr_err("Error %d while registering subsystem %s\n",
ret,
subsys->su_group.cg_item.ci_namebuf);
}
@@ -420,7 +873,7 @@
static void configfs_sdcardfs_exit(void)
{
- configfs_unregister_subsystem(&sdcardfs_packages_subsys);
+ configfs_unregister_subsystem(&sdcardfs_packages.subsystem);
}
int packagelist_init(void)
@@ -429,19 +882,17 @@
kmem_cache_create("packagelist_hashtable_entry",
sizeof(struct hashtable_entry), 0, 0, NULL);
if (!hashtable_entry_cachep) {
- printk(KERN_ERR "sdcardfs: failed creating pkgl_hashtable entry slab cache\n");
+ pr_err("sdcardfs: failed creating pkgl_hashtable entry slab cache\n");
return -ENOMEM;
}
- pkgl_data_all = packagelist_create();
configfs_sdcardfs_init();
- return 0;
+ return 0;
}
void packagelist_exit(void)
{
configfs_sdcardfs_exit();
- packagelist_destroy(pkgl_data_all);
- if (hashtable_entry_cachep)
- kmem_cache_destroy(hashtable_entry_cachep);
+ packagelist_destroy();
+ kmem_cache_destroy(hashtable_entry_cachep);
}
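Everything registered above is driven from userspace through configfs: creating a directory under the sdcardfs subsystem calls packages_make_item(), writing its appid attribute feeds insert_packagelist_entry(), excluded_userids and clear_userid manage per-user exclusions, remove_userid drops a user everywhere, and directories under extensions/<gid>/ map file extensions to a group. A hedged sketch of the sequence, assuming configfs is mounted at /config as is typical on Android (the package name and ids are invented for the example):

	#include <errno.h>
	#include <fcntl.h>
	#include <string.h>
	#include <sys/stat.h>
	#include <unistd.h>

	/* Hypothetical helper: write a short string to a configfs attribute. */
	static int write_attr(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, val, strlen(val)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int register_example_package(void)
	{
		/* mkdir triggers packages_make_item() for this package. */
		if (mkdir("/config/sdcardfs/com.example.app", 0775) && errno != EEXIST)
			return -1;
		/* Stored via package_details_appid_store(). */
		if (write_attr("/config/sdcardfs/com.example.app/appid", "10123"))
			return -1;
		/* Hide this package's directories from user 10. */
		return write_attr("/config/sdcardfs/com.example.app/excluded_userids", "10");
	}
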
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 7f3dee3..3874d40 100755
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -29,6 +29,7 @@
#include <linux/dcache.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/aio.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/namei.h>
@@ -52,7 +53,7 @@
#define SDCARDFS_ROOT_INO 1
/* useful for tracking code reachability */
-#define UDBG printk(KERN_DEFAULT "DBG:%s:%s:%d\n", __FILE__, __func__, __LINE__)
+#define UDBG pr_default("DBG:%s:%s:%d\n", __FILE__, __func__, __LINE__)
#define SDCARDFS_DIRENT_SIZE 256
@@ -65,71 +66,89 @@
#define AID_SDCARD_PICS 1033 /* external storage photos access */
#define AID_SDCARD_AV 1034 /* external storage audio/video access */
#define AID_SDCARD_ALL 1035 /* access all users external storage */
+#define AID_MEDIA_OBB 1059 /* obb files */
+
+#define AID_SDCARD_IMAGE 1057
#define AID_PACKAGE_INFO 1027
-#define fix_derived_permission(x) \
+
+/*
+ * Permissions are handled by our permission function.
+ * We don't want anyone who happens to look at our inode value to prematurely
+ * block access, so store more permissive values. These are probably never
+ * used.
+ */
+#define fixup_tmp_permissions(x) \
do { \
- (x)->i_uid = SDCARDFS_I(x)->d_uid; \
- (x)->i_gid = get_gid(SDCARDFS_I(x)); \
- (x)->i_mode = ((x)->i_mode & S_IFMT) | get_mode(SDCARDFS_I(x));\
+ (x)->i_uid = SDCARDFS_I(x)->data->d_uid; \
+ (x)->i_gid = AID_SDCARD_RW; \
+ (x)->i_mode = ((x)->i_mode & S_IFMT) | 0775;\
} while (0)
-
/* OVERRIDE_CRED() and REVERT_CRED()
- * OVERRID_CRED()
- * backup original task->cred
- * and modifies task->cred->fsuid/fsgid to specified value.
+ * OVERRIDE_CRED()
+ * backup original task->cred
+ * and modifies task->cred->fsuid/fsgid to specified value.
* REVERT_CRED()
- * restore original task->cred->fsuid/fsgid.
+ * restore original task->cred->fsuid/fsgid.
 * These two macros should be used as a pair, and OVERRIDE_CRED() should be
* placed at the beginning of a function, right after variable declaration.
*/
-#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred) \
- saved_cred = override_fsids(sdcardfs_sbi); \
- if (!saved_cred) { return -ENOMEM; }
+#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info) \
+ do { \
+ saved_cred = override_fsids(sdcardfs_sbi, info->data); \
+ if (!saved_cred) \
+ return -ENOMEM; \
+ } while (0)
-#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred) \
- saved_cred = override_fsids(sdcardfs_sbi); \
- if (!saved_cred) { return ERR_PTR(-ENOMEM); }
+#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info) \
+ do { \
+ saved_cred = override_fsids(sdcardfs_sbi, info->data); \
+ if (!saved_cred) \
+ return ERR_PTR(-ENOMEM); \
+ } while (0)
#define REVERT_CRED(saved_cred) revert_fsids(saved_cred)
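As the comment above says, the override/revert pair brackets a whole operation; a hedged sketch of the convention (the op and its body are illustrative, not code from this patch):

	static int example_op(struct inode *dir, struct dentry *dentry, umode_t mode)
	{
		int err;
		const struct cred *saved_cred = NULL;

		/* Right after the declarations; may return -ENOMEM for us. */
		OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));

		err = 0;	/* ... work against the lower filesystem ... */

		REVERT_CRED(saved_cred);
		return err;
	}
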
-#define DEBUG_CRED() \
- printk("KAKJAGI: %s:%d fsuid %d fsgid %d\n", \
- __FUNCTION__, __LINE__, \
- (int)current->cred->fsuid, \
- (int)current->cred->fsgid);
-
/* Android 5.0 support */
/* Permission mode for a specific node. Controls how file permissions
- * are derived for children nodes. */
+ * are derived for children nodes.
+ */
typedef enum {
- /* Nothing special; this node should just inherit from its parent. */
- PERM_INHERIT,
- /* This node is one level above a normal root; used for legacy layouts
- * which use the first level to represent user_id. */
- PERM_PRE_ROOT,
- /* This node is "/" */
- PERM_ROOT,
- /* This node is "/Android" */
- PERM_ANDROID,
- /* This node is "/Android/data" */
- PERM_ANDROID_DATA,
- /* This node is "/Android/obb" */
- PERM_ANDROID_OBB,
- /* This node is "/Android/media" */
- PERM_ANDROID_MEDIA,
+ /* Nothing special; this node should just inherit from its parent. */
+ PERM_INHERIT,
+ /* This node is one level above a normal root; used for legacy layouts
+ * which use the first level to represent user_id.
+ */
+ PERM_PRE_ROOT,
+ /* This node is "/" */
+ PERM_ROOT,
+ /* This node is "/Android" */
+ PERM_ANDROID,
+ /* This node is "/Android/data" */
+ PERM_ANDROID_DATA,
+ /* This node is "/Android/obb" */
+ PERM_ANDROID_OBB,
+ /* This node is "/Android/media" */
+ PERM_ANDROID_MEDIA,
+ /* This node is "/Android/[data|media|obb]/[package]" */
+ PERM_ANDROID_PACKAGE,
+ /* This node is "/Android/[data|media|obb]/[package]/cache" */
+ PERM_ANDROID_PACKAGE_CACHE,
} perm_t;
struct sdcardfs_sb_info;
struct sdcardfs_mount_options;
+struct sdcardfs_inode_info;
+struct sdcardfs_inode_data;
/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
-const struct cred * override_fsids(struct sdcardfs_sb_info* sbi);
+const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
+ struct sdcardfs_inode_data *data);
/* Do not directly use this function, use REVERT_CRED() instead. */
-void revert_fsids(const struct cred * old_cred);
+void revert_fsids(const struct cred *old_cred);
/* operations vectors defined in specific files */
extern const struct file_operations sdcardfs_main_fops;
@@ -161,14 +180,26 @@
const struct vm_operations_struct *lower_vm_ops;
};
-/* sdcardfs inode data in memory */
-struct sdcardfs_inode_info {
- struct inode *lower_inode;
- /* state derived based on current position in hierachy */
+struct sdcardfs_inode_data {
+ struct kref refcount;
+ bool abandoned;
+
perm_t perm;
userid_t userid;
uid_t d_uid;
bool under_android;
+ bool under_cache;
+ bool under_obb;
+};
+
+/* sdcardfs inode data in memory */
+struct sdcardfs_inode_info {
+ struct inode *lower_inode;
+ /* state derived based on current position in hierarchy */
+ struct sdcardfs_inode_data *data;
+
+ /* top folder for ownership */
+ struct sdcardfs_inode_data *top_data;
struct inode vfs_inode;
};
@@ -185,18 +216,25 @@
uid_t fs_low_uid;
gid_t fs_low_gid;
userid_t fs_user_id;
- gid_t gid;
- mode_t mask;
bool multiuser;
unsigned int reserved_mb;
};
+struct sdcardfs_vfsmount_options {
+ gid_t gid;
+ mode_t mask;
+};
+
+extern int parse_options_remount(struct super_block *sb, char *options, int silent,
+ struct sdcardfs_vfsmount_options *vfsopts);
+
/* sdcardfs super-block data in memory */
struct sdcardfs_sb_info {
struct super_block *sb;
struct super_block *lower_sb;
/* derived perm policy : some of the options have been added
- * to sdcardfs_mount_options (Android 4.4 support) */
+ * to sdcardfs_mount_options (Android 4.4 support)
+ */
struct sdcardfs_mount_options options;
spinlock_t lock; /* protects obbpath */
char *obbpath_s;
@@ -307,7 +345,7 @@
{ \
struct path pname; \
spin_lock(&SDCARDFS_D(dent)->lock); \
- if(SDCARDFS_D(dent)->pname.dentry) { \
+ if (SDCARDFS_D(dent)->pname.dentry) { \
pathcpy(&pname, &SDCARDFS_D(dent)->pname); \
SDCARDFS_D(dent)->pname.dentry = NULL; \
SDCARDFS_D(dent)->pname.mnt = NULL; \
@@ -321,38 +359,97 @@
SDCARDFS_DENT_FUNC(lower_path)
SDCARDFS_DENT_FUNC(orig_path)
-static inline int get_gid(struct sdcardfs_inode_info *info) {
- struct sdcardfs_sb_info *sb_info = SDCARDFS_SB(info->vfs_inode.i_sb);
- if (sb_info->options.gid == AID_SDCARD_RW) {
+static inline bool sbinfo_has_sdcard_magic(struct sdcardfs_sb_info *sbinfo)
+{
+ return sbinfo && sbinfo->sb
+ && sbinfo->sb->s_magic == SDCARDFS_SUPER_MAGIC;
+}
+
+static inline struct sdcardfs_inode_data *data_get(
+ struct sdcardfs_inode_data *data)
+{
+ if (data)
+ kref_get(&data->refcount);
+ return data;
+}
+
+static inline struct sdcardfs_inode_data *top_data_get(
+ struct sdcardfs_inode_info *info)
+{
+ return data_get(info->top_data);
+}
+
+extern void data_release(struct kref *ref);
+
+static inline void data_put(struct sdcardfs_inode_data *data)
+{
+ kref_put(&data->refcount, data_release);
+}
+
+static inline void release_own_data(struct sdcardfs_inode_info *info)
+{
+ /*
+ * This happens exactly once per inode. At this point, the inode that
+ * originally held this data is about to be freed, and all references
+ * to it are held as a top value, and will likely be released soon.
+ */
+ info->data->abandoned = true;
+ data_put(info->data);
+}
+
+static inline void set_top(struct sdcardfs_inode_info *info,
+ struct sdcardfs_inode_data *top)
+{
+ struct sdcardfs_inode_data *old_top = info->top_data;
+
+ if (top)
+ data_get(top);
+ info->top_data = top;
+ if (old_top)
+ data_put(old_top);
+}
+
+static inline int get_gid(struct vfsmount *mnt,
+ struct sdcardfs_inode_data *data)
+{
+ struct sdcardfs_vfsmount_options *opts = mnt->data;
+
+ if (opts->gid == AID_SDCARD_RW)
/* As an optimization, certain trusted system components only run
* as owner but operate across all users. Since we're now handing
* out the sdcard_rw GID only to trusted apps, we're okay relaxing
* the user boundary enforcement for the default view. The UIDs
- * assigned to app directories are still multiuser aware. */
+ * assigned to app directories are still multiuser aware.
+ */
return AID_SDCARD_RW;
- } else {
- return multiuser_get_uid(info->userid, sb_info->options.gid);
- }
+ else
+ return multiuser_get_uid(data->userid, opts->gid);
}
-static inline int get_mode(struct sdcardfs_inode_info *info) {
+
+static inline int get_mode(struct vfsmount *mnt,
+ struct sdcardfs_inode_info *info,
+ struct sdcardfs_inode_data *data)
+{
int owner_mode;
int filtered_mode;
- struct sdcardfs_sb_info *sb_info = SDCARDFS_SB(info->vfs_inode.i_sb);
- int visible_mode = 0775 & ~sb_info->options.mask;
+ struct sdcardfs_vfsmount_options *opts = mnt->data;
+ int visible_mode = 0775 & ~opts->mask;
- if (info->perm == PERM_PRE_ROOT) {
+
+ if (data->perm == PERM_PRE_ROOT) {
/* Top of multi-user view should always be visible to ensure
- * secondary users can traverse inside. */
+ * secondary users can traverse inside.
+ */
visible_mode = 0711;
- } else if (info->under_android) {
+ } else if (data->under_android) {
/* Block "other" access to Android directories, since only apps
* belonging to a specific user should be in there; we still
- * leave +x open for the default view. */
- if (sb_info->options.gid == AID_SDCARD_RW) {
+ * leave +x open for the default view.
+ */
+ if (opts->gid == AID_SDCARD_RW)
visible_mode = visible_mode & ~0006;
- } else {
+ else
visible_mode = visible_mode & ~0007;
- }
}
owner_mode = info->lower_inode->i_mode & 0700;
filtered_mode = visible_mode & (owner_mode | (owner_mode >> 3) | (owner_mode >> 6));
@@ -377,7 +474,7 @@
/* in case of a local obb dentry
* the orig_path should be returned
*/
- if(has_graft_path(dent))
+ if (has_graft_path(dent))
sdcardfs_get_orig_path(dent, real_lower);
else
sdcardfs_get_lower_path(dent, real_lower);
@@ -386,7 +483,7 @@
static inline void sdcardfs_put_real_lower(const struct dentry *dent,
struct path *real_lower)
{
- if(has_graft_path(dent))
+ if (has_graft_path(dent))
sdcardfs_put_orig_path(dent, real_lower);
else
sdcardfs_put_lower_path(dent, real_lower);
@@ -396,20 +493,32 @@
extern struct list_head sdcardfs_super_list;
/* for packagelist.c */
-extern appid_t get_appid(void *pkgl_id, const char *app_name);
-extern int check_caller_access_to_name(struct inode *parent_node, const char* name);
+extern appid_t get_appid(const char *app_name);
+extern appid_t get_ext_gid(const char *app_name);
+extern appid_t is_excluded(const char *app_name, userid_t userid);
+extern int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name);
extern int open_flags_to_access_mode(int open_flags);
extern int packagelist_init(void);
extern void packagelist_exit(void);
/* for derived_perm.c */
+#define BY_NAME (1 << 0)
+#define BY_USERID (1 << 1)
+struct limit_search {
+ unsigned int flags;
+ struct qstr name;
+ userid_t userid;
+};
+
extern void setup_derived_state(struct inode *inode, perm_t perm,
- userid_t userid, uid_t uid, bool under_android);
+ userid_t userid, uid_t uid, bool under_android,
+ struct sdcardfs_inode_data *top);
extern void get_derived_permission(struct dentry *parent, struct dentry *dentry);
-extern void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, struct dentry *newdentry);
-extern void get_derive_permissions_recursive(struct dentry *parent);
+extern void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, const struct qstr *name);
+extern void fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit);
extern void update_derived_permission_lock(struct dentry *dentry);
+void fixup_lower_ownership(struct dentry *dentry, const char *name);
extern int need_graft_path(struct dentry *dentry);
extern int is_base_obbpath(struct dentry *dentry);
extern int is_obbpath_invalid(struct dentry *dentry);
@@ -419,6 +528,7 @@
static inline struct dentry *lock_parent(struct dentry *dentry)
{
struct dentry *dir = dget_parent(dentry);
+
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
return dir;
}
@@ -444,7 +554,7 @@
goto out_unlock;
}
- err = vfs_mkdir(parent.dentry->d_inode, dent, mode);
+ err = vfs_mkdir2(parent.mnt, parent.dentry->d_inode, dent, mode);
if (err) {
if (err == -EEXIST)
err = 0;
@@ -455,7 +565,7 @@
attrs.ia_gid = gid;
attrs.ia_valid = ATTR_UID | ATTR_GID;
mutex_lock(&dent->d_inode->i_mutex);
- notify_change(dent, &attrs);
+ notify_change2(parent.mnt, dent, &attrs);
mutex_unlock(&dent->d_inode->i_mutex);
out_dput:
@@ -513,12 +623,17 @@
return 1;
}
-/* Copies attrs and maintains sdcardfs managed attrs */
+/*
+ * Copies attrs and maintains sdcardfs managed attrs
+ * Since our permission check handles all special permissions, set those to be open
+ */
static inline void sdcardfs_copy_and_fix_attrs(struct inode *dest, const struct inode *src)
{
- dest->i_mode = (src->i_mode & S_IFMT) | get_mode(SDCARDFS_I(dest));
- dest->i_uid = SDCARDFS_I(dest)->d_uid;
- dest->i_gid = get_gid(SDCARDFS_I(dest));
+
+ dest->i_mode = (src->i_mode & S_IFMT) | S_IRWXU | S_IRWXG |
+ S_IROTH | S_IXOTH; /* 0775 */
+ dest->i_uid = SDCARDFS_I(dest)->data->d_uid;
+ dest->i_gid = AID_SDCARD_RW;
dest->i_rdev = src->i_rdev;
dest->i_atime = src->i_atime;
dest->i_mtime = src->i_mtime;
@@ -527,4 +642,22 @@
dest->i_flags = src->i_flags;
set_nlink(dest, src->i_nlink);
}
+
+static inline bool str_case_eq(const char *s1, const char *s2)
+{
+ return !strcasecmp(s1, s2);
+}
+
+static inline bool str_n_case_eq(const char *s1, const char *s2, size_t len)
+{
+ return !strncasecmp(s1, s2, len);
+}
+
+static inline bool qstr_case_eq(const struct qstr *q1, const struct qstr *q2)
+{
+ return q1->len == q2->len && str_case_eq(q1->name, q2->name);
+}
+
+#define QSTR_LITERAL(string) QSTR_INIT(string, sizeof(string)-1)
+
#endif /* not _SDCARDFS_H_ */
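
The mode derivation in get_mode() above is plain bit arithmetic: the visible mode (0775 minus the per-mount mask, or a fixed 0711 at the top of a multi-user view) is intersected with the lower inode's owner bits replicated into the group and other positions, so a file the lower owner cannot write never appears writable through sdcardfs. A small userspace sketch of that arithmetic, with an assumed mask of 0006 and an assumed lower mode of 0500:

#include <stdio.h>

/* Userspace analog of sdcardfs get_mode(); mask and lower mode are assumed. */
int main(void)
{
	unsigned int mask = 0006;                  /* assumed per-mount mask */
	unsigned int visible_mode = 0775 & ~mask;  /* 0771 */
	unsigned int owner_mode = 0500 & 0700;     /* assumed lower inode mode */
	unsigned int filtered_mode =
		visible_mode & (owner_mode | (owner_mode >> 3) | (owner_mode >> 6));

	printf("visible=%o owner=%o filtered=%o\n",
	       visible_mode, owner_mode, filtered_mode);
	return 0;
}

With these values the result is 0551; no write bit survives because the lower owner itself has none.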
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index 1d64901..7f4539b 100755
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -26,6 +26,23 @@
*/
static struct kmem_cache *sdcardfs_inode_cachep;
+/*
+ * To support the top references, we must track some data separately.
+ * An sdcardfs_inode_info always has a reference to its data, and once set up,
+ * also has a reference to its top. The top may be itself, in which case it
+ * holds two references to its data. When top is changed, it takes a ref to the
+ * new data and then drops the ref to the old data.
+ */
+static struct kmem_cache *sdcardfs_inode_data_cachep;
+
+void data_release(struct kref *ref)
+{
+ struct sdcardfs_inode_data *data =
+ container_of(ref, struct sdcardfs_inode_data, refcount);
+
+ kmem_cache_free(sdcardfs_inode_data_cachep, data);
+}
+
/* final actions when unmounting a file system */
static void sdcardfs_put_super(struct super_block *sb)
{
@@ -36,7 +53,7 @@
if (!spd)
return;
- if(spd->obbpath_s) {
+ if (spd->obbpath_s) {
kfree(spd->obbpath_s);
path_put(&spd->obbpath);
}
@@ -64,7 +81,7 @@
if (sbi->options.reserved_mb) {
/* Invalid statfs informations. */
if (buf->f_bsize == 0) {
- printk(KERN_ERR "Returned block size is zero.\n");
+ pr_err("Returned block size is zero.\n");
return -EINVAL;
}
@@ -100,8 +117,7 @@
* SILENT, but anything else left over is an error.
*/
if ((*flags & ~(MS_RDONLY | MS_MANDLOCK | MS_SILENT)) != 0) {
- printk(KERN_ERR
- "sdcardfs: remount flags 0x%x unsupported\n", *flags);
+ pr_err("sdcardfs: remount flags 0x%x unsupported\n", *flags);
err = -EINVAL;
}
@@ -109,6 +125,54 @@
}
/*
+ * @mnt: mount point we are remounting
+ * @sb: superblock we are remounting
+ * @flags: numeric mount options
+ * @options: mount options string
+ */
+static int sdcardfs_remount_fs2(struct vfsmount *mnt, struct super_block *sb,
+ int *flags, char *options)
+{
+ int err = 0;
+
+ /*
+ * The VFS will take care of "ro" and "rw" flags among others. We
+ * can safely accept a few flags (RDONLY, MANDLOCK), and honor
+ * SILENT, but anything else left over is an error.
+ */
+ if ((*flags & ~(MS_RDONLY | MS_MANDLOCK | MS_SILENT | MS_REMOUNT)) != 0) {
+ pr_err("sdcardfs: remount flags 0x%x unsupported\n", *flags);
+ err = -EINVAL;
+ }
+ pr_info("Remount options were %s for vfsmnt %p.\n", options, mnt);
+ err = parse_options_remount(sb, options, *flags & ~MS_SILENT, mnt->data);
+
+
+ return err;
+}
+
+static void *sdcardfs_clone_mnt_data(void *data)
+{
+ struct sdcardfs_vfsmount_options *opt = kmalloc(sizeof(struct sdcardfs_vfsmount_options), GFP_KERNEL);
+ struct sdcardfs_vfsmount_options *old = data;
+
+ if (!opt)
+ return NULL;
+ opt->gid = old->gid;
+ opt->mask = old->mask;
+ return opt;
+}
+
+static void sdcardfs_copy_mnt_data(void *data, void *newdata)
+{
+ struct sdcardfs_vfsmount_options *old = data;
+ struct sdcardfs_vfsmount_options *new = newdata;
+
+ old->gid = new->gid;
+ old->mask = new->mask;
+}
+
+/*
* Called by iput() when the inode reference count reached zero
* and the inode is not hashed anywhere. Used to clear anything
* that needs to be, before the inode is completely destroyed and put
@@ -119,6 +183,7 @@
struct inode *lower_inode;
truncate_inode_pages(&inode->i_data, 0);
+ set_top(SDCARDFS_I(inode), NULL);
clear_inode(inode);
/*
* Decrement a reference to a lower_inode, which was incremented
@@ -132,6 +197,7 @@
static struct inode *sdcardfs_alloc_inode(struct super_block *sb)
{
struct sdcardfs_inode_info *i;
+ struct sdcardfs_inode_data *d;
i = kmem_cache_alloc(sdcardfs_inode_cachep, GFP_KERNEL);
if (!i)
@@ -140,13 +206,31 @@
/* memset everything up to the inode to 0 */
memset(i, 0, offsetof(struct sdcardfs_inode_info, vfs_inode));
+ d = kmem_cache_alloc(sdcardfs_inode_data_cachep,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!d) {
+ kmem_cache_free(sdcardfs_inode_cachep, i);
+ return NULL;
+ }
+
+ i->data = d;
+ kref_init(&d->refcount);
+
i->vfs_inode.i_version = 1;
return &i->vfs_inode;
}
+static void i_callback(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+
+ release_own_data(SDCARDFS_I(inode));
+ kmem_cache_free(sdcardfs_inode_cachep, SDCARDFS_I(inode));
+}
+
static void sdcardfs_destroy_inode(struct inode *inode)
{
- kmem_cache_free(sdcardfs_inode_cachep, SDCARDFS_I(inode));
+ call_rcu(&inode->i_rcu, i_callback);
}
/* sdcardfs inode cache constructor */
@@ -159,22 +243,31 @@
int sdcardfs_init_inode_cache(void)
{
- int err = 0;
-
sdcardfs_inode_cachep =
kmem_cache_create("sdcardfs_inode_cache",
sizeof(struct sdcardfs_inode_info), 0,
SLAB_RECLAIM_ACCOUNT, init_once);
+
if (!sdcardfs_inode_cachep)
- err = -ENOMEM;
- return err;
+ return -ENOMEM;
+
+ sdcardfs_inode_data_cachep =
+ kmem_cache_create("sdcardfs_inode_data_cache",
+ sizeof(struct sdcardfs_inode_data), 0,
+ SLAB_RECLAIM_ACCOUNT, NULL);
+ if (!sdcardfs_inode_data_cachep) {
+ kmem_cache_destroy(sdcardfs_inode_cachep);
+ return -ENOMEM;
+ }
+
+ return 0;
}
/* sdcardfs inode cache destructor */
void sdcardfs_destroy_inode_cache(void)
{
- if (sdcardfs_inode_cachep)
- kmem_cache_destroy(sdcardfs_inode_cachep);
+ kmem_cache_destroy(sdcardfs_inode_data_cachep);
+ kmem_cache_destroy(sdcardfs_inode_cachep);
}
/*
@@ -190,19 +283,25 @@
lower_sb->s_op->umount_begin(lower_sb);
}
-static int sdcardfs_show_options(struct seq_file *m, struct dentry *root)
+static int sdcardfs_show_options(struct vfsmount *mnt, struct seq_file *m,
+ struct dentry *root)
{
struct sdcardfs_sb_info *sbi = SDCARDFS_SB(root->d_sb);
struct sdcardfs_mount_options *opts = &sbi->options;
+ struct sdcardfs_vfsmount_options *vfsopts = mnt->data;
if (opts->fs_low_uid != 0)
- seq_printf(m, ",uid=%u", opts->fs_low_uid);
+ seq_printf(m, ",fsuid=%u", opts->fs_low_uid);
if (opts->fs_low_gid != 0)
- seq_printf(m, ",gid=%u", opts->fs_low_gid);
-
+ seq_printf(m, ",fsgid=%u", opts->fs_low_gid);
+ if (vfsopts->gid != 0)
+ seq_printf(m, ",gid=%u", vfsopts->gid);
if (opts->multiuser)
- seq_printf(m, ",multiuser");
-
+ seq_puts(m, ",multiuser");
+ if (vfsopts->mask)
+ seq_printf(m, ",mask=%u", vfsopts->mask);
+ if (opts->fs_user_id)
+ seq_printf(m, ",userid=%u", opts->fs_user_id);
if (opts->reserved_mb != 0)
seq_printf(m, ",reserved=%uMB", opts->reserved_mb);
@@ -213,9 +312,12 @@
.put_super = sdcardfs_put_super,
.statfs = sdcardfs_statfs,
.remount_fs = sdcardfs_remount_fs,
+ .remount_fs2 = sdcardfs_remount_fs2,
+ .clone_mnt_data = sdcardfs_clone_mnt_data,
+ .copy_mnt_data = sdcardfs_copy_mnt_data,
.evict_inode = sdcardfs_evict_inode,
.umount_begin = sdcardfs_umount_begin,
- .show_options = sdcardfs_show_options,
+ .show_options2 = sdcardfs_show_options,
.alloc_inode = sdcardfs_alloc_inode,
.destroy_inode = sdcardfs_destroy_inode,
.drop_inode = generic_delete_inode,
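
The comment above sdcardfs_inode_data_cachep summarises the reference rules: each inode info holds one reference on its own data and one on whatever data is currently its top, and set_top() in sdcardfs.h takes the new reference before dropping the old one. A runnable userspace analog of that ordering, with a plain counter standing in for struct kref and illustrative names:

#include <stdio.h>
#include <stdlib.h>

/* Userspace analog of the sdcardfs top-data refcounting; names are illustrative. */
struct inode_data {
	int refcount;
};

static struct inode_data *data_get(struct inode_data *d)
{
	if (d)
		d->refcount++;
	return d;
}

static void data_put(struct inode_data *d)
{
	if (d && --d->refcount == 0)
		free(d);
}

/* Mirrors set_top(): reference the new top before releasing the old one. */
static void set_top(struct inode_data **top, struct inode_data *new_top)
{
	struct inode_data *old = *top;

	data_get(new_top);
	*top = new_top;
	data_put(old);
}

int main(void)
{
	struct inode_data *own = calloc(1, sizeof(*own));
	struct inode_data *top = NULL;

	own->refcount = 1;       /* the inode's own reference */
	set_top(&top, own);      /* top == own: two references, as the comment notes */
	printf("refcount with self as top: %d\n", own->refcount);
	set_top(&top, NULL);     /* drop the top reference */
	data_put(own);           /* drop the own reference; frees */
	return 0;
}

Taking the new reference before dropping the old one keeps the swap safe even when old and new are the same object, which is exactly the self-as-top case the comment describes.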
diff --git a/fs/super.c b/fs/super.c
index a7708e0..a84e274 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -695,7 +695,8 @@
}
/**
- * do_remount_sb - asks filesystem to change mount options.
+ * do_remount_sb2 - asks filesystem to change mount options.
+ * @mnt: mount we are looking at
* @sb: superblock in question
* @flags: numeric part of options
* @data: the rest of options
@@ -703,7 +704,7 @@
*
* Alters the mount options of a mounted file system.
*/
-int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
+int do_remount_sb2(struct vfsmount *mnt, struct super_block *sb, int flags, void *data, int force)
{
int retval;
int remount_ro;
@@ -736,7 +737,16 @@
sync_filesystem(sb);
- if (sb->s_op->remount_fs) {
+ if (mnt && sb->s_op->remount_fs2) {
+ retval = sb->s_op->remount_fs2(mnt, sb, &flags, data);
+ if (retval) {
+ if (!force)
+ goto cancel_readonly;
+ /* If forced remount, go ahead despite any errors */
+ WARN(1, "forced remount of a %s fs returned %i\n",
+ sb->s_type->name, retval);
+ }
+ } else if (sb->s_op->remount_fs) {
retval = sb->s_op->remount_fs(sb, &flags, data);
if (retval) {
if (!force)
@@ -768,6 +778,11 @@
return retval;
}
+int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
+{
+ return do_remount_sb2(NULL, sb, flags, data, force);
+}
+
static void do_emergency_remount(struct work_struct *work)
{
struct super_block *sb, *p = NULL;
@@ -1087,7 +1102,7 @@
EXPORT_SYMBOL(mount_single);
struct dentry *
-mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
+mount_fs(struct file_system_type *type, int flags, const char *name, struct vfsmount *mnt, void *data)
{
struct dentry *root;
struct super_block *sb;
@@ -1104,7 +1119,10 @@
goto out_free_secdata;
}
- root = type->mount(type, flags, name, data);
+ if (type->mount2)
+ root = type->mount2(mnt, type, flags, name, data);
+ else
+ root = type->mount(type, flags, name, data);
if (IS_ERR(root)) {
error = PTR_ERR(root);
goto out_free_secdata;
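
mount_fs() now prefers a filesystem's ->mount2() hook and passes the vfsmount through, so per-mount data allocated by ->alloc_mnt_data() (both added to struct file_system_type in the fs.h hunk below) is available while options are parsed. A sketch of a filesystem opting in; examplefs, its option struct and stub fill_super are hypothetical:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical per-mount options, kept in vfsmount->data. */
struct examplefs_mnt_opts {
	gid_t gid;
};

static void *examplefs_alloc_mnt_data(void)
{
	return kzalloc(sizeof(struct examplefs_mnt_opts), GFP_KERNEL);
}

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	return -ENODEV;	/* stub: a real fs sets s_op, s_root, ... */
}

static struct dentry *examplefs_mount2(struct vfsmount *mnt,
		struct file_system_type *fs_type, int flags,
		const char *dev_name, void *raw_data)
{
	/* parse raw_data into mnt->data here, then build the superblock */
	return mount_nodev(fs_type, flags, raw_data, examplefs_fill_super);
}

static struct file_system_type examplefs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount2		= examplefs_mount2,
	.alloc_mnt_data	= examplefs_alloc_mnt_data,
	.kill_sb	= kill_anon_super,
};

static int __init examplefs_init(void)
{
	return register_filesystem(&examplefs_fs_type);
}
module_init(examplefs_init);
MODULE_LICENSE("GPL");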
diff --git a/fs/utimes.c b/fs/utimes.c
index f4fb7ec..7216a07 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -96,13 +96,13 @@
goto mnt_drop_write_and_out;
if (!inode_owner_or_capable(inode)) {
- error = inode_permission(inode, MAY_WRITE);
+ error = inode_permission2(path->mnt, inode, MAY_WRITE);
if (error)
goto mnt_drop_write_and_out;
}
}
mutex_lock(&inode->i_mutex);
- error = notify_change(path->dentry, &newattrs);
+ error = notify_change2(path->mnt, path->dentry, &newattrs);
mutex_unlock(&inode->i_mutex);
mnt_drop_write_and_out:
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 6c58dd7..94da8d2 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -122,6 +122,7 @@
kernel_cap_t cap_permitted; /* caps we're permitted */
kernel_cap_t cap_effective; /* caps we can actually use */
kernel_cap_t cap_bset; /* capability bounding set */
+ kernel_cap_t cap_ambient; /* Ambient capability set */
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
@@ -197,6 +198,13 @@
}
#endif
+static inline bool cap_ambient_invariant_ok(const struct cred *cred)
+{
+ return cap_issubset(cred->cap_ambient,
+ cap_intersect(cred->cap_permitted,
+ cred->cap_inheritable));
+}
+
/**
* get_new_cred - Get a reference on a new set of credentials
* @cred: The new credentials to reference
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0d1e168..63ad13a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1470,13 +1470,20 @@
* VFS helper functions..
*/
extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
+extern int vfs_create2(struct vfsmount *, struct inode *, struct dentry *, umode_t, bool);
extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
+extern int vfs_mkdir2(struct vfsmount *, struct inode *, struct dentry *, umode_t);
extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
+extern int vfs_mknod2(struct vfsmount *, struct inode *, struct dentry *, umode_t, dev_t);
extern int vfs_symlink(struct inode *, struct dentry *, const char *);
extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
+extern int vfs_link2(struct vfsmount *, struct dentry *, struct inode *, struct dentry *);
extern int vfs_rmdir(struct inode *, struct dentry *);
+extern int vfs_rmdir2(struct vfsmount *, struct inode *, struct dentry *);
extern int vfs_unlink(struct inode *, struct dentry *);
+extern int vfs_unlink2(struct vfsmount *, struct inode *, struct dentry *);
extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+extern int vfs_rename2(struct vfsmount *, struct inode *, struct dentry *, struct inode *, struct dentry *);
/*
* VFS dentry helper functions.
@@ -1580,6 +1587,7 @@
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
void * (*follow_link) (struct dentry *, struct nameidata *);
int (*permission) (struct inode *, int);
+ int (*permission2) (struct vfsmount *, struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int);
int (*readlink) (struct dentry *, char __user *,int);
@@ -1595,6 +1603,7 @@
int (*rename) (struct inode *, struct dentry *,
struct inode *, struct dentry *);
int (*setattr) (struct dentry *, struct iattr *);
+ int (*setattr2) (struct vfsmount *, struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
@@ -1634,9 +1643,13 @@
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
+ int (*remount_fs2) (struct vfsmount *, struct super_block *, int *, char *);
+ void *(*clone_mnt_data) (void *);
+ void (*copy_mnt_data) (void *, void *);
void (*umount_begin) (struct super_block *);
int (*show_options)(struct seq_file *, struct dentry *);
+ int (*show_options2)(struct vfsmount *,struct seq_file *, struct dentry *);
int (*show_devname)(struct seq_file *, struct dentry *);
int (*show_path)(struct seq_file *, struct dentry *);
int (*show_stats)(struct seq_file *, struct dentry *);
@@ -1843,6 +1856,9 @@
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
+ struct dentry *(*mount2) (struct vfsmount *, struct file_system_type *, int,
+ const char *, void *);
+ void *(*alloc_mnt_data) (void);
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
@@ -2031,6 +2047,8 @@
extern long vfs_truncate(struct path *, loff_t);
extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
struct file *filp);
+extern int do_truncate2(struct vfsmount *, struct dentry *, loff_t start,
+ unsigned int time_attrs, struct file *filp);
extern int do_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern long do_sys_open(int dfd, const char __user *filename, int flags,
@@ -2238,7 +2256,9 @@
extern sector_t bmap(struct inode *, sector_t);
#endif
extern int notify_change(struct dentry *, struct iattr *);
+extern int notify_change2(struct vfsmount *, struct dentry *, struct iattr *);
extern int inode_permission(struct inode *, int);
+extern int inode_permission2(struct vfsmount *, struct inode *, int);
extern int generic_permission(struct inode *, int);
static inline bool execute_ok(struct inode *inode)
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index f6f0c3c..d12af11 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -17,6 +17,7 @@
__s32 dad_transmits;
__s32 rtr_solicits;
__s32 rtr_solicit_interval;
+ __s32 rtr_solicit_max_interval;
__s32 rtr_solicit_delay;
__s32 force_mld_version;
#ifdef CONFIG_IPV6_PRIVACY
@@ -33,6 +34,7 @@
__s32 accept_ra_rtr_pref;
__s32 rtr_probe_interval;
#ifdef CONFIG_IPV6_ROUTE_INFO
+ __s32 accept_ra_rt_info_min_plen;
__s32 accept_ra_rt_info_max_plen;
#endif
#endif
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 6883e19..e4a205d 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -122,7 +122,7 @@
static inline void print_ip_sym(unsigned long ip)
{
- printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
+ printk("[<%pP>] %pS\n", (void *) ip, (void *) ip);
}
#endif /*_LINUX_KALLSYMS_H*/
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 8eeb8f6..f64067e2 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -48,6 +48,9 @@
#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
+#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
+ MNT_MARKED)
+
#define MNT_INTERNAL 0x4000
#define MNT_LOCK_ATIME 0x040000
@@ -55,11 +58,13 @@
#define MNT_LOCK_NOSUID 0x100000
#define MNT_LOCK_NODEV 0x200000
#define MNT_LOCK_READONLY 0x400000
+#define MNT_MARKED 0x4000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
int mnt_flags;
+ void *data;
};
struct file; /* forward dec */
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 5a5ff57..f10c7f2 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -74,6 +74,7 @@
const char *, unsigned int, struct path *);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
+extern struct dentry *lookup_one_len2(const char *, struct vfsmount *mnt, struct dentry *, int);
extern int follow_down_one(struct path *);
extern int follow_down(struct path *);
diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h
index ca60fbd..1c67155 100644
--- a/include/linux/netfilter/xt_qtaguid.h
+++ b/include/linux/netfilter/xt_qtaguid.h
@@ -10,4 +10,5 @@
#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
#define xt_qtaguid_match_info xt_owner_match_info
+int qtaguid_untag(struct socket *sock, bool kernel);
#endif /* _XT_QTAGUID_MATCH_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index 3fd19934..4b5df69 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2450,7 +2450,7 @@
unsigned long arg4,
unsigned long arg5)
{
- return cap_task_prctl(option, arg2, arg3, arg3, arg5);
+ return cap_task_prctl(option, arg2, arg3, arg4, arg5);
}
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 2cbf0ba..5de60f8 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -1,8 +1,9 @@
#ifndef _ADDRCONF_H
#define _ADDRCONF_H
-#define MAX_RTR_SOLICITATIONS 3
+#define MAX_RTR_SOLICITATIONS -1 /* unlimited */
#define RTR_SOLICITATION_INTERVAL (4*HZ)
+#define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index e144826..189c268 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -63,6 +63,7 @@
#define TDLS_MGMT_VERSION2 1
#define CFG80211_BSSID_HINT_BACKPORT 1
+#define CFG80211_SCAN_RANDOM_MAC_ADDR 1
/*
* wireless hardware capability structures
@@ -1264,6 +1265,10 @@
* @wdev: the wireless device to scan for
* @aborted: (internal) scan request was notified as aborted
* @no_cck: used to send probe requests at non CCK rate in 2GHz band
+ * @mac_addr: MAC address used with randomisation
+ * @mac_addr_mask: MAC address mask used with randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
@@ -1277,6 +1282,9 @@
struct wireless_dev *wdev;
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+
/* internal */
struct wiphy *wiphy;
unsigned long scan_start;
@@ -1287,6 +1295,17 @@
struct ieee80211_channel *channels[0];
};
+static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
+{
+ int i;
+
+ get_random_bytes(buf, ETH_ALEN);
+ for (i = 0; i < ETH_ALEN; i++) {
+ buf[i] &= ~mask[i];
+ buf[i] |= addr[i] & mask[i];
+ }
+}
+
/**
* struct cfg80211_match_set - sets of attributes to match
*
@@ -1319,6 +1338,10 @@
* @channels: channels to scan
* @min_rssi_thold: for drivers only supporting a single threshold, this
* contains the minimum over all matchsets
+ * @mac_addr: MAC address used with randomisation
+ * @mac_addr_mask: MAC address mask used with randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
*/
struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
@@ -1333,6 +1356,9 @@
s32 min_rssi_thold;
s32 rssi_thold; /* just for backward compatible */
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+
/* internal */
struct wiphy *wiphy;
struct net_device *dev;
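
get_random_mask_addr() above implements the rule spelled out in the attribute documentation: a 1 bit in the mask keeps the corresponding bit of @mac_addr, a 0 bit is randomised. A runnable userspace analog, with rand() standing in for get_random_bytes() and an example OUI-preserving mask:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ETH_ALEN 6

/* Userspace analog of get_random_mask_addr(): mask bit 1 = keep, 0 = randomise. */
static void random_mask_addr(unsigned char *buf, const unsigned char *addr,
			     const unsigned char *mask)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		buf[i] = rand() & 0xff;          /* get_random_bytes() in the kernel */
		buf[i] &= ~mask[i];
		buf[i] |= addr[i] & mask[i];
	}
}

int main(void)
{
	/* Keep the locally-administered OUI, randomise the NIC-specific bytes. */
	unsigned char addr[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x00, 0x00, 0x00 };
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
	unsigned char out[ETH_ALEN];
	int i;

	srand(time(NULL));
	random_mask_addr(out, addr, mask);
	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%s", out[i], i == ETH_ALEN - 1 ? "\n" : ":");
	return 0;
}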
diff --git a/include/net/cnss_nl.h b/include/net/cnss_nl.h
new file mode 100644
index 0000000..abbeae2
--- /dev/null
+++ b/include/net/cnss_nl.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NET_CNSS_GENETLINK_H_
+#define _NET_CNSS_GENETLINK_H_
+
+#define CLD80211_MAX_COMMANDS 40
+#define CLD80211_MAX_NL_DATA 4096
+
+/**
+ * enum cld80211_attr - Driver/Application embeds the data in nlmsg with the
+ * help of below attributes
+ *
+ * @CLD80211_ATTR_VENDOR_DATA: Embed all other attributes in this nested
+ * attribute.
+ * @CLD80211_ATTR_DATA: Embed complete data in this attribute
+ *
+ * Any new message in future can be added as another attribute
+ */
+enum cld80211_attr {
+ CLD80211_ATTR_VENDOR_DATA = 1,
+ CLD80211_ATTR_DATA,
+ /* add new attributes above here */
+
+ __CLD80211_ATTR_AFTER_LAST,
+ CLD80211_ATTR_MAX = __CLD80211_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum cld80211_multicast_groups - List of multicast groups supported
+ *
+ * @CLD80211_MCGRP_SVC_MSGS: WLAN service message will be sent to this group.
+ * Ex: Status ind messages
+ * @CLD80211_MCGRP_HOST_LOGS: All logging related messages from driver will be
+ * sent to this multicast group
+ * @CLD80211_MCGRP_FW_LOGS: Firmware logging messages will be sent to this group
+ * @CLD80211_MCGRP_PER_PKT_STATS: Messages related to packet stats debugging infra
+ * will be sent to this group
+ * @CLD80211_MCGRP_DIAG_EVENTS: Driver/Firmware status logging diag events will
+ * be sent to this group
+ * @CLD80211_MCGRP_FATAL_EVENTS: Any fatal message generated in driver/firmware
+ * will be sent to this group
+ * @CLD80211_MCGRP_OEM_MSGS: All OEM message will be sent to this group
+ * Ex: LOWI messages
+ */
+enum cld80211_multicast_groups {
+ CLD80211_MCGRP_SVC_MSGS,
+ CLD80211_MCGRP_HOST_LOGS,
+ CLD80211_MCGRP_FW_LOGS,
+ CLD80211_MCGRP_PER_PKT_STATS,
+ CLD80211_MCGRP_DIAG_EVENTS,
+ CLD80211_MCGRP_FATAL_EVENTS,
+ CLD80211_MCGRP_OEM_MSGS,
+};
+
+/**
+ * typedef cld80211_cb - Callback to be called when an nlmsg is received with
+ * the registered cmd_id command from userspace
+ * @data: Payload of the message to be sent to driver
+ * @data_len: Length of the payload
+ * @cb_ctx: callback context to be returned to driver when the callback
+ * is called
+ * @pid: process id of the sender
+ */
+typedef void (*cld80211_cb)(const void *data, int data_len,
+ void *cb_ctx, int pid);
+
+/**
+ * register_cld_cmd_cb() - Allows cld driver to register for commands with
+ * callback
+ * @cmd_id: Command to be registered. Valid range [1, CLD80211_MAX_COMMANDS]
+ * @cb: Callback to be called when an nlmsg is received with cmd_id command
+ * from userspace
+ * @cb_ctx: context provided by the driver; passed back to the driver as
+ * cb_ctx when the callback is invoked
+ */
+int register_cld_cmd_cb(u8 cmd_id, cld80211_cb cb, void *cb_ctx);
+
+/**
+ * deregister_cld_cmd_cb() - Allows cld driver to de-register the command it
+ * has already registered
+ * @cmd_id: Command to be deregistered.
+ */
+int deregister_cld_cmd_cb(u8 cmd_id);
+
+/**
+ * cld80211_get_genl_family() - Returns current netlink family context
+ */
+struct genl_family *cld80211_get_genl_family(void);
+
+/**
+ * cld80211_get_mcgrp_id() - Returns netlink multicast group id of the local
+ * groupid
+ * @groupid: index of the group from enum cld80211_multicast_groups
+ */
+int cld80211_get_mcgrp_id(enum cld80211_multicast_groups groupid);
+
+#endif /* _NET_CNSS_GENETLINK_H_ */
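
The cld80211 interface is callback based: a driver picks a command id in [1, CLD80211_MAX_COMMANDS], registers a handler, and is invoked with the payload, its context pointer and the sender's pid. A minimal module-style sketch; the command id 5 and the context struct are made up for illustration:

#include <linux/kernel.h>
#include <linux/module.h>
#include <net/cnss_nl.h>

/* Hypothetical driver context; command id 5 is an arbitrary example. */
#define EXAMPLE_CLD_CMD 5

struct example_ctx {
	int dummy;
};

static struct example_ctx ctx;

static void example_cld_cb(const void *data, int data_len, void *cb_ctx, int pid)
{
	pr_info("cld80211 cmd %d: %d bytes from pid %d\n",
		EXAMPLE_CLD_CMD, data_len, pid);
}

static int __init example_init(void)
{
	return register_cld_cmd_cb(EXAMPLE_CLD_CMD, example_cld_cb, &ctx);
}

static void __exit example_exit(void)
{
	deregister_cld_cmd_cb(EXAMPLE_CLD_CMD);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL v2");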
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 4ac12e1..21666fb 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -8,6 +8,11 @@
#include <net/flow.h>
#include <net/rtnetlink.h>
+struct fib_kuid_range {
+ kuid_t start;
+ kuid_t end;
+};
+
struct fib_rule {
struct list_head list;
atomic_t refcnt;
@@ -23,8 +28,7 @@
struct fib_rule __rcu *ctarget;
char iifname[IFNAMSIZ];
char oifname[IFNAMSIZ];
- kuid_t uid_start;
- kuid_t uid_end;
+ struct fib_kuid_range uid_range;
struct rcu_head rcu;
struct net * fr_net;
};
@@ -83,8 +87,7 @@
[FRA_FWMASK] = { .type = NLA_U32 }, \
[FRA_TABLE] = { .type = NLA_U32 }, \
[FRA_GOTO] = { .type = NLA_U32 }, \
- [FRA_UID_START] = { .type = NLA_U32 }, \
- [FRA_UID_END] = { .type = NLA_U32 }
+ [FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) }
static inline void fib_rule_get(struct fib_rule *rule)
{
diff --git a/include/net/flow.h b/include/net/flow.h
index 1426681f..15ecdd0 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -32,7 +32,7 @@
#define FLOWI_FLAG_CAN_SLEEP 0x02
#define FLOWI_FLAG_KNOWN_NH 0x04
__u32 flowic_secid;
- kuid_t flowic_uid;
+ kuid_t flowic_uid;
};
union flowi_uli {
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 100fb8c..cc4a879 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -195,6 +195,9 @@
struct inet6_dev *next;
struct ipv6_devconf cnf;
struct ipv6_devstat stats;
+
+ __s32 rs_interval; /* in jiffies */
+
unsigned long tstamp; /* ipv6InterfaceTable update timestamp */
struct rcu_head rcu;
};
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 6be6deb..30f068f 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -139,7 +139,8 @@
int oif, u32 mark, kuid_t uid);
extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
__be32 mtu);
-extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
+ kuid_t uid);
extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
struct netlink_callback;
diff --git a/include/net/route.h b/include/net/route.h
index 647bb2a..4fe6762 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -142,7 +142,7 @@
flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
RT_SCOPE_UNIVERSE, proto,
sk ? inet_sk_flowi_flags(sk) : 0,
- daddr, saddr, dport, sport, sk ? sock_i_uid(sk) : 0);
+ daddr, saddr, dport, sport, sock_net_uid(net, sk));
if (sk)
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
return ip_route_output_flow(net, fl4, sk);
@@ -254,7 +254,7 @@
flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
protocol, flow_flags, dst, src, dport, sport,
- sock_i_uid(sk));
+ sk->sk_uid);
}
static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
diff --git a/include/net/sock.h b/include/net/sock.h
index 2947dbf..cadadda 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -395,6 +395,7 @@
void *sk_security;
#endif
__u32 sk_mark;
+ kuid_t sk_uid;
u32 sk_classid;
struct cg_proto *sk_cgrp;
void (*sk_state_change)(struct sock *sk);
@@ -1709,6 +1710,7 @@
sk->sk_wq = parent->wq;
parent->sk = sk;
sk_set_socket(sk, parent);
+ sk->sk_uid = SOCK_INODE(parent)->i_uid;
security_sock_graft(sk, parent);
write_unlock_bh(&sk->sk_callback_lock);
}
@@ -1716,6 +1718,11 @@
extern kuid_t sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);
+static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
+{
+ return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
+}
+
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 9dcdb62..209abc4 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -29,6 +29,11 @@
__u32 flags;
};
+struct fib_rule_uid_range {
+ __u32 start;
+ __u32 end;
+};
+
enum {
FRA_UNSPEC,
FRA_DST, /* destination address */
@@ -49,8 +54,9 @@
FRA_TABLE, /* Extended table id */
FRA_FWMASK, /* mask for netfilter mark */
FRA_OIFNAME,
- FRA_UID_START, /* UID range */
- FRA_UID_END,
+ FRA_PAD,
+ FRA_L3MDEV, /* iif or oif is l3mdev goto its table */
+ FRA_UID_RANGE, /* UID range */
__FRA_MAX
};
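
FRA_UID_RANGE replaces the old FRA_UID_START/FRA_UID_END pair with a single struct. As the net/core/fib_rules.c changes later in this patch show, a rule matches a flow when start <= flowi_uid <= end, and a rule with no range stored defaults to {0, ~0}, i.e. every UID. A small runnable illustration of those semantics using plain 32-bit UIDs:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the uapi struct added above. */
struct fib_rule_uid_range {
	uint32_t start;
	uint32_t end;
};

/* Inclusive-range match, as in fib_rule_match() after this patch. */
static int uid_in_range(uint32_t uid, const struct fib_rule_uid_range *r)
{
	return uid >= r->start && uid <= r->end;
}

int main(void)
{
	struct fib_rule_uid_range app = { 10000, 19999 };   /* example range */
	struct fib_rule_uid_range unset = { 0, ~0u };        /* "no range": match all */

	printf("uid 10234 in app range: %d\n", uid_in_range(10234, &app));
	printf("uid 0 in app range:     %d\n", uid_in_range(0, &app));
	printf("uid 0 in unset range:   %d\n", uid_in_range(0, &unset));
	return 0;
}

The kernel additionally rejects a range whose start is greater than its end, and requires the caller to be in the target network namespace's user namespace.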
diff --git a/include/uapi/linux/hw_breakpoint.h b/include/uapi/linux/hw_breakpoint.h
index b04000a..2b65efd 100644
--- a/include/uapi/linux/hw_breakpoint.h
+++ b/include/uapi/linux/hw_breakpoint.h
@@ -4,7 +4,11 @@
enum {
HW_BREAKPOINT_LEN_1 = 1,
HW_BREAKPOINT_LEN_2 = 2,
+ HW_BREAKPOINT_LEN_3 = 3,
HW_BREAKPOINT_LEN_4 = 4,
+ HW_BREAKPOINT_LEN_5 = 5,
+ HW_BREAKPOINT_LEN_6 = 6,
+ HW_BREAKPOINT_LEN_7 = 7,
HW_BREAKPOINT_LEN_8 = 8,
};
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index a3fd4d1..6c7fbd5 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -162,9 +162,26 @@
DEVCONF_NDISC_NOTIFY,
DEVCONF_ACCEPT_RA_PREFIX_ROUTE,
DEVCONF_ACCEPT_RA_RT_TABLE,
+ DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
+ DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
+ DEVCONF_SUPPRESS_FRAG_NDISC,
+ DEVCONF_ACCEPT_RA_FROM_LOCAL,
DEVCONF_USE_OPTIMISTIC,
DEVCONF_ACCEPT_RA_MTU,
+ DEVCONF_STABLE_SECRET,
DEVCONF_USE_OIF_ADDRS_ONLY,
+ DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
+ DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
+ DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
+ DEVCONF_DROP_UNSOLICITED_NA,
+ DEVCONF_KEEP_ADDR_ON_DOWN,
+ DEVCONF_RTR_SOLICIT_MAX_INTERVAL,
+ DEVCONF_SEG6_ENABLED,
+ DEVCONF_SEG6_REQUIRE_HMAC,
+ DEVCONF_ENHANCED_DAD,
+ DEVCONF_ADDR_GEN_MODE,
+ DEVCONF_DISABLE_POLICY,
+ DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 04c5ff2..fc9697a 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -52,7 +52,7 @@
#define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs"
#define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs"
-#define SDCARDFS_SUPER_MAGIC 0xb550ca10
+#define SDCARDFS_SUPER_MAGIC 0x5dca2df5
#define SMB_SUPER_MAGIC 0x517B
#define CGROUP_SUPER_MAGIC 0x27e0eb
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index 9bcdaf7..412b173 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -57,6 +57,8 @@
#define KGSL_CONTEXT_TYPE_RS 4
#define KGSL_CONTEXT_TYPE_UNKNOWN 0x1E
+#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000
+
#define KGSL_CONTEXT_INVALID 0xffffffff
/*
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 23e8f3d..29a942b 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1535,6 +1535,49 @@
* @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32.
* As specified in the &enum nl80211_tdls_peer_capability.
*
+ * @NL80211_ATTR_SOCKET_OWNER: Flag attribute, if set during interface
+ * creation then the new interface will be owned by the netlink socket
+ * that created it and will be destroyed when the socket is closed.
+ * If set during scheduled scan start then the new scan req will be
+ * owned by the netlink socket that created it and the scheduled scan will
+ * be stopped when the socket is closed.
+ * If set during configuration of regulatory indoor operation then the
+ * regulatory indoor configuration would be owned by the netlink socket
+ * that configured the indoor setting, and the indoor operation would be
+ * cleared when the socket is closed.
+ *
+ * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
+ * the TDLS link initiator.
+ *
+ * @NL80211_ATTR_USE_RRM: flag for indicating whether the current connection
+ * shall support Radio Resource Measurements (11k). This attribute can be
+ * used with %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests.
+ * User space applications are expected to use this flag only if the
+ * underlying device supports these minimal RRM features:
+ * %NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES,
+ * %NL80211_FEATURE_QUIET,
+ * If this flag is used, driver must add the Power Capabilities IE to the
+ * association request. In addition, it must also set the RRM capability
+ * flag in the association request's Capability Info field.
+ *
+ * @NL80211_ATTR_WIPHY_DYN_ACK: flag attribute used to enable ACK timeout
+ * estimation algorithm (dynack). In order to activate dynack
+ * %NL80211_FEATURE_ACKTO_ESTIMATION feature flag must be set by lower
+ * drivers to indicate dynack capability. Dynack is automatically disabled
+ * when a valid value is set for the coverage class.
+ *
+ * @NL80211_ATTR_TSID: a TSID value (u8 attribute)
+ * @NL80211_ATTR_USER_PRIO: user priority value (u8 attribute)
+ * @NL80211_ATTR_ADMITTED_TIME: admitted time in units of 32 microseconds
+ * (per second) (u16 attribute)
+ *
+ * @NL80211_ATTR_SMPS_MODE: SMPS mode to use (ap mode). see
+ * &enum nl80211_smps_mode.
+ *
+ * @NL80211_ATTR_OPER_CLASS: operating class
+ *
+ * @NL80211_ATTR_MAC_MASK: MAC address mask
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -1871,6 +1914,26 @@
NL80211_ATTR_TDLS_PEER_CAPABILITY,
+ NL80211_ATTR_SOCKET_OWNER,
+
+ NL80211_ATTR_CSA_C_OFFSETS_TX,
+ NL80211_ATTR_MAX_CSA_COUNTERS,
+
+ NL80211_ATTR_TDLS_INITIATOR,
+
+ NL80211_ATTR_USE_RRM,
+
+ NL80211_ATTR_WIPHY_DYN_ACK,
+
+ NL80211_ATTR_TSID,
+ NL80211_ATTR_USER_PRIO,
+ NL80211_ATTR_ADMITTED_TIME,
+
+ NL80211_ATTR_SMPS_MODE,
+
+ NL80211_ATTR_OPER_CLASS,
+
+ NL80211_ATTR_MAC_MASK,
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -1880,6 +1943,7 @@
/* source-level API compatibility */
#define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION
#define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG
+#define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER
/*
* Allow user space programs to use #ifdef on new attributes by defining them
@@ -3788,6 +3852,38 @@
* @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic
* channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the
* lifetime of a BSS.
+ * @NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES: This device adds a DS Parameter
+ * Set IE to probe requests.
+ * @NL80211_FEATURE_WFA_TPC_IE_IN_PROBES: This device adds a WFA TPC Report IE
+ * to probe requests.
+ * @NL80211_FEATURE_QUIET: This device, in client mode, supports Quiet Period
+ * requests sent to it by an AP.
+ * @NL80211_FEATURE_TX_POWER_INSERTION: This device is capable of inserting the
+ * current tx power value into the TPC Report IE in the spectrum
+ * management TPC Report action frame, and in the Radio Measurement Link
+ * Measurement Report action frame.
+ * @NL80211_FEATURE_ACKTO_ESTIMATION: This driver supports dynamic ACK timeout
+ * estimation (dynack). %NL80211_ATTR_WIPHY_DYN_ACK flag attribute is used
+ * to enable dynack.
+ * @NL80211_FEATURE_STATIC_SMPS: Device supports static spatial
+ * multiplexing powersave, ie. can turn off all but one chain
+ * even on HT connections that should be using more chains.
+ * @NL80211_FEATURE_DYNAMIC_SMPS: Device supports dynamic spatial
+ * multiplexing powersave, ie. can turn off all but one chain
+ * and then wake the rest up as required after, for example,
+ * rts/cts handshake.
+ * @NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR: This device/driver supports using a
+ * random MAC address during scan (if the device is unassociated); the
+ * %NL80211_SCAN_FLAG_RANDOM_ADDR flag may be set for scans and the MAC
+ * address mask/value will be used.
+ * @NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR: This device/driver supports
+ * using a random MAC address for every scan iteration during scheduled
+ * scan (while not associated), the %NL80211_SCAN_FLAG_RANDOM_ADDR may
+ * be set for scheduled scan and the MAC address mask/value will be used.
+ * @NL80211_FEATURE_ND_RANDOM_MAC_ADDR: This device/driver supports using a
+ * random MAC address for every scan iteration during "net detect", i.e.
+ * scan in unassociated WoWLAN, the %NL80211_SCAN_FLAG_RANDOM_ADDR may
+ * be set for scheduled scan and the MAC address mask/value will be used.
*/
enum nl80211_feature_flags {
NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
@@ -3808,6 +3904,16 @@
NL80211_FEATURE_FULL_AP_CLIENT_STATE = 1 << 15,
NL80211_FEATURE_USERSPACE_MPM = 1 << 16,
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 1 << 18,
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES = 1 << 19,
+ NL80211_FEATURE_WFA_TPC_IE_IN_PROBES = 1 << 20,
+ NL80211_FEATURE_QUIET = 1 << 21,
+ NL80211_FEATURE_TX_POWER_INSERTION = 1 << 22,
+ NL80211_FEATURE_ACKTO_ESTIMATION = 1 << 23,
+ NL80211_FEATURE_STATIC_SMPS = 1 << 24,
+ NL80211_FEATURE_DYNAMIC_SMPS = 1 << 25,
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29,
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30,
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31,
};
/**
@@ -3856,11 +3962,21 @@
* dangerous because will destroy stations performance as a lot of frames
* will be lost while scanning off-channel, therefore it must be used only
* when really needed
+ * @NL80211_SCAN_FLAG_RANDOM_ADDR: use a random MAC address for this scan (or
+ * for scheduled scan: a different one for every scan iteration). When the
+ * flag is set, depending on device capabilities the @NL80211_ATTR_MAC and
+ * @NL80211_ATTR_MAC_MASK attributes may also be given in which case only
+ * the masked bits will be preserved from the MAC address and the remainder
+ * randomised. If the attributes are not given full randomisation (46 bits,
+ * locally administered 1, multicast 0) is assumed.
+ * This flag must not be requested when the feature isn't supported, check
+ * the nl80211 feature flags for the device.
*/
enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
NL80211_SCAN_FLAG_FLUSH = 1<<1,
NL80211_SCAN_FLAG_AP = 1<<2,
+ NL80211_SCAN_FLAG_RANDOM_ADDR = 1<<3,
};
/**
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 28bb0b3..5210e82 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -158,4 +158,11 @@
#define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
+/* Control the ambient capability set */
+#define PR_CAP_AMBIENT 47
+# define PR_CAP_AMBIENT_IS_SET 1
+# define PR_CAP_AMBIENT_RAISE 2
+# define PR_CAP_AMBIENT_LOWER 3
+# define PR_CAP_AMBIENT_CLEAR_ALL 4
+
#endif /* _LINUX_PRCTL_H */
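
PR_CAP_AMBIENT backports the upstream ambient-capability interface: a capability raised into the ambient set survives execve() of an unprivileged binary, and the cred.h hunk earlier in this patch adds the invariant that the ambient set stays a subset of permitted intersected with inheritable. A hedged userspace sketch; the fallback defines simply mirror the values above in case the toolchain headers predate this change:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/capability.h>

#ifndef PR_CAP_AMBIENT               /* values from the uapi change above */
#define PR_CAP_AMBIENT           47
#define PR_CAP_AMBIENT_IS_SET     1
#define PR_CAP_AMBIENT_RAISE      2
#endif

int main(void)
{
	/*
	 * Raising requires CAP_NET_RAW to already be in the permitted and
	 * inheritable sets (see cap_ambient_invariant_ok() in cred.h).
	 */
	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_RAW, 0, 0))
		perror("PR_CAP_AMBIENT_RAISE");

	printf("CAP_NET_RAW ambient: %d\n",
	       (int)prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_RAW, 0, 0));
	return 0;
}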
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 07c1146..99c4df8 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -297,6 +297,13 @@
RTA_TABLE,
RTA_MARK,
RTA_MFC_STATS,
+ RTA_VIA,
+ RTA_NEWDST,
+ RTA_PREF,
+ RTA_ENCAP_TYPE,
+ RTA_ENCAP,
+ RTA_EXPIRES,
+ RTA_PAD,
RTA_UID,
__RTA_MAX
};
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 0b3eead..9780e8a 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -569,6 +569,8 @@
NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
NET_IPV6_PROXY_NDP=23,
NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
+ NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
+ NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27,
__NET_IPV6_MAX
};
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 0f8f7be..ccaa477 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -169,6 +169,17 @@
*/
#define FUNCTIONFS_ENDPOINT_REVMAP _IO('g', 129)
+/*
+ * Returns endpoint descriptor. If function is not active returns -ENODEV.
+ */
+#define FUNCTIONFS_ENDPOINT_DESC _IOR('g', 130, \
+ struct usb_endpoint_descriptor)
+
+/*
+ * Sets a buffer length for which all r/w operations under that size use a
+ * preallocated buffer. Behavior of larger operations does not change.
+ */
+#define FUNCTIONFS_ENDPOINT_ALLOC _IOR('g', 231, __u32)
#endif /* _UAPI__LINUX_FUNCTIONFS_H__ */
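
Both new ioctls operate on an open endpoint file: FUNCTIONFS_ENDPOINT_DESC copies out the active usb_endpoint_descriptor (failing with -ENODEV while the function is inactive), and FUNCTIONFS_ENDPOINT_ALLOC asks for a preallocated buffer covering reads and writes up to the given size. A hedged userspace sketch; the endpoint path is made up and the exact argument convention for ENDPOINT_ALLOC should be confirmed against the f_fs implementation:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usb/functionfs.h>

int main(void)
{
	struct usb_endpoint_descriptor desc;
	int fd = open("/dev/usb-ffs/example/ep1", O_RDWR); /* hypothetical path */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FUNCTIONFS_ENDPOINT_DESC, &desc) == 0)
		printf("ep addr 0x%02x, maxpacket %u\n",
		       desc.bEndpointAddress, desc.wMaxPacketSize);

	/* Ask for a 16 KiB preallocated buffer for small reads/writes. */
	if (ioctl(fd, FUNCTIONFS_ENDPOINT_ALLOC, 16384) < 0)
		perror("FUNCTIONFS_ENDPOINT_ALLOC");

	close(fd);
	return 0;
}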
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index bb0248f..88d97d2 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -754,7 +754,7 @@
}
mode &= ~current_umask();
- ret = vfs_create(dir, path->dentry, mode, true);
+ ret = vfs_create2(path->mnt, dir, path->dentry, mode, true);
path->dentry->d_fsdata = NULL;
if (ret)
return ERR_PTR(ret);
@@ -770,7 +770,7 @@
if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
return ERR_PTR(-EINVAL);
acc = oflag2acc[oflag & O_ACCMODE];
- if (inode_permission(path->dentry->d_inode, acc))
+ if (inode_permission2(path->mnt, path->dentry->d_inode, acc))
return ERR_PTR(-EACCES);
return dentry_open(path, oflag, current_cred());
}
@@ -803,7 +803,7 @@
ro = mnt_want_write(mnt); /* we'll drop it in any case */
error = 0;
mutex_lock(&root->d_inode->i_mutex);
- path.dentry = lookup_one_len(name->name, root, strlen(name->name));
+ path.dentry = lookup_one_len2(name->name, mnt, root, strlen(name->name));
if (IS_ERR(path.dentry)) {
error = PTR_ERR(path.dentry);
goto out_putfd;
@@ -874,7 +874,7 @@
if (err)
goto out_name;
mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT);
- dentry = lookup_one_len(name->name, mnt->mnt_root,
+ dentry = lookup_one_len2(name->name, mnt, mnt->mnt_root,
strlen(name->name));
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
@@ -886,7 +886,7 @@
err = -ENOENT;
} else {
ihold(inode);
- err = vfs_unlink(dentry->d_parent->d_inode, dentry);
+ err = vfs_unlink2(mnt, dentry->d_parent->d_inode, dentry);
}
dput(dentry);
diff --git a/kernel/printk.c b/kernel/printk.c
index cc23364..898f6fb 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -3228,7 +3228,7 @@
{
dump_stack_print_info(log_lvl);
- printk("%stask: %p ti: %p task.ti: %p\n",
+ printk("%stask: %pP ti: %pP task.ti: %pP\n",
log_lvl, current, current_thread_info(),
task_thread_info(current));
}
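
The printk.c change above shows the intended use of the new %pP extension: pointers that must remain readable in debug output keep %pP, while plain %p and %pK fall under kptr_restrict (the level 3 and 4 semantics are defined by the vsprintf.c and sysctl.c hunks later in this patch). A minimal module-style sketch of the distinction:

#include <linux/kernel.h>
#include <linux/module.h>

static int __init ptr_demo_init(void)
{
	void *p = &ptr_demo_init;

	/* With kptr_restrict >= 3, %p prints zeros; %pP still shows the value. */
	pr_info("restricted: %p whitelisted: %pP\n", p, p);
	return 0;
}

static void __exit ptr_demo_exit(void) { }

module_init(ptr_demo_init);
module_exit(ptr_demo_exit);
MODULE_LICENSE("GPL");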
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 0208bbe..944ef7f 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -342,24 +342,24 @@
put_seccomp_filter(thread);
smp_store_release(&thread->seccomp.filter,
caller->seccomp.filter);
+
+ /*
+ * Don't let an unprivileged task work around
+ * the no_new_privs restriction by creating
+ * a thread that sets it up, enters seccomp,
+ * then dies.
+ */
+ if (task_no_new_privs(caller))
+ task_set_no_new_privs(thread);
+
/*
* Opt the other thread into seccomp if needed.
* As threads are considered to be trust-realm
* equivalent (see ptrace_may_access), it is safe to
* allow one thread to transition the other.
*/
- if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
- /*
- * Don't let an unprivileged task work around
- * the no_new_privs restriction by creating
- * a thread that sets it up, enters seccomp,
- * then dies.
- */
- if (task_no_new_privs(caller))
- task_set_no_new_privs(thread);
-
+ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
- }
}
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1d09b87..eaa062a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -129,6 +129,7 @@
static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
static int __maybe_unused three = 3;
+static int __maybe_unused four = 4;
static unsigned long one_ul = 1;
static int one_hundred = 100;
#ifdef CONFIG_PRINTK
@@ -953,7 +954,7 @@
.mode = 0644,
.proc_handler = proc_dointvec_minmax_sysadmin,
.extra1 = &zero,
- .extra2 = &two,
+ .extra2 = &four,
},
#endif
{
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 3f2fb33..5dc3603 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -39,6 +39,7 @@
cred->cap_inheritable = CAP_EMPTY_SET;
cred->cap_permitted = CAP_FULL_SET;
cred->cap_effective = CAP_FULL_SET;
+ cred->cap_ambient = CAP_EMPTY_SET;
cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
key_put(cred->request_key_auth);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3784453..7f5a08b 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -377,6 +377,32 @@
s16 precision; /* # of digits/chars */
};
+int kptr_restrict __read_mostly = 4;
+
+/*
+ * Always cleanse %p and %pK specifiers
+ */
+static inline int kptr_restrict_always_cleanse_pointers(void)
+{
+ return kptr_restrict >= 3;
+}
+
+/*
+ * Always cleanse physical addresses (%pa* specifiers)
+ */
+static inline int kptr_restrict_cleanse_addresses(void)
+{
+ return kptr_restrict >= 4;
+}
+
+/*
+ * Always cleanse resource addresses (%p[rR] specifiers)
+ */
+static inline int kptr_restrict_cleanse_resources(void)
+{
+ return kptr_restrict >= 4;
+}
+
static noinline_for_stack
char *number(char *buf, char *end, unsigned long long num,
struct printf_spec spec)
@@ -620,6 +646,7 @@
char *p = sym, *pend = sym + sizeof(sym);
int decode = (fmt[0] == 'R') ? 1 : 0;
+ int cleanse = kptr_restrict_cleanse_resources();
const struct printf_spec *specp;
*p++ = '[';
@@ -643,10 +670,11 @@
specp = &mem_spec;
decode = 0;
}
- p = number(p, pend, res->start, *specp);
+ p = number(p, pend, cleanse ? 0UL : res->start, *specp);
if (res->start != res->end) {
*p++ = '-';
- p = number(p, pend, res->end, *specp);
+ p = number(p, pend,
+ cleanse ? res->end - res->start : res->end, *specp);
}
if (decode) {
if (res->flags & IORESOURCE_MEM_64)
@@ -665,6 +693,7 @@
*p = '\0';
return string(buf, end, sym, spec);
+
}
static noinline_for_stack
@@ -982,8 +1011,6 @@
return number(buf, end, *(const netdev_features_t *)addr, spec);
}
-int kptr_restrict __read_mostly;
-
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -1031,6 +1058,7 @@
* Do not use this feature without some mechanism to verify the
* correctness of the format string and va_list arguments.
* - 'K' For a kernel pointer that should be hidden from unprivileged users
+ * - 'P' For a kernel pointer that should be shown to all users
* - 'NF' For a netdev_features_t
* - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with
* a certain separator (' ' by default):
@@ -1044,6 +1072,15 @@
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
* pointer to the real address.
+ *
+ * Note that for kptr_restrict set to 3, %p and %pK have the same
+ * meaning.
+ *
+ * Note that for kptr_restrict set to 4, %pa will null out the physical
+ * address.
+ *
+ * Note that for kptr_restrict set to 4, %p[rR] will null out the memory
+ * address.
*/
static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
@@ -1051,7 +1088,7 @@
{
int default_width = 2 * sizeof(void *) + (spec.flags & SPECIAL ? 2 : 0);
- if (!ptr && *fmt != 'K') {
+ if (!ptr && *fmt != 'K' && !kptr_restrict_always_cleanse_pointers()) {
/*
* Print (null) with the same width as a pointer so it makes
* tabular output look nice.
@@ -1108,48 +1145,6 @@
va_end(va);
return buf;
}
- case 'K':
- /*
- * %pK cannot be used in IRQ context because its test
- * for CAP_SYSLOG would be meaningless.
- */
- if (kptr_restrict && (in_irq() || in_serving_softirq() ||
- in_nmi())) {
- if (spec.field_width == -1)
- spec.field_width = default_width;
- return string(buf, end, "pK-error", spec);
- }
-
- switch (kptr_restrict) {
- case 0:
- /* Always print %pK values */
- break;
- case 1: {
- /*
- * Only print the real pointer value if the current
- * process has CAP_SYSLOG and is running with the
- * same credentials it started with. This is because
- * access to files is checked at open() time, but %pK
- * checks permission at read() time. We don't want to
- * leak pointer values if a binary opens a file using
- * %pK and then elevates privileges before reading it.
- */
- const struct cred *cred = current_cred();
-
- if (!has_capability_noaudit(current, CAP_SYSLOG) ||
- !uid_eq(cred->euid, cred->uid) ||
- !gid_eq(cred->egid, cred->gid))
- ptr = NULL;
- break;
- }
- case 2:
- default:
- /* Always print 0's for %pK */
- ptr = NULL;
- break;
- }
- break;
-
case 'N':
switch (fmt[1]) {
case 'F':
@@ -1157,11 +1152,72 @@
}
break;
case 'a':
- spec.flags |= SPECIAL | SMALL | ZEROPAD;
- spec.field_width = sizeof(phys_addr_t) * 2 + 2;
- spec.base = 16;
- return number(buf, end,
- (unsigned long long) *((phys_addr_t *)ptr), spec);
+ {
+ unsigned long long addr = *((phys_addr_t *)ptr);
+ spec.flags |= SPECIAL | SMALL | ZEROPAD;
+ spec.field_width = sizeof(phys_addr_t) * 2 + 2;
+ spec.base = 16;
+ return number(buf, end,
+ kptr_restrict_cleanse_addresses() ? 0UL : addr,
+ spec);
+ }
+ case 'P':
+ /*
+ * an explicitly whitelisted kernel pointer should never be
+ * cleansed
+ */
+ break;
+ default:
+ /*
+ * plain %p, no extension, check if we should always cleanse and
+ * treat like %pK.
+ */
+ if (!kptr_restrict_always_cleanse_pointers()) {
+ break;
+ }
+ /* fallthrough */
+ case 'K':
+ switch (kptr_restrict) {
+ case 0:
+ /* Always print %p values */
+ break;
+ case 1: {
+ const struct cred *cred;
+
+ /*
+ * kptr_restrict==1 cannot be used in IRQ context
+ * because its test for CAP_SYSLOG would be meaningless.
+ */
+ if (in_irq() || in_serving_softirq() || in_nmi()) {
+ if (spec.field_width == -1)
+ spec.field_width = default_width;
+ return string(buf, end, "pK-error", spec);
+ }
+
+ /*
+ * Only print the real pointer value if the current
+ * process has CAP_SYSLOG and is running with the
+ * same credentials it started with. This is because
+ * access to files is checked at open() time, but %p
+ * checks permission at read() time. We don't want to
+ * leak pointer values if a binary opens a file using
+ * %pK and then elevates privileges before reading it.
+ */
+ cred = current_cred();
+ if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+ !uid_eq(cred->euid, cred->uid) ||
+ !gid_eq(cred->egid, cred->gid))
+ ptr = NULL;
+ break;
+ }
+ case 2: /* restrict only %pK */
+ case 3: /* restrict all non-extensioned %p and %pK */
+ case 4: /* restrict all non-extensioned %p, %pK, %pa*, %p[rR] */
+ default:
+ ptr = NULL;
+ break;
+ }
+ break;
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
@@ -1170,7 +1226,7 @@
}
spec.base = 16;
- return number(buf, end, (unsigned long) ptr, spec);
+ return number(buf, end, (unsigned long long) ptr, spec);
}
/*
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index da78f5c..59b632f 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -17,6 +17,11 @@
#include <net/sock.h>
#include <net/fib_rules.h>
+static const struct fib_kuid_range fib_kuid_range_unset = {
+ KUIDT_INIT(0),
+ KUIDT_INIT(~0),
+};
+
int fib_default_rule_add(struct fib_rules_ops *ops,
u32 pref, u32 table, u32 flags)
{
@@ -31,9 +36,8 @@
r->pref = pref;
r->table = table;
r->flags = flags;
- r->uid_start = INVALID_UID;
- r->uid_end = INVALID_UID;
r->fr_net = hold_net(ops->fro_net);
+ r->uid_range = fib_kuid_range_unset;
/* The lock is not required here, the list in unreacheable
* at the moment this function is called */
@@ -181,21 +185,32 @@
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
-static inline kuid_t fib_nl_uid(struct nlattr *nla)
+static int uid_range_set(struct fib_kuid_range *range)
{
- return make_kuid(current_user_ns(), nla_get_u32(nla));
+ return uid_valid(range->start) && uid_valid(range->end);
}
-static int nla_put_uid(struct sk_buff *skb, int idx, kuid_t uid)
+static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
- return nla_put_u32(skb, idx, from_kuid_munged(current_user_ns(), uid));
+ struct fib_rule_uid_range *in;
+ struct fib_kuid_range out;
+
+ in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);
+
+ out.start = make_kuid(current_user_ns(), in->start);
+ out.end = make_kuid(current_user_ns(), in->end);
+
+ return out;
}
-static int fib_uid_range_match(struct flowi *fl, struct fib_rule *rule)
+static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
- return (!uid_valid(rule->uid_start) && !uid_valid(rule->uid_end)) ||
- (uid_gte(fl->flowi_uid, rule->uid_start) &&
- uid_lte(fl->flowi_uid, rule->uid_end));
+ struct fib_rule_uid_range out = {
+ from_kuid_munged(current_user_ns(), range->start),
+ from_kuid_munged(current_user_ns(), range->end)
+ };
+
+ return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
@@ -212,7 +227,8 @@
if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
goto out;
- if (!fib_uid_range_match(fl, rule))
+ if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
+ uid_gt(fl->flowi_uid, rule->uid_range.end))
goto out;
ret = ops->match(rule, fl, flags);
@@ -385,17 +401,19 @@
} else if (rule->action == FR_ACT_GOTO)
goto errout_free;
- /* UID start and end must either both be valid or both unspecified. */
- rule->uid_start = rule->uid_end = INVALID_UID;
- if (tb[FRA_UID_START] || tb[FRA_UID_END]) {
- if (tb[FRA_UID_START] && tb[FRA_UID_END]) {
- rule->uid_start = fib_nl_uid(tb[FRA_UID_START]);
- rule->uid_end = fib_nl_uid(tb[FRA_UID_END]);
+ if (tb[FRA_UID_RANGE]) {
+ if (current_user_ns() != net->user_ns) {
+ err = -EPERM;
+ goto errout_free;
}
- if (!uid_valid(rule->uid_start) ||
- !uid_valid(rule->uid_end) ||
- !uid_lte(rule->uid_start, rule->uid_end))
- goto errout_free;
+
+ rule->uid_range = nla_get_kuid_range(tb);
+
+ if (!uid_range_set(&rule->uid_range) ||
+ !uid_lte(rule->uid_range.start, rule->uid_range.end))
+ goto errout_free;
+ } else {
+ rule->uid_range = fib_kuid_range_unset;
}
err = ops->configure(rule, skb, frh, tb);
@@ -457,6 +475,7 @@
struct fib_rules_ops *ops = NULL;
struct fib_rule *rule, *tmp;
struct nlattr *tb[FRA_MAX+1];
+ struct fib_kuid_range range;
int err = -EINVAL;
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
@@ -476,6 +495,14 @@
if (err < 0)
goto errout;
+ if (tb[FRA_UID_RANGE]) {
+ range = nla_get_kuid_range(tb);
+ if (!uid_range_set(&range))
+ goto errout;
+ } else {
+ range = fib_kuid_range_unset;
+ }
+
list_for_each_entry(rule, &ops->rules_list, list) {
if (frh->action && (frh->action != rule->action))
continue;
@@ -504,12 +531,9 @@
(rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
continue;
- if (tb[FRA_UID_START] &&
- !uid_eq(rule->uid_start, fib_nl_uid(tb[FRA_UID_START])))
- continue;
-
- if (tb[FRA_UID_END] &&
- !uid_eq(rule->uid_end, fib_nl_uid(tb[FRA_UID_END])))
+ if (uid_range_set(&range) &&
+ (!uid_eq(rule->uid_range.start, range.start) ||
+ !uid_eq(rule->uid_range.end, range.end)))
continue;
if (!ops->compare(rule, frh, tb))
@@ -569,8 +593,7 @@
+ nla_total_size(4) /* FRA_TABLE */
+ nla_total_size(4) /* FRA_FWMARK */
+ nla_total_size(4) /* FRA_FWMASK */
- + nla_total_size(4) /* FRA_UID_START */
- + nla_total_size(4); /* FRA_UID_END */
+ + nla_total_size(sizeof(struct fib_kuid_range));
if (ops->nlmsg_payload)
payload += ops->nlmsg_payload(rule);
@@ -625,10 +648,8 @@
nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
(rule->target &&
nla_put_u32(skb, FRA_GOTO, rule->target)) ||
- (uid_valid(rule->uid_start) &&
- nla_put_uid(skb, FRA_UID_START, rule->uid_start)) ||
- (uid_valid(rule->uid_end) &&
- nla_put_uid(skb, FRA_UID_END, rule->uid_end)))
+ (uid_range_set(&rule->uid_range) &&
+ nla_put_uid_range(skb, &rule->uid_range)))
goto nla_put_failure;
if (ops->fill(rule, skb, frh) < 0)
goto nla_put_failure;
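Note: the FRA_UID_START/FRA_UID_END attribute pair is folded into a single FRA_UID_RANGE attribute above. A sketch of the wire layout that nla_get_kuid_range() and nla_put_uid_range() assume (the uapi definition lives in a header hunk not shown in this excerpt):

	struct fib_rule_uid_range {
		__u32	start;	/* first UID covered by the rule */
		__u32	end;	/* last UID covered, start <= end */
	};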
diff --git a/net/core/sock.c b/net/core/sock.c
index de1074c..4e3d16d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2292,8 +2292,11 @@
sk->sk_type = sock->type;
sk->sk_wq = sock->wq;
sock->sk = sk;
- } else
+ sk->sk_uid = SOCK_INODE(sock)->i_uid;
+ } else {
sk->sk_wq = NULL;
+ sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0);
+ }
spin_lock_init(&sk->sk_dst_lock);
rwlock_init(&sk->sk_callback_lock);
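Note: the sk->sk_uid initialisation above pairs with the sock_net_uid() helper used by the flow-key and ICMP hunks below; its expected shape (defined in include/net/sock.h by this series, reproduced here only as a reference sketch):

	static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
	{
		/* No full socket: fall back to root in the socket's netns,
		 * matching the else branch above. */
		return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
	}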
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f5b5586..78c886a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -89,6 +89,7 @@
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>
+#include <linux/netfilter/xt_qtaguid.h>
#include <asm/uaccess.h>
@@ -445,6 +446,9 @@
if (sk) {
long timeout;
+#ifdef CONFIG_NETFILTER_XT_MATCH_QTAGUID
+ qtaguid_untag(sock, true);
+#endif
sock_rps_reset_flow(sk);
/* Applications forget to leave groups before exiting */
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 5af8781..59e63dc 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -364,6 +364,7 @@
fl4.daddr = daddr;
fl4.saddr = saddr;
fl4.flowi4_mark = mark;
+ fl4.flowi4_uid = sock_net_uid(net, NULL);
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_proto = IPPROTO_ICMP;
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -395,6 +396,7 @@
param->replyopts.opt.opt.faddr : iph->saddr);
fl4->saddr = saddr;
fl4->flowi4_mark = mark;
+ fl4->flowi4_uid = sock_net_uid(net, NULL);
fl4->flowi4_tos = RT_TOS(tos);
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 008b52b..912cb4d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -430,7 +430,7 @@
flags,
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport,
- sock_i_uid(sk));
+ sk->sk_uid);
security_req_classify_flow(req, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
@@ -467,7 +467,7 @@
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport,
- sock_i_uid(sk));
+ sk->sk_uid);
security_req_classify_flow(req, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index aaa70dd..32703d4 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -783,7 +783,7 @@
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
- sock_i_uid(sk));
+ sk->sk_uid);
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index b9c87c8..c9b79f4 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -573,8 +573,7 @@
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
(inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
- daddr, saddr, 0, 0,
- sock_i_uid(sk));
+ daddr, saddr, 0, 0, sk->sk_uid);
if (!inet->hdrincl) {
err = raw_probe_proto_opt(&fl4, msg);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5778011..4f25f88 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -515,7 +515,8 @@
}
EXPORT_SYMBOL(__ip_select_ident);
-static void __build_flow_key(struct flowi4 *fl4, struct sock *sk,
+static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
+ const struct sock *sk,
const struct iphdr *iph,
int oif, u8 tos,
u8 prot, u32 mark, int flow_flags)
@@ -532,22 +533,23 @@
RT_SCOPE_UNIVERSE, prot,
flow_flags,
iph->daddr, iph->saddr, 0, 0,
- sk ? sock_i_uid(sk) : 0);
+ sock_net_uid(net, sk));
}
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
- struct sock *sk)
+ const struct sock *sk)
{
+ const struct net *net = dev_net(skb->dev);
const struct iphdr *iph = ip_hdr(skb);
int oif = skb->dev->ifindex;
u8 tos = RT_TOS(iph->tos);
u8 prot = iph->protocol;
u32 mark = skb->mark;
- __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
+ __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
-static void build_sk_flow_key(struct flowi4 *fl4, struct sock *sk)
+static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
@@ -561,12 +563,11 @@
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk),
- daddr, inet->inet_saddr, 0, 0,
- sock_i_uid(sk));
+ daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
rcu_read_unlock();
}
-static void ip_rt_build_flow_key(struct flowi4 *fl4, struct sock *sk,
+static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
const struct sk_buff *skb)
{
if (skb)
@@ -762,7 +763,7 @@
rt = (struct rtable *) dst;
- __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
+ __build_flow_key(sock_net(sk), &fl4, sk, iph, oif, tos, prot, mark, 0);
__ip_do_redirect(rt, skb, &fl4, true);
}
@@ -976,7 +977,7 @@
if (!mark)
mark = IP4_REPLY_MARK(net, skb->mark);
- __build_flow_key(&fl4, NULL, iph, oif,
+ __build_flow_key(net, &fl4, NULL, iph, oif,
RT_TOS(iph->tos), protocol, mark, flow_flags);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
@@ -992,7 +993,7 @@
struct flowi4 fl4;
struct rtable *rt;
- __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+ __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
if (!fl4.flowi4_mark)
fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
@@ -1011,6 +1012,7 @@
struct rtable *rt;
struct dst_entry *odst = NULL;
bool new = false;
+ struct net *net = sock_net(sk);
bh_lock_sock(sk);
odst = sk_dst_get(sk);
@@ -1020,7 +1022,7 @@
goto out;
}
- __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+ __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
rt = (struct rtable *)odst;
if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
@@ -1060,7 +1062,7 @@
struct flowi4 fl4;
struct rtable *rt;
- __build_flow_key(&fl4, NULL, iph, oif,
+ __build_flow_key(net, &fl4, NULL, iph, oif,
RT_TOS(iph->tos), protocol, mark, flow_flags);
rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
@@ -1075,9 +1077,10 @@
const struct iphdr *iph = (const struct iphdr *) skb->data;
struct flowi4 fl4;
struct rtable *rt;
+ struct net *net = sock_net(sk);
- __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
- rt = __ip_route_output_key(sock_net(sk), &fl4);
+ __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
+ rt = __ip_route_output_key(net, &fl4);
if (!IS_ERR(rt)) {
__ip_do_redirect(rt, skb, &fl4, false);
ip_rt_put(rt);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index c94032b..296a0a4 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -353,8 +353,7 @@
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
inet_sk_flowi_flags(sk),
(opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
- ireq->loc_addr, th->source, th->dest,
- sock_i_uid(sk));
+ ireq->loc_addr, th->source, th->dest, sk->sk_uid);
security_req_classify_flow(req, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(sock_net(sk), &fl4);
if (IS_ERR(rt)) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e2f1dff..b15acfc 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -647,6 +647,7 @@
arg.iov[0].iov_base = (unsigned char *)&rep;
arg.iov[0].iov_len = sizeof(rep.th);
+ net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
hash_location = tcp_parse_md5sig_option(th);
if (!sk && hash_location) {
@@ -657,7 +658,7 @@
* Incoming packet is checked with md5 hash with finding key,
* no RST generated if md5 hash doesn't match.
*/
- sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
+ sk1 = __inet_lookup_listener(net,
&tcp_hashinfo, ip_hdr(skb)->saddr,
th->source, ip_hdr(skb)->daddr,
ntohs(th->source), inet_iif(skb));
@@ -705,8 +706,8 @@
if (sk)
arg.bound_dev_if = sk->sk_bound_dev_if;
- net = dev_net(skb_dst(skb)->dev);
arg.tos = ip_hdr(skb)->tos;
+ arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
@@ -727,7 +728,8 @@
outside socket context is ugly, certainly. What can I do?
*/
-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+static void tcp_v4_send_ack(const struct sock *sk, struct sk_buff *skb,
+ u32 seq, u32 ack,
u32 win, u32 tsval, u32 tsecr, int oif,
struct tcp_md5sig_key *key,
int reply_flags, u8 tos)
@@ -742,7 +744,7 @@
];
} rep;
struct ip_reply_arg arg;
- struct net *net = dev_net(skb_dst(skb)->dev);
+ struct net *net = sock_net(sk);
memset(&rep.th, 0, sizeof(struct tcphdr));
memset(&arg, 0, sizeof(arg));
@@ -791,6 +793,7 @@
if (oif)
arg.bound_dev_if = oif;
arg.tos = tos;
+ arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
@@ -803,7 +806,7 @@
struct inet_timewait_sock *tw = inet_twsk(sk);
struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
- tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcp_v4_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_time_stamp + tcptw->tw_ts_offset,
tcptw->tw_ts_recent,
@@ -822,7 +825,7 @@
/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
* sk->sk_state == TCP_SYN_RECV -> for Fast Open.
*/
- tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+ tcp_v4_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
tcp_time_stamp,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6d181ec..333dcb2 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -970,7 +970,7 @@
RT_SCOPE_UNIVERSE, sk->sk_protocol,
inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
faddr, saddr, dport, inet->inet_sport,
- sock_i_uid(sk));
+ sk->sk_uid);
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f96b907..57da416 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -113,6 +113,27 @@
return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
}
+static inline s32 rfc3315_s14_backoff_init(s32 irt)
+{
+ /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
+ u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
+ do_div(tmp, 1000000);
+ return (s32)tmp;
+}
+
+static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
+{
+ /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
+ u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
+ do_div(tmp, 1000000);
+ if ((s32)tmp > mrt) {
+ /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
+ tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
+ do_div(tmp, 1000000);
+ }
+ return (s32)tmp;
+}
+
#ifdef CONFIG_SYSCTL
static void addrconf_sysctl_register(struct inet6_dev *idev);
static void addrconf_sysctl_unregister(struct inet6_dev *idev);
@@ -182,6 +203,7 @@
.dad_transmits = 1,
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
+ .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
#ifdef CONFIG_IPV6_PRIVACY
.use_tempaddr = 0,
@@ -197,6 +219,7 @@
.accept_ra_rtr_pref = 1,
.rtr_probe_interval = 60 * HZ,
#ifdef CONFIG_IPV6_ROUTE_INFO
+ .accept_ra_rt_info_min_plen = 0,
.accept_ra_rt_info_max_plen = 0,
#endif
#endif
@@ -220,6 +243,7 @@
.dad_transmits = 1,
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
+ .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
#ifdef CONFIG_IPV6_PRIVACY
.use_tempaddr = 0,
@@ -235,6 +259,7 @@
.accept_ra_rtr_pref = 1,
.rtr_probe_interval = 60 * HZ,
#ifdef CONFIG_IPV6_ROUTE_INFO
+ .accept_ra_rt_info_min_plen = 0,
.accept_ra_rt_info_max_plen = 0,
#endif
#endif
@@ -3259,12 +3284,14 @@
goto out;
spin_lock(&ifp->lock);
- if (ifp->probes++ < idev->cnf.rtr_solicits) {
+ if (ifp->probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
+ idev->rs_interval = rfc3315_s14_backoff_update(
+ idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
/* The wait after the last probe can be shorter */
addrconf_mod_timer(ifp, AC_RS,
(ifp->probes == idev->cnf.rtr_solicits) ?
idev->cnf.rtr_solicit_delay :
- idev->cnf.rtr_solicit_interval);
+ idev->rs_interval);
spin_unlock(&ifp->lock);
ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
@@ -3421,7 +3448,7 @@
*/
if (ipv6_accept_ra(ifp->idev) &&
- ifp->idev->cnf.rtr_solicits > 0 &&
+ ifp->idev->cnf.rtr_solicits != 0 &&
(dev->flags&IFF_LOOPBACK) == 0 &&
(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
/*
@@ -3432,9 +3459,11 @@
ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
spin_lock_bh(&ifp->lock);
+ ifp->idev->rs_interval = rfc3315_s14_backoff_init(
+ ifp->idev->cnf.rtr_solicit_interval);
ifp->probes = 1;
ifp->idev->if_flags |= IF_RS_SENT;
- addrconf_mod_timer(ifp, AC_RS, ifp->idev->cnf.rtr_solicit_interval);
+ addrconf_mod_timer(ifp, AC_RS, ifp->idev->rs_interval);
spin_unlock_bh(&ifp->lock);
}
}
@@ -4270,6 +4299,8 @@
array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
array[DEVCONF_RTR_SOLICIT_INTERVAL] =
jiffies_to_msecs(cnf->rtr_solicit_interval);
+ array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
+ jiffies_to_msecs(cnf->rtr_solicit_max_interval);
array[DEVCONF_RTR_SOLICIT_DELAY] =
jiffies_to_msecs(cnf->rtr_solicit_delay);
array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
@@ -4288,6 +4319,7 @@
array[DEVCONF_RTR_PROBE_INTERVAL] =
jiffies_to_msecs(cnf->rtr_probe_interval);
#ifdef CONFIG_IPV6_ROUTE_INFO
+ array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
#endif
#endif
@@ -4452,7 +4484,7 @@
return -EINVAL;
if (!ipv6_accept_ra(idev))
return -EINVAL;
- if (idev->cnf.rtr_solicits <= 0)
+ if (idev->cnf.rtr_solicits == 0)
return -EINVAL;
write_lock_bh(&idev->lock);
@@ -4907,6 +4939,13 @@
.proc_handler = proc_dointvec_jiffies,
},
{
+ .procname = "router_solicitation_max_interval",
+ .data = &ipv6_devconf.rtr_solicit_max_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
.procname = "router_solicitation_delay",
.data = &ipv6_devconf.rtr_solicit_delay,
.maxlen = sizeof(int),
@@ -4995,6 +5034,13 @@
},
#ifdef CONFIG_IPV6_ROUTE_INFO
{
+ .procname = "accept_ra_rt_info_min_plen",
+ .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "accept_ra_rt_info_max_plen",
.data = &ipv6_devconf.accept_ra_rt_info_max_plen,
.maxlen = sizeof(int),
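Note: with the rfc3315_s14_backoff_* helpers, router solicitations now back off exponentially instead of repeating at a fixed interval, and the relaxed rtr_solicits checks (!= 0, < 0) appear to let a negative value mean "solicit indefinitely". A rough sketch of the resulting schedule, assuming the default 4 s initial interval and a 3600 s rtr_solicit_max_interval (each step is jittered by roughly +/-10%):

	s32 rt = rfc3315_s14_backoff_init(4 * HZ);       /* ~4 s  */
	rt = rfc3315_s14_backoff_update(rt, 3600 * HZ);  /* ~8 s  */
	rt = rfc3315_s14_backoff_update(rt, 3600 * HZ);  /* ~16 s */
	/* ... keeps roughly doubling until clamped near 3600 s. */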
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 04e88b5..e3a2601 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -696,7 +696,7 @@
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
- fl6.flowi6_uid = sock_i_uid(sk);
+ fl6.flowi6_uid = sk->sk_uid;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
rcu_read_lock();
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index b903e19..15e41c5 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -628,9 +628,9 @@
return;
if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, 0, 0, sock_net_uid(net, NULL));
else
- ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
+ ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
xfrm_state_put(x);
}
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 7d76630..61c7a13 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -162,7 +162,7 @@
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
- fl6.flowi6_uid = sock_i_uid(sk);
+ fl6.flowi6_uid = sk->sk_uid;
if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
fl6.flowi6_oif = np->mcast_oif;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index fdc81cb..4ad6525 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -447,9 +447,9 @@
return;
if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, 0, 0, sock_net_uid(net, NULL));
else
- ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
+ ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
xfrm_state_put(x);
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 2a6a0e7..b022004 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -90,9 +90,9 @@
struct net *net = dev_net(skb->dev);
if (type == ICMPV6_PKT_TOOBIG)
- ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
+ ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
else if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, 0, 0, sock_net_uid(net, NULL));
if (!(type & ICMPV6_INFOMSG_MASK))
if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
@@ -467,6 +467,7 @@
fl6.flowi6_oif = iif;
fl6.fl6_icmp_type = type;
fl6.fl6_icmp_code = code;
+ fl6.flowi6_uid = sock_net_uid(net, NULL);
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
sk = icmpv6_xmit_lock(net);
@@ -572,6 +573,7 @@
fl6.flowi6_oif = skb->dev->ifindex;
fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
fl6.flowi6_mark = mark;
+ fl6.flowi6_uid = sock_net_uid(net, NULL);
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
sk = icmpv6_xmit_lock(net);
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 157f2b2..a2072a8 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -86,7 +86,7 @@
fl6->flowi6_mark = inet_rsk(req)->ir_mark;
fl6->fl6_dport = inet_rsk(req)->rmt_port;
fl6->fl6_sport = inet_rsk(req)->loc_port;
- fl6->flowi6_uid = sock_i_uid(sk);
+ fl6->flowi6_uid = sk->sk_uid;
security_req_classify_flow(req, flowi6_to_flowi(fl6));
dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
@@ -214,7 +214,7 @@
fl6->flowi6_mark = sk->sk_mark;
fl6->fl6_sport = inet->inet_sport;
fl6->fl6_dport = inet->inet_dport;
- fl6->flowi6_uid = sock_i_uid(sk);
+ fl6->flowi6_uid = sk->sk_uid;
security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
rcu_read_lock();
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6391c7d..3a9420e 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -799,6 +799,8 @@
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
if (err != 0) {
/* XXX: send ICMP error even if DF is not set. */
@@ -849,6 +851,8 @@
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
if (err != 0) {
if (err == -EMSGSIZE)
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 14f46af..cb30f13 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1066,6 +1066,8 @@
memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
fl6.flowi6_proto = IPPROTO_IPIP;
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
dsfield = ipv4_get_dsfield(iph);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
@@ -1117,6 +1119,7 @@
memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
fl6.flowi6_proto = IPPROTO_IPV6;
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
dsfield = ipv6_get_dsfield(ipv6h);
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index a1beb59..a4a4e1c 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -76,9 +76,9 @@
return;
if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, 0, 0, sock_net_uid(net, NULL));
else
- ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
+ ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
xfrm_state_put(x);
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3939f77..ddff922 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1286,6 +1286,8 @@
ri->prefix_len == 0)
continue;
#endif
+ if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+ continue;
if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
continue;
rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3,
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index d38e6a8..e48d26b 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -27,6 +27,7 @@
struct flowi6 fl6 = {
.flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
.flowi6_mark = skb->mark,
+ .flowi6_uid = sock_net_uid(net, skb->sk),
.daddr = iph->daddr,
.saddr = iph->saddr,
};
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 27de137..6a38935 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -167,7 +167,7 @@
fl6.daddr = *daddr;
fl6.flowi6_oif = oif;
fl6.flowi6_mark = sk->sk_mark;
- fl6.flowi6_uid = sock_i_uid(sk);
+ fl6.flowi6_uid = sk->sk_uid;
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 5693cf2..c9ccdd7 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -760,7 +760,7 @@
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark;
- fl6.flowi6_uid = sock_i_uid(sk);
+ fl6.flowi6_uid = sk->sk_uid;
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0bc8046..51fe515 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1171,11 +1171,12 @@
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
ip6_update_pmtu(skb, sock_net(sk), mtu,
- sk->sk_bound_dev_if, sk->sk_mark, sock_i_uid(sk));
+ sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
-void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
+ kuid_t uid)
{
const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
struct dst_entry *dst;
@@ -1188,6 +1189,7 @@
fl6.daddr = iph->daddr;
fl6.saddr = iph->saddr;
fl6.flowlabel = ip6_flowinfo(iph);
+ fl6.flowi6_uid = uid;
dst = ip6_route_output(net, NULL, &fl6);
if (!dst->error)
@@ -1198,7 +1200,8 @@
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
- ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
+ ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
+ sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);
@@ -2635,6 +2638,9 @@
if (tb[RTA_OIF])
oif = nla_get_u32(tb[RTA_OIF]);
+ if (tb[RTA_MARK])
+ fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
+
if (tb[RTA_UID])
fl6.flowi6_uid = make_kuid(current_user_ns(),
nla_get_u32(tb[RTA_UID]));
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 701d065..0428544 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -243,7 +243,7 @@
fl6.flowi6_mark = ireq->ir_mark;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
fl6.fl6_sport = inet_sk(sk)->inet_sport;
- fl6.flowi6_uid = sock_i_uid(sk);
+ fl6.flowi6_uid = sk->sk_uid;
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6a9e5db..3ba57fc8 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -253,7 +253,7 @@
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
- fl6.flowi6_uid = sock_i_uid(sk);
+ fl6.flowi6_uid = sk->sk_uid;
opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
final_p = fl6_update_dst(&fl6, opt, &final);
@@ -722,15 +722,15 @@
};
#endif
-static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
- u32 tsval, u32 tsecr,
+static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
+ u32 ack, u32 win, u32 tsval, u32 tsecr,
struct tcp_md5sig_key *key, int rst, u8 tclass)
{
const struct tcphdr *th = tcp_hdr(skb);
struct tcphdr *t1;
struct sk_buff *buff;
struct flowi6 fl6;
- struct net *net = dev_net(skb_dst(skb)->dev);
+ struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
struct sock *ctl_sk = net->ipv6.tcp_sk;
unsigned int tot_len = sizeof(struct tcphdr);
struct dst_entry *dst;
@@ -798,6 +798,7 @@
fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
fl6.fl6_dport = t1->dest;
fl6.fl6_sport = t1->source;
+ fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
/* Pass a socket to ip6_dst_lookup either it is for RST
@@ -872,7 +873,7 @@
ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
(th->doff << 2);
- tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, key, 1, 0);
+ tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, key, 1, 0);
#ifdef CONFIG_TCP_MD5SIG
release_sk1:
@@ -883,11 +884,12 @@
#endif
}
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
- u32 win, u32 tsval, u32 tsecr,
+static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb,
+ u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr,
struct tcp_md5sig_key *key, u8 tclass)
{
- tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, key, 0, tclass);
+ tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, key, 0,
+ tclass);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -895,7 +897,7 @@
struct inet_timewait_sock *tw = inet_twsk(sk);
struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
- tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_time_stamp + tcptw->tw_ts_offset,
tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
@@ -907,7 +909,7 @@
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
- tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
+ tcp_v6_send_ack(sk, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
req->rcv_wnd, tcp_time_stamp, req->ts_recent,
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1791e97..9fc2637 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1150,7 +1150,7 @@
fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
fl6.flowi6_mark = sk->sk_mark;
- fl6.flowi6_uid = sock_i_uid(sk);
+ fl6.flowi6_uid = sk->sk_uid;
if (msg->msg_controllen) {
opt = &opt_space;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 19705cb..e0d1c86 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -515,6 +515,7 @@
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark;
+ fl6.flowi6_uid = sk->sk_uid;
if (lsa) {
if (addr_len < SIN6_LEN_RFC2133)
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 9ce6228..0642f32 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -320,7 +320,7 @@
st_entry->tag,
get_uid_from_tag(st_entry->tag));
rb_erase(&st_entry->sock_node, st_to_free_tree);
- sockfd_put(st_entry->socket);
+ sock_put(st_entry->sk);
kfree(st_entry);
}
}
@@ -1791,8 +1791,11 @@
}
#ifdef DDEBUG
-/* This function is not in xt_qtaguid_print.c because of locks visibility */
-static void prdebug_full_state(int indent_level, const char *fmt, ...)
+/*
+ * This function is not in xt_qtaguid_print.c because of locks visibility.
+ * The lock of sock_tag_list must be acquired before calling this function.
+ */
+static void prdebug_full_state_locked(int indent_level, const char *fmt, ...)
{
va_list args;
char *fmt_buff;
@@ -1813,16 +1816,12 @@
kfree(buff);
va_end(args);
- spin_lock_bh(&sock_tag_list_lock);
prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
- spin_unlock_bh(&sock_tag_list_lock);
- spin_lock_bh(&sock_tag_list_lock);
spin_lock_bh(&uid_tag_data_tree_lock);
prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
spin_unlock_bh(&uid_tag_data_tree_lock);
- spin_unlock_bh(&sock_tag_list_lock);
spin_lock_bh(&iface_stat_list_lock);
prdebug_iface_stat_list(indent_level, &iface_stat_list);
@@ -1831,7 +1830,7 @@
pr_debug("qtaguid: %s(): }\n", __func__);
}
#else
-static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
+static void prdebug_full_state_locked(int indent_level, const char *fmt, ...) {}
#endif
struct proc_ctrl_print_info {
@@ -1907,12 +1906,12 @@
{
struct sock_tag *sock_tag_entry = v;
uid_t uid;
- long f_count;
CT_DEBUG("qtaguid: proc ctrl pid=%u tgid=%u uid=%u\n",
current->pid, current->tgid, current_fsuid());
if (sock_tag_entry != SEQ_START_TOKEN) {
+ int sk_ref_count;
uid = get_uid_from_tag(sock_tag_entry->tag);
CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
"pid=%u\n",
@@ -1921,13 +1920,13 @@
uid,
sock_tag_entry->pid
);
- f_count = atomic_long_read(
- &sock_tag_entry->socket->file->f_count);
+ sk_ref_count = atomic_read(
+ &sock_tag_entry->sk->sk_refcnt);
seq_printf(m, "sock=%pK tag=0x%llx (uid=%u) pid=%u "
- "f_count=%lu\n",
+ "f_count=%d\n",
sock_tag_entry->sk,
sock_tag_entry->tag, uid,
- sock_tag_entry->pid, f_count);
+ sock_tag_entry->pid, sk_ref_count);
} else {
seq_printf(m, "events: sockets_tagged=%llu "
"sockets_untagged=%llu "
@@ -1954,8 +1953,11 @@
(u64)atomic64_read(&qtu_events.match_no_sk),
(u64)atomic64_read(&qtu_events.match_no_sk_file));
- /* Count the following as part of the last item_index */
- prdebug_full_state(0, "proc ctrl");
+ /* Count the following as part of the last item_index. No need
+ * to lock the sock_tag_list here since it is already locked when
+ * starting the seq_file operation
+ */
+ prdebug_full_state_locked(0, "proc ctrl");
}
return 0;
@@ -2215,8 +2217,8 @@
current_fsuid());
goto err;
}
- CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
- input, atomic_long_read(&el_socket->file->f_count),
+ CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->sk_refcnt=%d ->sk=%p\n",
+ input, atomic_read(&el_socket->sk->sk_refcnt),
el_socket->sk);
if (argc < 3) {
acct_tag = make_atag_from_value(0);
@@ -2257,16 +2259,9 @@
struct tag_ref *prev_tag_ref_entry;
CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
- "st@%p ...->f_count=%ld\n",
+ "st@%p ...->sk_refcnt=%d\n",
input, el_socket->sk, sock_tag_entry,
- atomic_long_read(&el_socket->file->f_count));
- /*
- * This is a re-tagging, so release the sock_fd that was
- * locked at the time of the 1st tagging.
- * There is still the ref from this call's sockfd_lookup() so
- * it can be done within the spinlock.
- */
- sockfd_put(sock_tag_entry->socket);
+ atomic_read(&el_socket->sk->sk_refcnt));
prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
&uid_tag_data_entry);
BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
@@ -2286,8 +2281,12 @@
res = -ENOMEM;
goto err_tag_unref_put;
}
+ /*
+ * Hold the sk refcount here to make sure the sk pointer cannot
+ * be freed and reused
+ */
+ sock_hold(el_socket->sk);
sock_tag_entry->sk = el_socket->sk;
- sock_tag_entry->socket = el_socket;
sock_tag_entry->pid = current->tgid;
sock_tag_entry->tag = combine_atag_with_uid(acct_tag,
uid);
@@ -2315,10 +2314,11 @@
atomic64_inc(&qtu_events.sockets_tagged);
}
spin_unlock_bh(&sock_tag_list_lock);
- /* We keep the ref to the socket (file) until it is untagged */
- CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
+ /* We keep the ref to the sk until it is untagged */
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->sk_refcnt=%d\n",
input, sock_tag_entry,
- atomic_long_read(&el_socket->file->f_count));
+ atomic_read(&el_socket->sk->sk_refcnt));
+ sockfd_put(el_socket);
return 0;
err_tag_unref_put:
@@ -2326,8 +2326,8 @@
tag_ref_entry->num_sock_tags--;
free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
err_put:
- CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
- input, atomic_long_read(&el_socket->file->f_count) - 1);
+ CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->sk_refcnt=%d\n",
+ input, atomic_read(&el_socket->sk->sk_refcnt) - 1);
/* Release the sock_fd that was grabbed by sockfd_lookup(). */
sockfd_put(el_socket);
return res;
@@ -2343,17 +2343,13 @@
int sock_fd = 0;
struct socket *el_socket;
int res, argc;
- struct sock_tag *sock_tag_entry;
- struct tag_ref *tag_ref_entry;
- struct uid_tag_data *utd_entry;
- struct proc_qtu_data *pqd_entry;
argc = sscanf(input, "%c %d", &cmd, &sock_fd);
CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
input, argc, cmd, sock_fd);
if (argc < 2) {
res = -EINVAL;
- goto err;
+ return res;
}
el_socket = sockfd_lookup(sock_fd, &res); /* This locks the file */
if (!el_socket) {
@@ -2361,17 +2357,31 @@
" sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
input, sock_fd, res, current->pid, current->tgid,
current_fsuid());
- goto err;
+ return res;
}
CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
input, atomic_long_read(&el_socket->file->f_count),
el_socket->sk);
+ res = qtaguid_untag(el_socket, false);
+ sockfd_put(el_socket);
+ return res;
+}
+
+int qtaguid_untag(struct socket *el_socket, bool kernel)
+{
+ int res;
+ pid_t pid;
+ struct sock_tag *sock_tag_entry;
+ struct tag_ref *tag_ref_entry;
+ struct uid_tag_data *utd_entry;
+ struct proc_qtu_data *pqd_entry;
+
spin_lock_bh(&sock_tag_list_lock);
sock_tag_entry = get_sock_stat_nl(el_socket->sk);
if (!sock_tag_entry) {
spin_unlock_bh(&sock_tag_list_lock);
res = -EINVAL;
- goto err_put;
+ return res;
}
/*
* The socket already belongs to the current process
@@ -2383,20 +2393,26 @@
BUG_ON(!tag_ref_entry);
BUG_ON(tag_ref_entry->num_sock_tags <= 0);
spin_lock_bh(&uid_tag_data_tree_lock);
+ if (kernel)
+ pid = sock_tag_entry->pid;
+ else
+ pid = current->tgid;
pqd_entry = proc_qtu_data_tree_search(
- &proc_qtu_data_tree, current->tgid);
+ &proc_qtu_data_tree, pid);
/*
* TODO: remove if, and start failing.
* At first, we want to catch user-space code that is not
* opening the /dev/xt_qtaguid.
*/
- if (IS_ERR_OR_NULL(pqd_entry))
+ if (IS_ERR_OR_NULL(pqd_entry) || !sock_tag_entry->list.next) {
pr_warn_once("qtaguid: %s(): "
"User space forgot to open /dev/xt_qtaguid? "
- "pid=%u tgid=%u uid=%u\n", __func__,
- current->pid, current->tgid, current_fsuid());
- else
+ "pid=%u tgid=%u sk_pid=%u, uid=%u\n", __func__,
+ current->pid, current->tgid, sock_tag_entry->pid,
+ from_kuid(&init_user_ns, current_fsuid()));
+ } else {
list_del(&sock_tag_entry->list);
+ }
spin_unlock_bh(&uid_tag_data_tree_lock);
/*
* We don't free tag_ref from the utd_entry here,
@@ -2405,30 +2421,17 @@
tag_ref_entry->num_sock_tags--;
spin_unlock_bh(&sock_tag_list_lock);
/*
- * Release the sock_fd that was grabbed at tag time,
- * and once more for the sockfd_lookup() here.
+ * Release the sock_fd that was grabbed at tag time.
*/
- sockfd_put(sock_tag_entry->socket);
- CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
- input, sock_tag_entry,
- atomic_long_read(&el_socket->file->f_count) - 1);
- sockfd_put(el_socket);
+ sock_put(sock_tag_entry->sk);
+ CT_DEBUG("qtaguid: done. st@%p ...->sk_refcnt=%d\n",
+ sock_tag_entry,
+ atomic_read(&el_socket->sk->sk_refcnt));
kfree(sock_tag_entry);
atomic64_inc(&qtu_events.sockets_untagged);
return 0;
-
-err_put:
- CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
- input, atomic_long_read(&el_socket->file->f_count) - 1);
- /* Release the sock_fd that was grabbed by sockfd_lookup(). */
- sockfd_put(el_socket);
- return res;
-
-err:
- CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
- return res;
}
static int qtaguid_ctrl_parse(const char *input, int count)
@@ -2860,8 +2863,10 @@
sock_tag_tree_erase(&st_to_free_tree);
- prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
+ spin_lock_bh(&sock_tag_list_lock);
+ prdebug_full_state_locked(0, "%s(): pid=%u tgid=%u", __func__,
current->pid, current->tgid);
+ spin_unlock_bh(&sock_tag_list_lock);
return 0;
}
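Note: the xt_qtaguid changes above switch the tag bookkeeping from pinning the struct socket/file to pinning the struct sock itself. The pairing the code now relies on, in sketch form:

	sock_hold(el_socket->sk);       /* taken when the socket is tagged */
	/* ... sk stays valid for the lifetime of the sock_tag entry ... */
	sock_put(sock_tag_entry->sk);   /* dropped when it is untagged */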
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
index 6dc14a9..8178fbd 100644
--- a/net/netfilter/xt_qtaguid_internal.h
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -256,8 +256,6 @@
struct sock_tag {
struct rb_node sock_node;
struct sock *sk; /* Only used as a number, never dereferenced */
- /* The socket is needed for sockfd_put() */
- struct socket *socket;
/* Used to associate with a given pid */
struct list_head list; /* in proc_qtu_data.sock_tag_list */
pid_t pid;
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
index f6a00a3..2a7190d 100644
--- a/net/netfilter/xt_qtaguid_print.c
+++ b/net/netfilter/xt_qtaguid_print.c
@@ -24,7 +24,7 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
-
+#include <net/sock.h>
#include "xt_qtaguid_internal.h"
#include "xt_qtaguid_print.h"
@@ -237,10 +237,10 @@
tag_str = pp_tag_t(&st->tag);
res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
"sock_node=rb_node{...}, "
- "sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
+ "sk=%p (f_count=%d), list=list_head{...}, "
"pid=%u, tag=%s}",
- st, st->sk, st->socket, atomic_long_read(
- &st->socket->file->f_count),
+ st, st->sk, atomic_read(
+ &st->sk->sk_refcnt),
st->pid, tag_str);
_bug_on_err_or_null(res);
kfree(tag_str);
diff --git a/net/socket.c b/net/socket.c
index 59c9896..aea6b11 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -526,9 +526,23 @@
return used;
}
+int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ int err = simple_setattr(dentry, iattr);
+
+ if (!err && (iattr->ia_valid & ATTR_UID)) {
+ struct socket *sock = SOCKET_I(dentry->d_inode);
+
+ sock->sk->sk_uid = iattr->ia_uid;
+ }
+
+ return err;
+}
+
static const struct inode_operations sockfs_inode_ops = {
.getxattr = sockfs_getxattr,
.listxattr = sockfs_listxattr,
+ .setattr = sockfs_setattr,
};
/**
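Note: sockfs_setattr() above propagates an ownership change on the socket inode into sk->sk_uid. An illustrative userspace sketch, not part of the patch (new_owner is a placeholder):

	#include <sys/types.h>
	#include <sys/socket.h>
	#include <unistd.h>

	/* After this change, fchown() on a socket fd re-targets per-UID
	 * routing and accounting for that socket. */
	static int reassign_socket_owner(uid_t new_owner)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return -1;
		return fchown(fd, new_owner, (gid_t)-1); /* group untouched */
	}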
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f927434..3290bb3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -389,6 +389,14 @@
[NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
[NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
[NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
+ [NL80211_ATTR_IFACE_SOCKET_OWNER] = { .type = NLA_FLAG },
+ [NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY },
+ [NL80211_ATTR_USE_RRM] = { .type = NLA_FLAG },
+ [NL80211_ATTR_TSID] = { .type = NLA_U8 },
+ [NL80211_ATTR_USER_PRIO] = { .type = NLA_U8 },
+ [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
+ [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
+ [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
};
/* policy for the key attributes */
@@ -5199,6 +5207,43 @@
return n_channels;
}
+static int nl80211_parse_random_mac(struct nlattr **attrs,
+ u8 *mac_addr, u8 *mac_addr_mask)
+{
+ int i;
+
+ if (!attrs[NL80211_ATTR_MAC] && !attrs[NL80211_ATTR_MAC_MASK]) {
+ memset(mac_addr, 0, ETH_ALEN);
+ memset(mac_addr_mask, 0, ETH_ALEN);
+ mac_addr[0] = 0x2;
+ mac_addr_mask[0] = 0x3;
+
+ return 0;
+ }
+
+ /* need both or none */
+ if (!attrs[NL80211_ATTR_MAC] || !attrs[NL80211_ATTR_MAC_MASK])
+ return -EINVAL;
+
+ memcpy(mac_addr, nla_data(attrs[NL80211_ATTR_MAC]), ETH_ALEN);
+ memcpy(mac_addr_mask, nla_data(attrs[NL80211_ATTR_MAC_MASK]), ETH_ALEN);
+
+ /* don't allow or configure an mcast address */
+ if (!is_multicast_ether_addr(mac_addr_mask) ||
+ is_multicast_ether_addr(mac_addr))
+ return -EINVAL;
+
+ /*
+ * allow users to pass a MAC address that has bits set outside
+ * of the mask, but don't bother drivers with having to deal
+ * with such bits
+ */
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] &= mac_addr_mask[i];
+
+ return 0;
+}
+
static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -5388,6 +5433,25 @@
err = -EOPNOTSUPP;
goto out_free;
}
+
+ if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ if (!(wiphy->features &
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR)) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ if (wdev->current_bss) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ err = nl80211_parse_random_mac(info->attrs,
+ request->mac_addr,
+ request->mac_addr_mask);
+ if (err)
+ goto out_free;
+ }
}
request->no_cck =
@@ -5417,14 +5481,13 @@
return err;
}
-static int nl80211_start_sched_scan(struct sk_buff *skb,
- struct genl_info *info)
+
+static struct cfg80211_sched_scan_request *
+nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct nlattr **attrs)
{
struct cfg80211_sched_scan_request *request;
- struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
struct nlattr *attr;
- struct wiphy *wiphy;
int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i;
u32 interval;
enum ieee80211_band band;
@@ -5432,27 +5495,21 @@
struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF;
- if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
- !rdev->ops->sched_scan_start)
- return -EOPNOTSUPP;
+ if (!is_valid_ie_attr(attrs[NL80211_ATTR_IE]))
+ return ERR_PTR(-EINVAL);
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
- return -EINVAL;
+ if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+ return ERR_PTR(-EINVAL);
- if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
- return -EINVAL;
-
- interval = nla_get_u32(info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
+ interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
if (interval == 0)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- wiphy = &rdev->wiphy;
-
- if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
+ if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
n_channels = validate_scan_freqs(
- info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
+ attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
if (!n_channels)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
} else {
n_channels = 0;
@@ -5461,13 +5518,13 @@
n_channels += wiphy->bands[band]->n_channels;
}
- if (info->attrs[NL80211_ATTR_SCAN_SSIDS])
- nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
+ if (attrs[NL80211_ATTR_SCAN_SSIDS])
+ nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS],
tmp)
n_ssids++;
if (n_ssids > wiphy->max_sched_scan_ssids)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
/*
* First, count the number of 'real' matchsets. Due to an issue with
@@ -5478,9 +5535,9 @@
* older userspace that treated a matchset with only the RSSI as the
* global RSSI for all other matchsets - if there are other matchsets.
*/
- if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
+ if (attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
nla_for_each_nested(attr,
- info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+ attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
tmp) {
struct nlattr *rssi;
@@ -5488,7 +5545,7 @@
nla_data(attr), nla_len(attr),
nl80211_match_policy);
if (err)
- return err;
+ return ERR_PTR(err);
/* add other standalone attributes here */
if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]) {
n_match_sets++;
@@ -5505,32 +5562,23 @@
n_match_sets = 1;
if (n_match_sets > wiphy->max_match_sets)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- if (info->attrs[NL80211_ATTR_IE])
- ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ if (attrs[NL80211_ATTR_IE])
+ ie_len = nla_len(attrs[NL80211_ATTR_IE]);
else
ie_len = 0;
if (ie_len > wiphy->max_sched_scan_ie_len)
- return -EINVAL;
-
- mutex_lock(&rdev->sched_scan_mtx);
-
- if (rdev->sched_scan_req) {
- err = -EINPROGRESS;
- goto out;
- }
+ return ERR_PTR(-EINVAL);
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->match_sets) * n_match_sets
+ sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
- if (!request) {
- err = -ENOMEM;
- goto out;
- }
+ if (!request)
+ return ERR_PTR(-ENOMEM);
if (n_ssids)
request->ssids = (void *)&request->channels[n_channels];
@@ -5555,10 +5603,10 @@
request->n_match_sets = n_match_sets;
i = 0;
- if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
+ if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
/* user specified, bail out if channel not found */
nla_for_each_nested(attr,
- info->attrs[NL80211_ATTR_SCAN_FREQUENCIES],
+ attrs[NL80211_ATTR_SCAN_FREQUENCIES],
tmp) {
struct ieee80211_channel *chan;
@@ -5604,8 +5652,8 @@
request->n_channels = i;
i = 0;
- if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
- nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
+ if (attrs[NL80211_ATTR_SCAN_SSIDS]) {
+ nla_for_each_nested(attr, attrs[NL80211_ATTR_SCAN_SSIDS],
tmp) {
if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
@@ -5619,9 +5667,9 @@
}
i = 0;
- if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
+ if (attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
nla_for_each_nested(attr,
- info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+ attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
tmp) {
struct nlattr *ssid, *rssi;
@@ -5673,42 +5721,91 @@
request->rssi_thold = request->min_rssi_thold;
- if (info->attrs[NL80211_ATTR_IE]) {
- request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ if (attrs[NL80211_ATTR_IE]) {
+ request->ie_len = nla_len(attrs[NL80211_ATTR_IE]);
memcpy((void *)request->ie,
- nla_data(info->attrs[NL80211_ATTR_IE]),
+ nla_data(attrs[NL80211_ATTR_IE]),
request->ie_len);
}
- if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) {
+ if (attrs[NL80211_ATTR_SCAN_FLAGS]) {
request->flags = nla_get_u32(
- info->attrs[NL80211_ATTR_SCAN_FLAGS]);
- if (((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
- !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) ||
- ((request->flags & NL80211_SCAN_FLAG_FLUSH) &&
- !(wiphy->features & NL80211_FEATURE_SCAN_FLUSH))) {
+ attrs[NL80211_ATTR_SCAN_FLAGS]);
+ if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+ !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) {
err = -EOPNOTSUPP;
goto out_free;
}
+
+ if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ u32 flg = NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR;
+
+ if (!wdev) /* must be net-detect */
+ flg = NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+
+ if (!(wiphy->features & flg)) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ if (wdev && wdev->current_bss) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ err = nl80211_parse_random_mac(attrs, request->mac_addr,
+ request->mac_addr_mask);
+ if (err)
+ goto out_free;
+ }
}
- request->dev = dev;
- request->wiphy = &rdev->wiphy;
request->interval = interval;
request->scan_start = jiffies;
- err = rdev_sched_scan_start(rdev, dev, request);
- if (!err) {
- rdev->sched_scan_req = request;
- nl80211_send_sched_scan(rdev, dev,
- NL80211_CMD_START_SCHED_SCAN);
- goto out;
- }
+ return request;
out_free:
kfree(request);
-out:
- mutex_unlock(&rdev->sched_scan_mtx);
+ return ERR_PTR(err);
+}
+
+static int nl80211_start_sched_scan(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
+ !rdev->ops->sched_scan_start)
+ return -EOPNOTSUPP;
+
+ if (rdev->sched_scan_req)
+ return -EINPROGRESS;
+
+ rdev->sched_scan_req = nl80211_parse_sched_scan(&rdev->wiphy, wdev,
+ info->attrs);
+ err = PTR_ERR_OR_ZERO(rdev->sched_scan_req);
+ if (err)
+ goto out_err;
+
+ err = rdev_sched_scan_start(rdev, dev, rdev->sched_scan_req);
+ if (err)
+ goto out_free;
+
+ rdev->sched_scan_req->dev = dev;
+ rdev->sched_scan_req->wiphy = &rdev->wiphy;
+
+ nl80211_send_sched_scan(rdev, dev,
+ NL80211_CMD_START_SCHED_SCAN);
+ return 0;
+
+out_free:
+ kfree(rdev->sched_scan_req);
+out_err:
+ rdev->sched_scan_req = NULL;
return err;
}
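Note: nl80211_parse_random_mac() above hands the driver an addr/mask pair for randomized scans. A sketch of how a driver is expected to derive each scan address from that pair, assuming the usual cfg80211 convention that bits covered by the mask stay fixed and the rest are randomized:

	static void fill_random_scan_addr(u8 *out, const u8 *addr, const u8 *mask)
	{
		int i;

		for (i = 0; i < ETH_ALEN; i++) {
			u8 r;

			get_random_bytes(&r, 1);
			out[i] = (addr[i] & mask[i]) | (r & ~mask[i]);
		}
	}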
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index ab4ef72..daed579d 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -220,7 +220,7 @@
.uinfo = {
.auth = {
- .icv_truncbits = 96,
+ .icv_truncbits = 128,
.icv_fullbits = 256,
}
},
diff --git a/security/commoncap.c b/security/commoncap.c
index 0405522..0add393 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -283,6 +283,16 @@
new->cap_effective = *effective;
new->cap_inheritable = *inheritable;
new->cap_permitted = *permitted;
+
+ /*
+ * Mask off ambient bits that are no longer both permitted and
+ * inheritable.
+ */
+ new->cap_ambient = cap_intersect(new->cap_ambient,
+ cap_intersect(*permitted,
+ *inheritable));
+ if (WARN_ON(!cap_ambient_invariant_ok(new)))
+ return -EINVAL;
return 0;
}
@@ -363,6 +373,7 @@
/*
* pP' = (X & fP) | (pI & fI)
+ * The addition of pA' is handled later.
*/
new->cap_permitted.cap[i] =
(new->cap_bset.cap[i] & permitted) |
@@ -494,10 +505,13 @@
{
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
- bool effective, has_cap = false;
+ bool effective, has_cap = false, is_setid;
int ret;
kuid_t root_uid;
+ if (WARN_ON(!cap_ambient_invariant_ok(old)))
+ return -EPERM;
+
effective = false;
ret = get_file_caps(bprm, &effective, &has_cap);
if (ret < 0)
@@ -542,8 +556,9 @@
*
* In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
*/
- if ((!uid_eq(new->euid, old->uid) ||
- !gid_eq(new->egid, old->gid) ||
+ is_setid = !uid_eq(new->euid, old->uid) || !gid_eq(new->egid, old->gid);
+
+ if ((is_setid ||
!cap_issubset(new->cap_permitted, old->cap_permitted)) &&
bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
/* downgrade; they get no more than they had, and maybe less */
@@ -559,10 +574,28 @@
new->suid = new->fsuid = new->euid;
new->sgid = new->fsgid = new->egid;
+ /* File caps or setid cancels ambient. */
+ if (has_cap || is_setid)
+ cap_clear(new->cap_ambient);
+
+ /*
+ * Now that we've computed pA', update pP' to give:
+ * pP' = (X & fP) | (pI & fI) | pA'
+ */
+ new->cap_permitted = cap_combine(new->cap_permitted, new->cap_ambient);
+
+ /*
+ * Set pE' = (fE ? pP' : pA'). Because pA' is zero if fE is set,
+ * this is the same as pE' = (fE ? pP' : 0) | pA'.
+ */
if (effective)
new->cap_effective = new->cap_permitted;
else
- cap_clear(new->cap_effective);
+ new->cap_effective = new->cap_ambient;
+
+ if (WARN_ON(!cap_ambient_invariant_ok(new)))
+ return -EPERM;
+
bprm->cap_effective = effective;
/*
@@ -577,7 +610,7 @@
* Number 1 above might fail if you don't have a full bset, but I think
* that is interesting information to audit.
*/
- if (!cap_isclear(new->cap_effective)) {
+ if (!cap_issubset(new->cap_effective, new->cap_ambient)) {
if (!cap_issubset(CAP_FULL_SET, new->cap_effective) ||
!uid_eq(new->euid, root_uid) || !uid_eq(new->uid, root_uid) ||
issecure(SECURE_NOROOT)) {
@@ -588,6 +621,10 @@
}
new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
+
+ if (WARN_ON(!cap_ambient_invariant_ok(new)))
+ return -EPERM;
+
return 0;
}
@@ -609,7 +646,7 @@
if (!uid_eq(cred->uid, root_uid)) {
if (bprm->cap_effective)
return 1;
- if (!cap_isclear(cred->cap_permitted))
+ if (!cap_issubset(cred->cap_permitted, cred->cap_ambient))
return 1;
}
@@ -711,10 +748,18 @@
uid_eq(old->suid, root_uid)) &&
(!uid_eq(new->uid, root_uid) &&
!uid_eq(new->euid, root_uid) &&
- !uid_eq(new->suid, root_uid)) &&
- !issecure(SECURE_KEEP_CAPS)) {
- cap_clear(new->cap_permitted);
- cap_clear(new->cap_effective);
+ !uid_eq(new->suid, root_uid))) {
+ if (!issecure(SECURE_KEEP_CAPS)) {
+ cap_clear(new->cap_permitted);
+ cap_clear(new->cap_effective);
+ }
+
+ /*
+ * Pre-ambient programs expect setresuid to nonroot followed
+ * by exec to drop capabilities. We should make sure that
+ * this remains the case.
+ */
+ cap_clear(new->cap_ambient);
}
if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid))
cap_clear(new->cap_effective);
@@ -836,15 +881,20 @@
* Implement PR_CAPBSET_DROP. Attempt to remove the specified capability from
* the current task's bounding set. Returns 0 on success, -ve on error.
*/
-static long cap_prctl_drop(struct cred *new, unsigned long cap)
+static int cap_prctl_drop(unsigned long cap)
{
- if (!capable(CAP_SETPCAP))
+ struct cred *new;
+
+ if (!ns_capable(current_user_ns(), CAP_SETPCAP))
return -EPERM;
if (!cap_valid(cap))
return -EINVAL;
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
cap_lower(new->cap_bset, cap);
- return 0;
+ return commit_creds(new);
}
/**
@@ -862,26 +912,17 @@
int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
+ const struct cred *old = current_cred();
struct cred *new;
- long error = 0;
-
- new = prepare_creds();
- if (!new)
- return -ENOMEM;
switch (option) {
case PR_CAPBSET_READ:
- error = -EINVAL;
if (!cap_valid(arg2))
- goto error;
- error = !!cap_raised(new->cap_bset, arg2);
- goto no_change;
+ return -EINVAL;
+ return !!cap_raised(old->cap_bset, arg2);
case PR_CAPBSET_DROP:
- error = cap_prctl_drop(new, arg2);
- if (error < 0)
- goto error;
- goto changed;
+ return cap_prctl_drop(arg2);
/*
* The next four prctl's remain to assist with transitioning a
@@ -903,10 +944,9 @@
* capability-based-privilege environment.
*/
case PR_SET_SECUREBITS:
- error = -EPERM;
- if ((((new->securebits & SECURE_ALL_LOCKS) >> 1)
- & (new->securebits ^ arg2)) /*[1]*/
- || ((new->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/
+ if ((((old->securebits & SECURE_ALL_LOCKS) >> 1)
+ & (old->securebits ^ arg2)) /*[1]*/
+ || ((old->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/
|| (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
|| (cap_capable(current_cred(),
current_cred()->user_ns, CAP_SETPCAP,
@@ -920,46 +960,76 @@
*/
)
/* cannot change a locked bit */
- goto error;
+ return -EPERM;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
new->securebits = arg2;
- goto changed;
+ return commit_creds(new);
case PR_GET_SECUREBITS:
- error = new->securebits;
- goto no_change;
+ return old->securebits;
case PR_GET_KEEPCAPS:
- if (issecure(SECURE_KEEP_CAPS))
- error = 1;
- goto no_change;
+ return !!issecure(SECURE_KEEP_CAPS);
case PR_SET_KEEPCAPS:
- error = -EINVAL;
if (arg2 > 1) /* Note, we rely on arg2 being unsigned here */
- goto error;
- error = -EPERM;
+ return -EINVAL;
if (issecure(SECURE_KEEP_CAPS_LOCKED))
- goto error;
+ return -EPERM;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
if (arg2)
new->securebits |= issecure_mask(SECURE_KEEP_CAPS);
else
new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
- goto changed;
+ return commit_creds(new);
+
+ case PR_CAP_AMBIENT:
+ if (arg2 == PR_CAP_AMBIENT_CLEAR_ALL) {
+ if (arg3 | arg4 | arg5)
+ return -EINVAL;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ cap_clear(new->cap_ambient);
+ return commit_creds(new);
+ }
+
+ if (((!cap_valid(arg3)) | arg4 | arg5))
+ return -EINVAL;
+
+ if (arg2 == PR_CAP_AMBIENT_IS_SET) {
+ return !!cap_raised(current_cred()->cap_ambient, arg3);
+ } else if (arg2 != PR_CAP_AMBIENT_RAISE &&
+ arg2 != PR_CAP_AMBIENT_LOWER) {
+ return -EINVAL;
+ } else {
+ if (arg2 == PR_CAP_AMBIENT_RAISE &&
+ (!cap_raised(current_cred()->cap_permitted, arg3) ||
+ !cap_raised(current_cred()->cap_inheritable,
+ arg3)))
+ return -EPERM;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ if (arg2 == PR_CAP_AMBIENT_RAISE)
+ cap_raise(new->cap_ambient, arg3);
+ else
+ cap_lower(new->cap_ambient, arg3);
+ return commit_creds(new);
+ }
default:
/* No functionality available - continue with default */
- error = -ENOSYS;
- goto error;
+ return -ENOSYS;
}
-
- /* Functionality provided */
-changed:
- return commit_creds(new);
-
-no_change:
-error:
- abort_creds(new);
- return error;
}
/**
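Note: the commoncap.c changes above add ambient capabilities and the PR_CAP_AMBIENT prctl. A minimal userspace sketch of the new surface, assuming headers that define the PR_CAP_AMBIENT* constants; the raise fails with -EPERM unless the capability is already in both the permitted and inheritable sets:

	#include <sys/prctl.h>
	#include <linux/capability.h>

	static int keep_net_raw_across_exec(void)
	{
		/* Once raised, CAP_NET_RAW survives execve() of an
		 * unprivileged, non-setid binary without file caps. */
		return prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE,
			     CAP_NET_RAW, 0, 0);
	}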
diff --git a/security/inode.c b/security/inode.c
index 43ce6e1..94466a5e 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -105,7 +105,7 @@
dir = parent->d_inode;
mutex_lock(&dir->i_mutex);
- dentry = lookup_one_len(name, parent, strlen(name));
+ dentry = lookup_one_len2(name, mount, parent, strlen(name));
if (IS_ERR(dentry))
goto out;
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index cd871dc..28380e5 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -847,6 +847,7 @@
new->cap_inheritable = old->cap_inheritable;
new->cap_permitted = old->cap_permitted;
new->cap_effective = old->cap_effective;
+ new->cap_ambient = old->cap_ambient;
new->cap_bset = old->cap_bset;
new->jit_keyring = old->jit_keyring;
diff --git a/sound/soc/msm/msm-cpe-lsm.c b/sound/soc/msm/msm-cpe-lsm.c
index 85ec34a..b5520a9 100644
--- a/sound/soc/msm/msm-cpe-lsm.c
+++ b/sound/soc/msm/msm-cpe-lsm.c
@@ -1630,7 +1630,7 @@
event_status->payload_size;
memcpy(udata_32->payload,
event_status->payload,
- u_pld_size);
+ udata_32->payload_size);
}
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index f072cb8..348a59b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -3751,10 +3751,10 @@
int flag = 0, i = 0, rc = 0, idx = 0;
int be_index = 0, port_id, topo_id;
unsigned int session_id = 0;
- uint16_t op_FL_ip_FL_weight;
- uint16_t op_FL_ip_FR_weight;
- uint16_t op_FR_ip_FL_weight;
- uint16_t op_FR_ip_FR_weight;
+ uint16_t op_FL_ip_FL_weight = 0;
+ uint16_t op_FL_ip_FR_weight = 0;
+ uint16_t op_FR_ip_FL_weight = 0;
+ uint16_t op_FR_ip_FR_weight = 0;
flag = ucontrol->value.integer.value[0];
pr_debug("%s E flag %d\n", __func__, flag);
diff --git a/sound/soc/msm/qdsp6v2/q6lsm.c b/sound/soc/msm/qdsp6v2/q6lsm.c
index c843cf9..cc33682 100644
--- a/sound/soc/msm/qdsp6v2/q6lsm.c
+++ b/sound/soc/msm/qdsp6v2/q6lsm.c
@@ -1387,7 +1387,7 @@
struct lsm_module_param_ids ids;
u8 *packet;
- memset(&ids, sizeof(ids), 0);
+ memset(&ids, 0, sizeof(ids));
switch (param_type) {
case LSM_ENDPOINT_DETECT_THRESHOLD: {
ids.module_id = p_info->module_id;