Merge branch 'android-msm-pixel-4.19-tm-security' into android-msm-pixel-4.19-tm
Aug 2022.1
Bug: 216139370
Change-Id: I5a14b51abc45a3f94ccd1eba471e9fb9386fe9a8
diff --git a/arch/arm64/boot/dts/google/lito-redbull-battery.dtsi b/arch/arm64/boot/dts/google/lito-redbull-battery.dtsi
index 26bb89e..ee59634 100644
--- a/arch/arm64/boot/dts/google/lito-redbull-battery.dtsi
+++ b/arch/arm64/boot/dts/google/lito-redbull-battery.dtsi
@@ -167,7 +167,7 @@
google,rl_soc-limits = <20 30 40 50 60 70 90 100>;
google,rl_soc-rates = <25 26 27 28 29 28 27 22>;
/* rest charging */
- google,chg-rest-rate = <8>;
+ google,chg-rest-rate = <15>;
google,chg-rest-soc = <80>;
/* battery history */
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 67a5b4f..e2d6577 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _A6XX_REG_H
@@ -391,6 +392,8 @@
#define A6XX_RBBM_PERFCTR_RBBM_SEL_2 0x509
#define A6XX_RBBM_PERFCTR_RBBM_SEL_3 0x50A
#define A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x50B
+#define A6XX_RBBM_PERFCTR_SRAM_INIT_CMD 0x50e
+#define A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS 0x50f
#define A6XX_RBBM_PERFCTR_MHUB_0_LO 0x512
#define A6XX_RBBM_PERFCTR_MHUB_0_HI 0x513
#define A6XX_RBBM_PERFCTR_MHUB_1_LO 0x514
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 3c4b2e3..454cf47 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/input.h>
@@ -1581,6 +1582,8 @@
adreno_debugfs_init(adreno_dev);
adreno_profile_init(adreno_dev);
+ adreno_dev->perfcounter = false;
+
adreno_sysfs_init(adreno_dev);
kgsl_pwrscale_init(&pdev->dev, CONFIG_QCOM_ADRENO_DEFAULT_GOVERNOR);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index cf16028..c20fa21 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __ADRENO_H
#define __ADRENO_H
@@ -15,6 +16,9 @@
#define DEVICE_3D_NAME "kgsl-3d"
#define DEVICE_3D0_NAME "kgsl-3d0"
+/* Index to preemption scratch buffer to store KMD postamble */
+#define KMD_POSTAMBLE_IDX 100
+
/* ADRENO_DEVICE - Given a kgsl_device return the adreno device struct */
#define ADRENO_DEVICE(device) \
container_of(device, struct adreno_device, dev)
@@ -240,6 +244,9 @@
/* Time to allow preemption to complete (in ms) */
#define ADRENO_PREEMPT_TIMEOUT 10000
+#define PREEMPT_SCRATCH_ADDR(dev, id) \
+	((dev)->preempt.scratch.gpuaddr + ((id) * sizeof(u64)))
+
#define ADRENO_INT_BIT(a, _bit) (((a)->gpucore->gpudev->int_bits) ? \
(adreno_get_int(a, _bit) < 0 ? 0 : \
BIT(adreno_get_int(a, _bit))) : 0)
@@ -274,6 +281,7 @@
* skipsaverestore: To skip saverestore during L1 preemption (for 6XX)
* usesgmem: enable GMEM save/restore across preemption (for 6XX)
* count: Track the number of preemptions triggered
+ * @postamble_len: Number of dwords in KMD postamble pm4 packet
*/
struct adreno_preemption {
atomic_t state;
@@ -284,6 +292,7 @@
bool skipsaverestore;
bool usesgmem;
unsigned int count;
+ u32 postamble_len;
};
@@ -542,6 +551,11 @@
bool gpuhtw_llc_slice_enable;
unsigned int zap_loaded;
unsigned int soc_hw_rev;
+ /*
+ * @perfcounter: Flag to clear perfcounters across contexts and
+ * controls perfcounter ioctl read
+ */
+ bool perfcounter;
};
/**
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 1d53237..a58f8b2 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "adreno.h"
@@ -545,13 +546,27 @@
if (context) {
struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
struct adreno_ringbuffer *rb = drawctxt->rb;
- uint64_t dest = adreno_dev->preempt.scratch.gpuaddr +
- sizeof(u64) * rb->id;
+ uint64_t dest = PREEMPT_SCRATCH_ADDR(adreno_dev, rb->id);
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2);
cmds += cp_gpuaddr(adreno_dev, cmds, dest);
*cmds++ = lower_32_bits(gpuaddr);
*cmds++ = upper_32_bits(gpuaddr);
+
+ /*
+ * Add a KMD post amble to clear the perf counters during
+ * preemption
+ */
+ if (!adreno_dev->perfcounter) {
+ u64 kmd_postamble_addr =
+ PREEMPT_SCRATCH_ADDR(adreno_dev, KMD_POSTAMBLE_IDX);
+
+ *cmds++ = cp_type7_packet(CP_SET_AMBLE, 3);
+ *cmds++ = lower_32_bits(kmd_postamble_addr);
+ *cmds++ = upper_32_bits(kmd_postamble_addr);
+			*cmds++ = ((CP_KMD_AMBLE_TYPE << 20) & GENMASK(22, 20))
+				| (adreno_dev->preempt.postamble_len & GENMASK(19, 0));
+ }
}
return (unsigned int) (cmds - cmds_orig);
@@ -564,8 +579,7 @@
struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
if (rb) {
- uint64_t dest = adreno_dev->preempt.scratch.gpuaddr +
- sizeof(u64) * rb->id;
+ uint64_t dest = PREEMPT_SCRATCH_ADDR(adreno_dev, rb->id);
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2);
cmds += cp_gpuaddr(adreno_dev, cmds, dest);
@@ -773,6 +787,33 @@
goto err;
}
+ /*
+ * First 8 dwords of the preemption scratch buffer is used to store the
+ * address for CP to save/restore VPC data. Reserve 11 dwords in the
+ * preemption scratch buffer from index KMD_POSTAMBLE_IDX for KMD
+ * postamble pm4 packets
+ */
+ if (!adreno_dev->perfcounter) {
+ u32 *postamble = preempt->scratch.hostptr +
+ (KMD_POSTAMBLE_IDX * sizeof(u64));
+ u32 count = 0;
+
+ postamble[count++] = cp_type7_packet(CP_REG_RMW, 3);
+ postamble[count++] = A6XX_RBBM_PERFCTR_SRAM_INIT_CMD;
+ postamble[count++] = 0x0;
+ postamble[count++] = 0x1;
+
+ postamble[count++] = cp_type7_packet(CP_WAIT_REG_MEM, 6);
+ postamble[count++] = 0x3;
+ postamble[count++] = A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS;
+ postamble[count++] = 0x0;
+ postamble[count++] = 0x1;
+ postamble[count++] = 0x1;
+ postamble[count++] = 0x0;
+
+ preempt->postamble_len = count;
+ }
+
ret = a6xx_preemption_iommu_init(adreno_dev);
err:
diff --git a/drivers/gpu/msm/adreno_compat.c b/drivers/gpu/msm/adreno_compat.c
index e466de9..ba97383 100644
--- a/drivers/gpu/msm/adreno_compat.c
+++ b/drivers/gpu/msm/adreno_compat.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "adreno.h"
@@ -164,6 +165,14 @@
struct kgsl_perfcounter_read_compat *read32 = data;
struct kgsl_perfcounter_read read;
+ /*
+ * When performance counter zapping is enabled, the counters are cleared
+ * across context switches. Reading the counters when they are zapped is
+ * not permitted.
+ */
+ if (!adreno_dev->perfcounter)
+ return -EPERM;
+
read.reads = (struct kgsl_perfcounter_read_group __user *)
(uintptr_t)read32->reads;
read.count = read32->count;
diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c
index ee0dc93..396975a 100644
--- a/drivers/gpu/msm/adreno_ioctl.c
+++ b/drivers/gpu/msm/adreno_ioctl.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
@@ -133,6 +134,14 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(dev_priv->device);
struct kgsl_perfcounter_read *read = data;
+ /*
+ * When performance counter zapping is enabled, the counters are cleared
+ * across context switches. Reading the counters when they are zapped is
+ * not permitted.
+ */
+ if (!adreno_dev->perfcounter)
+ return -EPERM;
+
return (long) adreno_perfcounter_read_group(adreno_dev, read->reads,
read->count);
}
diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c
index 38c7b12..6044892 100644
--- a/drivers/gpu/msm/adreno_iommu.c
+++ b/drivers/gpu/msm/adreno_iommu.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
#include "a3xx_reg.h"
+#include "a6xx_reg.h"
#include "adreno.h"
#include "adreno_iommu.h"
#include "adreno_pm4types.h"
@@ -261,6 +263,12 @@
cmds += _adreno_iommu_add_idle_cmds(adreno_dev, cmds);
cmds += cp_wait_for_me(adreno_dev, cmds);
+	/* Clear performance counters during context switches */
+ if (!adreno_dev->perfcounter) {
+ *cmds++ = cp_type4_packet(A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
+ *cmds++ = 0x1;
+ }
+
/* CP switches the pagetable and flushes the Caches */
*cmds++ = cp_packet(adreno_dev, CP_SMMU_TABLE_UPDATE, 4);
*cmds++ = lower_32_bits(ttbr0);
@@ -280,6 +288,17 @@
cmds += _adreno_iommu_add_idle_cmds(adreno_dev, cmds);
+ /* Wait for performance counter clear to finish */
+ if (!adreno_dev->perfcounter) {
+ *cmds++ = cp_type7_packet(CP_WAIT_REG_MEM, 6);
+ *cmds++ = 0x3;
+ *cmds++ = A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS;
+ *cmds++ = 0x0;
+ *cmds++ = 0x1;
+ *cmds++ = 0x1;
+ *cmds++ = 0x0;
+ }
+
return cmds - cmds_orig;
}
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 026ba548..8d6304e 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
@@ -129,7 +130,8 @@
if (adreno_is_a702(adreno_dev))
return;
- if (counters == NULL)
+ /* Do not save/restore if not requested */
+ if (counters == NULL || !adreno_dev->perfcounter)
return;
for (groupid = 0; groupid < counters->group_count; groupid++) {
@@ -165,7 +167,8 @@
if (adreno_is_a702(adreno_dev))
return;
- if (counters == NULL)
+ /* Do not save/restore if not requested */
+ if (counters == NULL || !adreno_dev->perfcounter)
return;
for (groupid = 0; groupid < counters->group_count; groupid++) {
diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h
index 756112c..f70541c 100644
--- a/drivers/gpu/msm/adreno_pm4types.h
+++ b/drivers/gpu/msm/adreno_pm4types.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2002,2007-2018,2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __ADRENO_PM4TYPES_H
#define __ADRENO_PM4TYPES_H
@@ -47,6 +48,9 @@
/* switches SMMU pagetable, used on a5xx only */
#define CP_SMMU_TABLE_UPDATE 0x53
+/* Designate command streams to be executed before/after state restore */
+#define CP_SET_AMBLE 0x55
+
/* Set internal CP registers, used to indicate context save data addresses */
#define CP_SET_PSEUDO_REGISTER 0x56
@@ -154,6 +158,9 @@
#define CP_LOADSTATE_STATETYPE_SHIFT 0x00000000
#define CP_LOADSTATE_EXTSRCADDR_SHIFT 0x00000002
+/* Used to define amble type in SET_AMBLE packet to execute during preemption */
+#define CP_KMD_AMBLE_TYPE 3
+
static inline uint pm4_calc_odd_parity_bit(uint val)
{
return (0x9669 >> (0xf & ((val) ^
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index fb026a1..96266a8 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/sched/clock.h>
@@ -565,7 +566,7 @@
if (gpudev->preemption_pre_ibsubmit &&
adreno_is_preemption_enabled(adreno_dev))
- total_sizedwords += 27;
+ total_sizedwords += 31;
if (gpudev->preemption_post_ibsubmit &&
adreno_is_preemption_enabled(adreno_dev))
diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c
index 0e6f1f8..b701e40 100644
--- a/drivers/gpu/msm/adreno_sysfs.c
+++ b/drivers/gpu/msm/adreno_sysfs.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/sysfs.h>
@@ -323,6 +324,31 @@
return gmu_core_acd_set(device, val);
}
+static unsigned int _perfcounter_show(struct adreno_device *adreno_dev)
+{
+ return adreno_dev->perfcounter;
+}
+
+static int _perfcounter_store(struct adreno_device *adreno_dev,
+ unsigned int val)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ if (adreno_dev->perfcounter == val)
+ return 0;
+
+ mutex_lock(&device->mutex);
+
+ /* Power down the GPU before changing the state */
+ kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
+ adreno_dev->perfcounter = val;
+ kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
+
+ mutex_unlock(&device->mutex);
+
+ return 0;
+}
+
static ssize_t _sysfs_store_u32(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -416,6 +442,7 @@
static ADRENO_SYSFS_BOOL(ifpc);
static ADRENO_SYSFS_RO_U32(ifpc_count);
static ADRENO_SYSFS_BOOL(acd);
+static ADRENO_SYSFS_BOOL(perfcounter);
static const struct attribute *_attr_list[] = {
@@ -439,6 +466,7 @@
&adreno_attr_ifpc_count.attr.attr,
&adreno_attr_preempt_count.attr.attr,
&adreno_attr_acd.attr.attr,
+ &adreno_attr_perfcounter.attr.attr,
NULL,
};
@@ -465,7 +493,14 @@
int adreno_sysfs_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ int ret;
- return sysfs_create_files(&device->dev->kobj, _attr_list);
+ ret = sysfs_create_files(&device->dev->kobj, _attr_list);
+
+ /* Notify userspace */
+ if (!ret)
+ kobject_uevent(&device->dev->kobj, KOBJ_ADD);
+
+ return ret;
}
diff --git a/drivers/nfc/st21nfc.c b/drivers/nfc/st21nfc.c
index ca51daf..ad29a54 100644
--- a/drivers/nfc/st21nfc.c
+++ b/drivers/nfc/st21nfc.c
@@ -107,6 +107,7 @@
wait_queue_head_t read_wq;
struct mutex read_mutex;
struct mutex pidle_mutex;
+ struct mutex polarity_mutex;
struct i2c_client *client;
struct miscdevice st21nfc_device;
uint8_t buffer[MAX_BUFFER_SIZE];
@@ -240,6 +241,7 @@
unsigned int irq_type;
int ret;
+ mutex_lock(&st21nfc_dev->polarity_mutex);
st21nfc_dev->polarity_mode = mode;
/* setup irq_flags */
switch (mode) {
@@ -260,6 +262,7 @@
ret = irq_set_irq_type(client->irq, irq_type);
if (ret) {
pr_err("%s : set_irq_type failed\n", __func__);
+ mutex_unlock(&st21nfc_dev->polarity_mutex);
return -ENODEV;
}
/* request irq. the irq is set whenever the chip has data available
@@ -273,10 +276,12 @@
client->name, st21nfc_dev);
if (ret) {
pr_err("%s : devm_request_irq failed\n", __func__);
+ mutex_unlock(&st21nfc_dev->polarity_mutex);
return -ENODEV;
}
st21nfc_dev->irq_is_attached = true;
st21nfc_disable_irq(st21nfc_dev);
+ mutex_unlock(&st21nfc_dev->polarity_mutex);
return ret;
}
@@ -938,6 +943,7 @@
/* init mutex and queues */
init_waitqueue_head(&st21nfc_dev->read_wq);
mutex_init(&st21nfc_dev->read_mutex);
+ mutex_init(&st21nfc_dev->polarity_mutex);
spin_lock_init(&st21nfc_dev->irq_enabled_lock);
pr_debug("%s : debug irq_gpio = %d, client-irq = %d\n",
__func__, desc_to_gpio(st21nfc_dev->gpiod_irq), client->irq);
@@ -969,6 +975,7 @@
err_misc_register:
mutex_destroy(&st21nfc_dev->read_mutex);
+ mutex_destroy(&st21nfc_dev->polarity_mutex);
err_sysfs_power_stats:
if (!IS_ERR(st21nfc_dev->gpiod_pidle)) {
sysfs_remove_group(&client->dev.kobj,
@@ -995,6 +1002,7 @@
}
sysfs_remove_group(&client->dev.kobj, &st21nfc_attr_grp);
mutex_destroy(&st21nfc_dev->read_mutex);
+ mutex_destroy(&st21nfc_dev->polarity_mutex);
acpi_dev_remove_driver_gpios(ACPI_COMPANION(&client->dev));
return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index ee164ba..b2612df 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1257,6 +1257,7 @@
"flow_disable=%u\n"
"rx_page_drop_cnt=%u\n"
-		"zero_len_frag_pkt_cnt=%u\n",
+		"zero_len_frag_pkt_cnt=%u\n"
+		"lower_order=%llu\n",
ipa3_ctx->stats.tx_sw_pkts,
ipa3_ctx->stats.tx_hw_pkts,
ipa3_ctx->stats.tx_non_linear,
@@ -1274,7 +1275,8 @@
ipa3_ctx->stats.flow_enable,
ipa3_ctx->stats.flow_disable,
ipa3_ctx->stats.rx_page_drop_cnt,
- ipa3_ctx->stats.zero_len_frag_pkt_cnt);
+ ipa3_ctx->stats.zero_len_frag_pkt_cnt,
+ ipa3_ctx->stats.lower_order);
cnt += nbytes;
for (i = 0; i < IPAHAL_PKT_STATUS_EXCEPTION_MAX; i++) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 1c0333a..88c2315 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1923,6 +1923,27 @@
}
}
+
+static struct page *ipa3_alloc_page(
+ gfp_t flag, u32 *page_order, bool try_lower)
+{
+ struct page *page = NULL;
+ u32 p_order = *page_order;
+
+ page = __dev_alloc_pages(flag, p_order);
+ /* We will only try 1 page order lower. */
+ if (unlikely(!page)) {
+ if (try_lower && p_order > 0) {
+ p_order = p_order - 1;
+ page = __dev_alloc_pages(flag, p_order);
+ if (likely(page))
+ ipa3_ctx->stats.lower_order++;
+ }
+ }
+ *page_order = p_order;
+ return page;
+}
+
static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
gfp_t flag, bool is_tmp_alloc)
{
@@ -1933,12 +1954,19 @@
flag);
if (unlikely(!rx_pkt))
return NULL;
- rx_pkt->len = PAGE_SIZE << IPA_WAN_PAGE_ORDER;
- rx_pkt->page_data.page = __dev_alloc_pages(flag,
- IPA_WAN_PAGE_ORDER);
+ rx_pkt->page_data.page_order = IPA_WAN_PAGE_ORDER;
+ if (is_tmp_alloc)
+ flag |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+
+ /* Try a lower order page for order 3 pages in case allocation fails. */
+ rx_pkt->page_data.page = ipa3_alloc_page(flag,
+ &rx_pkt->page_data.page_order,
+ (is_tmp_alloc && rx_pkt->page_data.page_order == 3));
if (unlikely(!rx_pkt->page_data.page))
goto fail_page_alloc;
+ rx_pkt->len = PAGE_SIZE << rx_pkt->page_data.page_order;
+
rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
rx_pkt->page_data.page, 0,
rx_pkt->len, DMA_FROM_DEVICE);
@@ -1956,7 +1984,7 @@
return rx_pkt;
fail_dma_mapping:
- __free_pages(rx_pkt->page_data.page, IPA_WAN_PAGE_ORDER);
+ __free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
fail_page_alloc:
kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
return NULL;
@@ -2644,7 +2672,7 @@
dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
rx_pkt->len, DMA_FROM_DEVICE);
__free_pages(rx_pkt->page_data.page,
- IPA_WAN_PAGE_ORDER);
+ rx_pkt->page_data.page_order);
kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
}
@@ -2696,7 +2724,7 @@
rx_pkt->len,
DMA_FROM_DEVICE);
__free_pages(rx_pkt->page_data.page,
- IPA_WAN_PAGE_ORDER);
+ rx_pkt->page_data.page_order);
}
kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
rx_pkt);
@@ -2716,7 +2744,7 @@
rx_pkt->len,
DMA_FROM_DEVICE);
__free_pages(rx_pkt->page_data.page,
- IPA_WAN_PAGE_ORDER);
+ rx_pkt->page_data.page_order);
kmem_cache_free(
ipa3_ctx->rx_pkt_wrapper_cache,
rx_pkt);
@@ -3517,7 +3545,7 @@
rx_page.dma_addr,
rx_pkt->len, DMA_FROM_DEVICE);
__free_pages(rx_pkt->page_data.page,
- IPA_WAN_PAGE_ORDER);
+ rx_pkt->page_data.page_order);
}
rx_pkt->sys->free_rx_wrapper(rx_pkt);
}
@@ -3542,7 +3570,7 @@
skb_shinfo(rx_skb)->nr_frags,
rx_page.page, 0,
notify->bytes_xfered,
- PAGE_SIZE << IPA_WAN_PAGE_ORDER);
+ PAGE_SIZE << rx_page.page_order);
}
} else {
return NULL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index a6c270d..62abd7c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1383,6 +1383,7 @@
u32 rx_page_drop_cnt;
u32 zero_len_frag_pkt_cnt;
struct ipa3_page_recycle_stats page_recycle_stats[2];
+ u64 lower_order;
};
/* offset for each stats */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index a29f8df..de70e41 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -1603,6 +1603,7 @@
return;
}
IPAWANERR("Exit from service arrive fun\n");
+ return;
}
if (rc != 0) {
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index e13915d..010cf7f 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -3382,8 +3382,13 @@
if (!edev || !mdwc)
return NOTIFY_DONE;
- if (!mdwc->usb_data_enabled)
+ if (!mdwc->usb_data_enabled) {
+ if (event)
+ dwc3_msm_gadget_vbus_draw(mdwc, 500);
+ else
+ dwc3_msm_gadget_vbus_draw(mdwc, 0);
return NOTIFY_DONE;
+ }
dwc = platform_get_drvdata(mdwc->dwc3);
@@ -4847,7 +4852,8 @@
mdwc->drd_state = DRD_STATE_PERIPHERAL;
work = 1;
} else {
- dwc3_msm_gadget_vbus_draw(mdwc, 0);
+ if (mdwc->usb_data_enabled)
+ dwc3_msm_gadget_vbus_draw(mdwc, 0);
dev_dbg(mdwc->dev, "Cable disconnected\n");
}
break;
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 6e8cda5..8b70672 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -890,11 +890,13 @@
* @page: skb page
* @dma_addr: DMA address of this Rx packet
* @is_tmp_alloc: skb page from tmp_alloc or recycle_list
+ * @page_order: page order associated with the page.
*/
struct ipa_rx_page_data {
struct page *page;
dma_addr_t dma_addr;
bool is_tmp_alloc;
+ u32 page_order;
};