Merge "mm, vmscan: do not special-case slab reclaim when watermarks are boosted"
diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig
index d06d4f6..dea94f3 100644
--- a/arch/arm64/configs/vendor/bengal-perf_defconfig
+++ b/arch/arm64/configs/vendor/bengal-perf_defconfig
@@ -174,6 +174,7 @@
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
@@ -389,6 +390,7 @@
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PM8XXX=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_GPI_DMA=y
CONFIG_UIO=y
@@ -420,6 +422,7 @@
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_QCOM_GLINK_RPM=y
CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_MSM_RPM_SMD=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
@@ -471,6 +474,7 @@
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_QCOM_QFPROM=y
CONFIG_SLIMBUS=y
+CONFIG_QCOM_KGSL=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_F2FS_FS=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index 883bd05..6cd50a0 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -182,6 +182,7 @@
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
@@ -406,6 +407,7 @@
CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY=y
CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PM8XXX=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_GPI_DMA=y
CONFIG_QCOM_GPI_DMA_DEBUG=y
@@ -439,6 +441,7 @@
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_QCOM_GLINK_RPM=y
CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_MSM_RPM_SMD=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
@@ -471,6 +474,7 @@
CONFIG_PANIC_ON_GLADIATOR_ERROR=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_BUS_SCALING=y
CONFIG_QCOM_GLINK=y
CONFIG_QCOM_GLINK_PKT=y
CONFIG_QCOM_SMP2P_SLEEPSTATE=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index f90869f..02c7d62 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -546,7 +546,6 @@
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_MEM_OFFLINE=y
CONFIG_OVERRIDE_MEMORY_LIMIT=y
-CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_MSM_QBT_HANDLER=y
CONFIG_QCOM_IPCC=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index e71774a..df2724a 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -568,7 +568,6 @@
CONFIG_QCOM_MEM_OFFLINE=y
CONFIG_BUG_ON_HW_MEM_ONLINE_FAIL=y
CONFIG_OVERRIDE_MEMORY_LIMIT=y
-CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_MSM_QBT_HANDLER=y
CONFIG_QCOM_IPCC=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index 7f800a0..7837cd5c 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -70,6 +70,7 @@
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 3b828e9..8ee524f 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -71,6 +71,7 @@
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -545,6 +546,7 @@
CONFIG_RPMSG_QCOM_GLINK_SPSS=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_BUG_ON_HW_MEM_ONLINE_FAIL=y
CONFIG_OVERRIDE_MEMORY_LIMIT=y
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
@@ -669,6 +671,7 @@
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_CONSOLE_UNHASHED_POINTERS=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
CONFIG_DEBUG_INFO=y
CONFIG_PAGE_OWNER=y
CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index 16ac408..35a084b 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -1613,7 +1613,6 @@ static int mhi_driver_probe(struct device *dev)
struct mhi_event *mhi_event;
struct mhi_chan *ul_chan = mhi_dev->ul_chan;
struct mhi_chan *dl_chan = mhi_dev->dl_chan;
- bool auto_start = false;
int ret;
/* bring device out of lpm */
@@ -1632,7 +1631,11 @@ static int mhi_driver_probe(struct device *dev)
ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
mhi_dev->status_cb = mhi_drv->status_cb;
- auto_start = ul_chan->auto_start;
+ if (ul_chan->auto_start) {
+ ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
+ if (ret)
+ goto exit_probe;
+ }
}
if (dl_chan) {
@@ -1656,15 +1659,22 @@ static int mhi_driver_probe(struct device *dev)
/* ul & dl uses same status cb */
mhi_dev->status_cb = mhi_drv->status_cb;
- auto_start = (auto_start || dl_chan->auto_start);
}
ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
+ if (ret)
+ goto exit_probe;
- if (!ret && auto_start)
- mhi_prepare_for_transfer(mhi_dev);
+ if (dl_chan && dl_chan->auto_start)
+ mhi_prepare_channel(mhi_cntrl, dl_chan);
+
+ mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+
+ return ret;
exit_probe:
+ mhi_unprepare_from_transfer(mhi_dev);
+
mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
return ret;
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index 69129b6..bb2379e 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -878,6 +878,8 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
int mhi_dtr_init(void);
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan);
/* isr handlers */
irqreturn_t mhi_msi_handlr(int irq_number, void *dev);
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index 8e1e2fd..cd77709 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -1569,7 +1569,8 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
state = mhi_get_mhi_state(mhi_cntrl);
ee = mhi_cntrl->ee;
mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
- MHI_LOG("device ee:%s dev_state:%s\n",
+ MHI_LOG("local ee: %s device ee:%s dev_state:%s\n",
+ TO_MHI_EXEC_STR(ee),
TO_MHI_EXEC_STR(mhi_cntrl->ee),
TO_MHI_STATE_STR(state));
}
@@ -1582,7 +1583,7 @@ irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
write_unlock_irq(&mhi_cntrl->pm_lock);
/* if device in rddm don't bother processing sys error */
- if (mhi_cntrl->ee == MHI_EE_RDDM) {
+ if (mhi_cntrl->ee == MHI_EE_RDDM && ee != MHI_EE_DISABLE_TRANSITION) {
if (mhi_cntrl->ee != ee) {
mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
MHI_CB_EE_RDDM);
@@ -1688,8 +1689,8 @@ int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
return 0;
}
-static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan)
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+ struct mhi_chan *mhi_chan)
{
int ret = 0;
@@ -2076,7 +2077,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
if (!mhi_chan)
continue;
- ret = __mhi_prepare_channel(mhi_cntrl, mhi_chan);
+ ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
if (ret) {
MHI_ERR("Error moving chan %s,%d to START state\n",
mhi_chan->name, mhi_chan->chan);
diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index 9fff109..d9de31a 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -235,14 +235,15 @@ enum mhi_sat_state {
SAT_DISCONNECTED, /* rpmsg link is down */
SAT_FATAL_DETECT, /* device is down as fatal error was detected early */
SAT_ERROR, /* device is down after error or graceful shutdown */
- SAT_DISABLED, /* set if rpmsg link goes down after device is down */
+ SAT_DISABLED, /* no further processing: wait for device removal */
};
#define MHI_SAT_ACTIVE(cntrl) (cntrl->state == SAT_RUNNING)
-#define MHI_SAT_FATAL_DETECT(cntrl) (cntrl->state == SAT_FATAL_DETECT)
+#define MHI_SAT_IN_ERROR_STATE(cntrl) (cntrl->state >= SAT_FATAL_DETECT)
#define MHI_SAT_ALLOW_CONNECTION(cntrl) (cntrl->state == SAT_READY || \
cntrl->state == SAT_DISCONNECTED)
-#define MHI_SAT_IN_ERROR_STATE(cntrl) (cntrl->state >= SAT_FATAL_DETECT)
+#define MHI_SAT_ALLOW_SYS_ERR(cntrl) (cntrl->state == SAT_RUNNING || \
+ cntrl->state == SAT_FATAL_DETECT)
struct mhi_sat_cntrl {
struct list_head node;
@@ -940,10 +941,15 @@ static void mhi_sat_dev_status_cb(struct mhi_device *mhi_dev,
MHI_SAT_LOG("Device fatal error detected\n");
spin_lock_irqsave(&sat_cntrl->state_lock, flags);
- if (MHI_SAT_ACTIVE(sat_cntrl))
+ if (MHI_SAT_ACTIVE(sat_cntrl)) {
sat_cntrl->error_cookie = async_schedule(mhi_sat_error_worker,
sat_cntrl);
- sat_cntrl->state = SAT_FATAL_DETECT;
+ sat_cntrl->state = SAT_FATAL_DETECT;
+ } else {
+ /* rpmsg link down or HELLO not sent or an error occurred */
+ sat_cntrl->state = SAT_DISABLED;
+ }
+
spin_unlock_irqrestore(&sat_cntrl->state_lock, flags);
}
@@ -968,7 +974,7 @@ static void mhi_sat_dev_remove(struct mhi_device *mhi_dev)
/* send sys_err if first device is removed */
spin_lock_irq(&sat_cntrl->state_lock);
- if (MHI_SAT_ACTIVE(sat_cntrl) || MHI_SAT_FATAL_DETECT(sat_cntrl))
+ if (MHI_SAT_ALLOW_SYS_ERR(sat_cntrl))
send_sys_err = true;
sat_cntrl->state = SAT_ERROR;
spin_unlock_irq(&sat_cntrl->state_lock);
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index c13f049..0397f5b 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -263,6 +263,7 @@ struct smq_invoke_ctx {
struct fastrpc_buf *lbuf;
size_t used;
struct fastrpc_file *fl;
+ uint32_t handle;
uint32_t sc;
struct overlap *overs;
struct overlap **overps;
@@ -365,6 +366,8 @@ struct fastrpc_apps {
bool legacy_remote_heap;
/* Unique job id for each message */
uint64_t jobid[NUM_CHANNELS];
+ struct wakeup_source *wake_source;
+ unsigned int wake_count;
};
struct fastrpc_mmap {
@@ -446,6 +449,8 @@ struct fastrpc_file {
/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
int dev_minor;
char *debug_buf;
+ /* Flag to enable PM wake/relax voting for every remote invoke */
+ int wake_enable;
};
static struct fastrpc_apps gfa;
@@ -1319,6 +1324,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
goto bail;
}
ctx->crc = (uint32_t *)invokefd->crc;
+ ctx->handle = invoke->handle;
ctx->sc = invoke->sc;
if (bufs) {
VERIFY(err, 0 == context_build_overlap(ctx));
@@ -2025,7 +2031,36 @@ static void fastrpc_init(struct fastrpc_apps *me)
me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
}
-static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
+static inline void fastrpc_pm_awake(int fl_wake_enable, int *wake_enable)
+{
+ struct fastrpc_apps *me = &gfa;
+
+ if (!fl_wake_enable)
+ return;
+
+ spin_lock(&me->hlock);
+ if (!me->wake_count)
+ __pm_stay_awake(me->wake_source);
+ me->wake_count++;
+ spin_unlock(&me->hlock);
+ *wake_enable = 1;
+}
+
+static inline void fastrpc_pm_relax(int *wake_enable)
+{
+ struct fastrpc_apps *me = &gfa;
+
+ if (!(*wake_enable))
+ return;
+
+ spin_lock(&me->hlock);
+ if (me->wake_count)
+ me->wake_count--;
+ if (!me->wake_count)
+ __pm_relax(me->wake_source);
+ spin_unlock(&me->hlock);
+ *wake_enable = 0;
+}
static inline int fastrpc_wait_for_response(struct smq_invoke_ctx *ctx,
uint32_t kernel)
@@ -2083,8 +2118,8 @@ static void fastrpc_wait_for_completion(struct smq_invoke_ctx *ctx,
if (!err) {
ctx->isWorkDone = true;
} else if (!ctx->isWorkDone) {
- pr_info("poll timeout ctxid 0x%llx sc 0x%x\n",
- ctx->ctxid, ctx->sc);
+ pr_info("adsprpc: %s: %s: poll timeout for handle 0x%x, sc 0x%x\n",
+ __func__, current->comm, ctx->handle, ctx->sc);
interrupted = fastrpc_wait_for_response(ctx,
kernel);
*pInterrupted = interrupted;
@@ -2102,8 +2137,9 @@ static void fastrpc_wait_for_completion(struct smq_invoke_ctx *ctx,
break;
default:
- pr_err("adsprpc: unsupported response flags 0x%x\n",
- ctx->rspFlags);
+ *pInterrupted = -EBADR;
+ pr_err("Error: adsprpc: %s: unsupported response flags 0x%x for handle 0x%x, sc 0x%x\n",
+ current->comm, ctx->rspFlags, ctx->handle, ctx->sc);
return;
} /* end of switch */
} /* end of while loop */
@@ -2135,10 +2171,11 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
struct fastrpc_ioctl_invoke *invoke = &inv->inv;
int cid = fl->cid;
int interrupted = 0;
- int err = 0;
+ int err = 0, wake_enable = 0;
struct timespec invoket = {0};
int64_t *perf_counter = NULL;
+ fastrpc_pm_awake(fl->wake_enable, &wake_enable);
if (fl->profile) {
perf_counter = getperfcounter(fl, PERF_COUNT);
getnstimeofday(&invoket);
@@ -2158,7 +2195,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS && fl->sctx != NULL);
if (err) {
- pr_err("adsprpc: ERROR: %s: user application %s domain is not set\n",
+ pr_err("adsprpc: ERROR: %s: kernel session not initialized yet for %s\n",
__func__, current->comm);
err = -EBADR;
goto bail;
@@ -2204,15 +2241,17 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
if (err)
goto bail;
wait:
+ fastrpc_pm_relax(&wake_enable);
fastrpc_wait_for_completion(ctx, &interrupted, kernel);
+ fastrpc_pm_awake(fl->wake_enable, &wake_enable);
VERIFY(err, 0 == (err = interrupted));
if (err)
goto bail;
- VERIFY(err, ctx->isWorkDone);
- if (err) {
- pr_err("adsprpc: ctx work done failed, sc 0x%x handle 0x%x\n",
- ctx->sc, invoke->handle);
+ if (!ctx->isWorkDone) {
+ err = -EPROTO;
+ pr_err("Error: adsprpc: %s: %s: WorkDone state is invalid for handle 0x%x, sc 0x%x\n",
+ __func__, current->comm, invoke->handle, ctx->sc);
goto bail;
}
@@ -2240,6 +2279,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
if (fl->profile && !interrupted)
fastrpc_update_invoke_count(invoke->handle, perf_counter,
&invoket);
+ fastrpc_pm_relax(&wake_enable);
return err;
}
@@ -2589,10 +2629,11 @@ static int fastrpc_send_cpuinfo_to_dsp(struct fastrpc_file *fl)
}
static int fastrpc_get_info_from_dsp(struct fastrpc_file *fl,
- uint32_t *dsp_attr, uint32_t dsp_attr_size,
+ uint32_t *dsp_attr_buf,
+ uint32_t dsp_attr_buf_len,
uint32_t domain)
{
- int err = 0, dsp_cap_buff_size, dsp_support = 0;
+ int err = 0, dsp_support = 0;
struct fastrpc_ioctl_invoke_crc ioctl;
remote_arg_t ra[2];
struct kstat sb;
@@ -2618,7 +2659,7 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_file *fl,
dsp_support = 0;
break;
}
- dsp_attr[0] = dsp_support;
+ dsp_attr_buf[0] = dsp_support;
if (dsp_support == 0)
goto bail;
@@ -2627,11 +2668,10 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_file *fl,
if (err)
goto bail;
- dsp_cap_buff_size = dsp_attr_size - sizeof(uint32_t);
- ra[0].buf.pv = (void *)&dsp_cap_buff_size;
- ra[0].buf.len = sizeof(dsp_cap_buff_size);
- ra[1].buf.pv = (void *)(&dsp_attr[1]);
- ra[1].buf.len = dsp_cap_buff_size * sizeof(uint32_t);
+ ra[0].buf.pv = (void *)&dsp_attr_buf_len;
+ ra[0].buf.len = sizeof(dsp_attr_buf_len);
+ ra[1].buf.pv = (void *)(&dsp_attr_buf[1]);
+ ra[1].buf.len = dsp_attr_buf_len * sizeof(uint32_t);
ioctl.inv.handle = FASTRPC_STATIC_HANDLE_DSP_UTILITIES;
ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 1);
ioctl.inv.pra = ra;
@@ -2663,7 +2703,7 @@ static int fastrpc_get_info_from_kernel(
* and cache on kernel
*/
err = fastrpc_get_info_from_dsp(fl, dsp_cap->dsp_attributes,
- sizeof(dsp_cap->dsp_attributes),
+ FASTRPC_MAX_DSP_ATTRIBUTES - 1,
domain);
if (err)
goto bail;
@@ -3204,9 +3244,12 @@ static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
break;
}
}
- VERIFY(err, idx < chan->sesscount);
- if (err)
+ if (idx >= chan->sesscount) {
+ err = -EUSERS;
+ pr_err("adsprpc: ERROR %d: %s: max concurrent sessions limit (%d) already reached on %s\n",
+ err, __func__, chan->sesscount, chan->subsys);
goto bail;
+ }
chan->session[idx].smmu.faults = 0;
} else {
VERIFY(err, me->dev != NULL);
@@ -3710,7 +3753,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
VERIFY(err, fl && fl->sctx && fl->cid >= 0 && fl->cid < NUM_CHANNELS);
if (err) {
- pr_err("adsprpc: ERROR: %s: user application %s domain is not set\n",
+ pr_err("adsprpc: ERROR: %s: kernel session not initialized yet for %s\n",
__func__, current->comm);
err = -EBADR;
return err;
@@ -3852,8 +3895,8 @@ static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
fl->cid = cid;
fl->ssrcount = fl->apps->channel[cid].ssrcount;
mutex_lock(&fl->apps->channel[cid].smd_mutex);
- VERIFY(err, 0 == (err = fastrpc_session_alloc_locked(
- &fl->apps->channel[cid], 0, &fl->sctx)));
+ err = fastrpc_session_alloc_locked(&fl->apps->channel[cid],
+ 0, &fl->sctx);
mutex_unlock(&fl->apps->channel[cid].smd_mutex);
if (err)
goto bail;
@@ -3899,8 +3942,11 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
case FASTRPC_CONTROL_KALLOC:
cp->kalloc.kalloc_support = 1;
break;
+ case FASTRPC_CONTROL_WAKELOCK:
+ fl->wake_enable = cp->wp.enable;
+ break;
default:
- err = -ENOTTY;
+ err = -EBADRQC;
break;
}
bail:
@@ -4014,6 +4060,7 @@ static int fastrpc_control(struct fastrpc_ioctl_control *cp,
bail:
return err;
}
+
static int fastrpc_get_dsp_info(struct fastrpc_ioctl_dsp_capabilities *dsp_cap,
void *param, struct fastrpc_file *fl)
{
@@ -4789,11 +4836,19 @@ static int __init fastrpc_device_init(void)
err = register_rpmsg_driver(&fastrpc_rpmsg_client);
if (err) {
- pr_err("adsprpc: register_rpmsg_driver: failed with err %d\n",
- err);
+ pr_err("adsprpc: %s: register_rpmsg_driver failed with err %d\n",
+ __func__, err);
goto device_create_bail;
}
me->rpmsg_register = 1;
+
+ me->wake_source = wakeup_source_register("adsprpc");
+ VERIFY(err, !IS_ERR_OR_NULL(me->wake_source));
+ if (err) {
+ pr_err("adsprpc: Error: %s: wakeup_source_register failed with err %d\n",
+ __func__, PTR_ERR(me->wake_source));
+ goto device_create_bail;
+ }
return 0;
device_create_bail:
for (i = 0; i < NUM_CHANNELS; i++) {
@@ -4844,6 +4899,8 @@ static void __exit fastrpc_device_exit(void)
unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
if (me->rpmsg_register == 1)
unregister_rpmsg_driver(&fastrpc_rpmsg_client);
+ if (me->wake_source)
+ wakeup_source_unregister(me->wake_source);
debugfs_remove_recursive(debugfs_root);
}
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index 571f585..c1e5af9 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -234,23 +234,32 @@ struct fastrpc_ioctl_perf { /* kernel performance data */
uintptr_t keys;
};
-#define FASTRPC_CONTROL_LATENCY (1)
+enum fastrpc_control_type {
+ FASTRPC_CONTROL_LATENCY = 1,
+ FASTRPC_CONTROL_SMMU = 2,
+ FASTRPC_CONTROL_KALLOC = 3,
+ FASTRPC_CONTROL_WAKELOCK = 4,
+};
+
struct fastrpc_ctrl_latency {
uint32_t enable; /* latency control enable */
uint32_t latency; /* latency request in us */
};
-#define FASTRPC_CONTROL_KALLOC (3)
struct fastrpc_ctrl_kalloc {
uint32_t kalloc_support; /* Remote memory allocation from kernel */
};
-/* FASTRPC_CONTROL value 2 is reserved in user space */
+struct fastrpc_ctrl_wakelock {
+ uint32_t enable; /* wakelock control enable */
+};
+
struct fastrpc_ioctl_control {
uint32_t req;
union {
struct fastrpc_ctrl_latency lp;
struct fastrpc_ctrl_kalloc kalloc;
+ struct fastrpc_ctrl_wakelock wp;
};
};
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 950d04c..9cfe1c1 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1044,6 +1044,7 @@ static void diag_init_apps_feature(void)
SET_APPS_FEATURE(driver, F_DIAG_EVENT_REPORT);
SET_APPS_FEATURE(driver, F_DIAG_HW_ACCELERATION);
+ SET_APPS_FEATURE(driver, F_DIAG_MULTI_SIM_MASK);
}
void diag_send_error_rsp(unsigned char *buf, int len,
diff --git a/drivers/clk/qcom/camcc-lito.c b/drivers/clk/qcom/camcc-lito.c
index 20c3295..e7900c8 100644
--- a/drivers/clk/qcom/camcc-lito.c
+++ b/drivers/clk/qcom/camcc-lito.c
@@ -324,7 +324,7 @@ static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
},
};
-static const struct alpha_pll_config cam_cc_pll2_config = {
+static struct alpha_pll_config cam_cc_pll2_config = {
.l = 0x32,
.cal_l = 0x32,
.alpha = 0x0,
@@ -2319,6 +2319,7 @@ static const struct qcom_cc_desc cam_cc_lito_desc = {
static const struct of_device_id cam_cc_lito_match_table[] = {
{ .compatible = "qcom,lito-camcc" },
+ { .compatible = "qcom,lito-camcc-v2" },
{ }
};
MODULE_DEVICE_TABLE(of, cam_cc_lito_match_table);
@@ -2361,6 +2362,10 @@ static int cam_cc_lito_probe(struct platform_device *pdev)
clk_lucid_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
clk_lucid_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,lito-camcc-v2"))
+ cam_cc_pll2_config.test_ctl_val = 0x00000000;
+
clk_zonda_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
clk_lucid_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
clk_lucid_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
diff --git a/drivers/clk/qcom/clk-voter.c b/drivers/clk/qcom/clk-voter.c
index 1ffc5e5..b8f585f 100644
--- a/drivers/clk/qcom/clk-voter.c
+++ b/drivers/clk/qcom/clk-voter.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, 2019, The Linux Foundation. All rights reserved.
*/
#include <linux/clk.h>
@@ -126,6 +126,16 @@ static unsigned long voter_clk_recalc_rate(struct clk_hw *hw,
return v->rate;
}
+int voter_clk_handoff(struct clk_hw *hw)
+{
+ struct clk_voter *v = to_clk_voter(hw);
+
+ v->enabled = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(voter_clk_handoff);
+
const struct clk_ops clk_ops_voter = {
.prepare = voter_clk_prepare,
.unprepare = voter_clk_unprepare,
diff --git a/drivers/clk/qcom/clk-voter.h b/drivers/clk/qcom/clk-voter.h
index b9a36f6..57333bb 100644
--- a/drivers/clk/qcom/clk-voter.h
+++ b/drivers/clk/qcom/clk-voter.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, 2019, The Linux Foundation. All rights reserved.
*/
#ifndef __QCOM_CLK_VOTER_H__
@@ -27,6 +27,7 @@ extern const struct clk_ops clk_ops_voter;
.hw.init = &(struct clk_init_data){ \
.ops = &clk_ops_voter, \
.name = #clk_name, \
+ .flags = CLK_ENABLE_HAND_OFF, \
.parent_names = (const char *[]){ #_parent_name }, \
.num_parents = 1, \
}, \
@@ -38,4 +39,6 @@ extern const struct clk_ops clk_ops_voter;
#define DEFINE_CLK_BRANCH_VOTER(clk_name, _parent_name) \
__DEFINE_CLK_VOTER(clk_name, _parent_name, 1000, 1)
+int voter_clk_handoff(struct clk_hw *hw);
+
#endif
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index a9bcfbb..d00c6f5 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -3227,12 +3227,17 @@ static void _aead_aes_fb_stage1_ahash_complete(
unsigned char *tmp;
tmp = kmalloc(ctx->authsize, GFP_KERNEL);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto ret;
+ }
scatterwalk_map_and_copy(tmp, rctx->fb_aes_src,
req->cryptlen - ctx->authsize, ctx->authsize, 0);
if (memcmp(rctx->fb_ahash_digest, tmp, ctx->authsize) != 0)
err = -EBADMSG;
kfree(tmp);
}
+ret:
if (err)
_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
else {
@@ -3359,6 +3364,10 @@ static int _qcrypto_aead_aes_192_fallback(struct aead_request *req,
unsigned char *tmp;
tmp = kmalloc(ctx->authsize, GFP_KERNEL);
+ if (!tmp) {
+ rc = -ENOMEM;
+ goto ret;
+ }
/* compare icv */
scatterwalk_map_and_copy(tmp,
src, req->cryptlen - ctx->authsize,
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
index c07f6a1..f305c75 100644
--- a/drivers/devfreq/governor_gpubw_mon.c
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -100,10 +100,11 @@ static int devfreq_gpubw_get_target(struct devfreq *df,
/*
* If there's a new high watermark, update the cutoffs and send the
- * FAST hint. Otherwise check the current value against the current
+ * FAST hint, provided that we are using a floating watermark.
+ * Otherwise check the current value against the current
* cutoffs.
*/
- if (norm_max_cycles > priv->bus.max) {
+ if (norm_max_cycles > priv->bus.max && priv->bus.floating) {
_update_cutoff(priv, norm_max_cycles);
bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
} else {
@@ -224,10 +225,11 @@ static int devfreq_gpubw_event_handler(struct devfreq *devfreq,
case DEVFREQ_GOV_SUSPEND:
{
struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
-
- priv->bus.total_time = 0;
- priv->bus.gpu_time = 0;
- priv->bus.ram_time = 0;
+ if (priv) {
+ priv->bus.total_time = 0;
+ priv->bus.gpu_time = 0;
+ priv->bus.ram_time = 0;
+ }
}
break;
default:
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 75952a0..56ce891 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -1224,7 +1224,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, ANY_ID),
.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
- ADRENO_IFPC | ADRENO_PREEMPTION | ADRENO_ACD,
+ ADRENO_IFPC | ADRENO_PREEMPTION | ADRENO_ACD |
+ ADRENO_LM,
.gpudev = &adreno_a6xx_gpudev,
.gmem_base = 0,
.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index aa946dc..1230d2a 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -34,6 +34,7 @@ static unsigned int counter_delta(struct kgsl_device *device,
static struct devfreq_msm_adreno_tz_data adreno_tz_data = {
.bus = {
.max = 350,
+ .floating = true,
},
.device_id = KGSL_DEVICE_3D0,
};
@@ -877,32 +878,6 @@ static void adreno_of_get_ca_aware_properties(struct adreno_device *adreno_dev,
}
}
-static int _of_property_read_ddrtype(struct device_node *node, const char *base,
- u32 *ptr)
-{
- char str[32];
- int ddr = of_fdt_get_ddrtype();
-
- /* of_fdt_get_ddrtype returns error if the DDR type isn't determined */
- if (ddr >= 0) {
- int ret;
-
- /* Construct expanded string for the DDR type */
- ret = snprintf(str, sizeof(str), "%s-ddr%d", base, ddr);
-
- /* WARN_ON() if the array size was too small for the string */
- if (WARN_ON(ret > sizeof(str)))
- return -ENOMEM;
-
- /* Read the expanded string */
- if (!of_property_read_u32(node, str, ptr))
- return 0;
- }
-
- /* Read the default string */
- return of_property_read_u32(node, base, ptr);
-}
-
static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
struct device_node *node)
{
@@ -950,7 +925,7 @@ static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
of_property_read_u32(child, "qcom,acd-level",
&level->acd_level);
- ret = _of_property_read_ddrtype(child,
+ ret = kgsl_of_property_read_ddrtype(child,
"qcom,bus-freq", &level->bus_freq);
if (ret) {
dev_err(device->dev,
@@ -960,11 +935,11 @@ static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
}
level->bus_min = level->bus_freq;
- _of_property_read_ddrtype(child,
+ kgsl_of_property_read_ddrtype(child,
"qcom,bus-min", &level->bus_min);
level->bus_max = level->bus_freq;
- _of_property_read_ddrtype(child,
+ kgsl_of_property_read_ddrtype(child,
"qcom,bus-max", &level->bus_max);
}
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index 7495487..b6c699f 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -346,7 +346,7 @@ static int a6xx_gmu_start(struct kgsl_device *device)
u32 mask = 0x000001FF;
/* Check for 0xBABEFACE on legacy targets */
- if (gmu->ver.core <= 0x20010003) {
+ if (gmu->ver.core <= 0x20010004) {
val = 0xBABEFACE;
mask = 0xFFFFFFFF;
}
@@ -1156,31 +1156,11 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
#define A6XX_VBIF_XIN_HALT_CTRL1_ACKS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-static void a6xx_isense_disable(struct kgsl_device *device)
-{
- unsigned int val;
- const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
- !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
- return;
-
- gmu_core_regread(device, A6XX_GPU_CS_ENABLE_REG, &val);
- if (val) {
- gmu_core_regwrite(device, A6XX_GPU_CS_ENABLE_REG, 0);
- gmu_core_regwrite(device, A6XX_GMU_ISENSE_CTRL, 0);
- }
-}
-
static int a6xx_gmu_suspend(struct kgsl_device *device)
{
int ret = 0;
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
- /* do it only if LM feature is enabled */
- /* Disable ISENSE if it's on */
- a6xx_isense_disable(device);
-
/* If SPTP_RAC is on, turn off SPTP_RAC HS */
a6xx_gmu_sptprac_disable(ADRENO_DEVICE(device));
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2257294..41047a0 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -3624,12 +3624,16 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_process_private *process = dev_priv->process_priv;
+ struct kgsl_device *device = dev_priv->device;
struct kgsl_sparse_phys_alloc *param = data;
struct kgsl_mem_entry *entry;
uint64_t flags;
int ret;
int id;
+ if (!(device->flags & KGSL_FLAG_SPARSE))
+ return -ENOTSUPP;
+
ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
if (ret)
return ret;
@@ -3713,9 +3717,13 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_process_private *process = dev_priv->process_priv;
+ struct kgsl_device *device = dev_priv->device;
struct kgsl_sparse_phys_free *param = data;
struct kgsl_mem_entry *entry;
+ if (!(device->flags & KGSL_FLAG_SPARSE))
+ return -ENOTSUPP;
+
entry = kgsl_sharedmem_find_id_flags(process, param->id,
KGSL_MEMFLAGS_SPARSE_PHYS);
if (entry == NULL)
@@ -3745,10 +3753,14 @@ long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_device *device = dev_priv->device;
struct kgsl_sparse_virt_alloc *param = data;
struct kgsl_mem_entry *entry;
int ret;
+ if (!(device->flags & KGSL_FLAG_SPARSE))
+ return -ENOTSUPP;
+
ret = _sparse_alloc_param_sanity_check(param->size, param->pagesize);
if (ret)
return ret;
@@ -3789,9 +3801,13 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_process_private *process = dev_priv->process_priv;
+ struct kgsl_device *device = dev_priv->device;
struct kgsl_sparse_virt_free *param = data;
struct kgsl_mem_entry *entry = NULL;
+ if (!(device->flags & KGSL_FLAG_SPARSE))
+ return -ENOTSUPP;
+
entry = kgsl_sharedmem_find_id_flags(process, param->id,
KGSL_MEMFLAGS_SPARSE_VIRT);
if (entry == NULL)
@@ -4138,6 +4154,7 @@ long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_device *device = dev_priv->device;
struct kgsl_sparse_bind *param = data;
struct kgsl_sparse_binding_object obj;
struct kgsl_mem_entry *virt_entry;
@@ -4146,6 +4163,9 @@ long kgsl_ioctl_sparse_bind(struct kgsl_device_private *dev_priv,
int ret = 0;
int i = 0;
+ if (!(device->flags & KGSL_FLAG_SPARSE))
+ return -ENOTSUPP;
+
ptr = (void __user *) (uintptr_t) param->list;
if (param->size > sizeof(struct kgsl_sparse_binding_object) ||
@@ -4201,6 +4221,9 @@ long kgsl_ioctl_gpu_sparse_command(struct kgsl_device_private *dev_priv,
long result;
unsigned int i = 0;
+ if (!(device->flags & KGSL_FLAG_SPARSE))
+ return -ENOTSUPP;
+
/* Make sure sparse and syncpoint count isn't too big */
if (param->numsparse > KGSL_MAX_SPARSE ||
param->numsyncs > KGSL_MAX_SYNCPOINTS)
@@ -4976,6 +4999,32 @@ int kgsl_request_irq(struct platform_device *pdev, const char *name,
return ret ? ret : num;
}
+int kgsl_of_property_read_ddrtype(struct device_node *node, const char *base,
+ u32 *ptr)
+{
+ char str[32];
+ int ddr = of_fdt_get_ddrtype();
+
+ /* of_fdt_get_ddrtype returns error if the DDR type isn't determined */
+ if (ddr >= 0) {
+ int ret;
+
+ /* Construct expanded string for the DDR type */
+ ret = snprintf(str, sizeof(str), "%s-ddr%d", base, ddr);
+
+ /* WARN_ON() if the array size was too small for the string */
+ if (WARN_ON(ret > sizeof(str)))
+ return -ENOMEM;
+
+ /* Read the expanded string */
+ if (!of_property_read_u32(node, str, ptr))
+ return 0;
+ }
+
+ /* Read the default string */
+ return of_property_read_u32(node, base, ptr);
+}
+
int kgsl_device_platform_probe(struct kgsl_device *device)
{
int status = -EINVAL;
@@ -4985,6 +5034,9 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
if (status)
return status;
+	/* Disable the sparse ioctl invocations as they are not used */
+ device->flags &= ~KGSL_FLAG_SPARSE;
+
kgsl_device_debugfs_init(device);
status = kgsl_pwrctrl_init(device);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 1a86ca5..8813993 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -50,6 +50,7 @@ enum kgsl_event_results {
};
#define KGSL_FLAG_WAKE_ON_TOUCH BIT(0)
+#define KGSL_FLAG_SPARSE BIT(1)
/*
* "list" of event types for ftrace symbolic magic
@@ -919,6 +920,22 @@ void kgsl_snapshot_add_section(struct kgsl_device *device, u16 id,
void *priv);
/**
+ * kgsl_of_property_read_ddrtype - Get property from devicetree based on
+ * the type of DDR.
+ * @node: Devicetree node
+ * @base: prefix string of the property
+ * @ptr: Pointer to store the value of the property
+ *
+ * First look up the devicetree property based on the prefix string and DDR
+ * type. If the property is not specified per DDR type, then look it up
+ * based on the prefix string only.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int kgsl_of_property_read_ddrtype(struct device_node *node, const char *base,
+ u32 *ptr);
+
+/**
* kgsl_query_property_list - Get a list of valid properties
* @device: A KGSL device handle
* @list: Pointer to a list of u32s
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index bc6ff13..994ddb8 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -891,8 +891,9 @@ static void send_nmi_to_gmu(struct adreno_device *adreno_dev)
/* Make sure the interrupt is masked before causing it */
wmb();
- adreno_write_gmureg(adreno_dev,
- ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
+ if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+ adreno_write_gmureg(adreno_dev,
+ ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
adreno_write_gmureg(adreno_dev,
ADRENO_REG_GMU_CM3_CFG,
(1 << GMU_CM3_CFG_NONMASKINTR_SHIFT));
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 405429a5..6eda6f7 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1651,6 +1651,18 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
int status;
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
+ /* Set the following registers only when the MMU type is QSMMU */
+ if (mmu->subtype != KGSL_IOMMU_SMMU_V500) {
+ /* Enable hazard check from GPU_SMMU_HUM_CFG */
+ writel_relaxed(0x02, iommu->regbase + 0x6800);
+
+ /* Write to GPU_SMMU_DORA_ORDERING to disable reordering */
+ writel_relaxed(0x01, iommu->regbase + 0x64a0);
+
+		/* make sure the register writes are committed */
+ wmb();
+ }
+
status = _setup_user_context(mmu);
if (status)
return status;
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 790b379..4fdb5e2 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -1012,6 +1012,9 @@ int kgsl_pwrscale_init(struct device *dev, const char *governor)
data->bus.ib = &pwr->bus_ib[0];
data->bus.index = &pwr->bus_index[0];
data->bus.width = pwr->bus_width;
+ if (!kgsl_of_property_read_ddrtype(device->pdev->dev.of_node,
+ "qcom,bus-accesses", &data->bus.max))
+ data->bus.floating = false;
} else
data->bus.num = 0;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index f98abee..a8dec53 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1323,7 +1323,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
* buffer, provided the size matches. Any allocation has to be done
* with the lock released.
*/
- mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1340,7 +1339,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
/* Allocate memory with the locks released */
free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
if (IS_ERR(new_buf)) {
- mutex_unlock(&drvdata->mem_lock);
return -ENOMEM;
}
coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
@@ -1350,7 +1348,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
free_buf = new_buf =
tmc_etr_setup_sysfs_buf(drvdata);
if (IS_ERR(new_buf)) {
- mutex_unlock(&drvdata->mem_lock);
return -ENOMEM;
}
}
@@ -1362,7 +1359,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
usb_bypass_notifier);
if (IS_ERR_OR_NULL(drvdata->usbch)) {
dev_err(drvdata->dev, "usb_qdss_open failed\n");
- mutex_unlock(&drvdata->mem_lock);
return -ENODEV;
}
} else {
@@ -1370,7 +1366,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
usb_notifier);
if (IS_ERR_OR_NULL(drvdata->usbch)) {
dev_err(drvdata->dev, "usb_qdss_open failed\n");
- mutex_unlock(&drvdata->mem_lock);
return -ENODEV;
}
}
@@ -1418,7 +1413,6 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
tmc_etr_byte_cntr_start(drvdata->byte_cntr);
- mutex_unlock(&drvdata->mem_lock);
if (!ret)
dev_info(drvdata->dev, "TMC-ETR enabled\n");
@@ -1433,13 +1427,19 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret = 0;
+
switch (mode) {
case CS_MODE_SYSFS:
- return tmc_enable_etr_sink_sysfs(csdev);
+ mutex_lock(&drvdata->mem_lock);
+ ret = tmc_enable_etr_sink_sysfs(csdev);
+ mutex_unlock(&drvdata->mem_lock);
+ return ret;
+
case CS_MODE_PERF:
return tmc_enable_etr_sink_perf(csdev);
}
-
/* We shouldn't be here */
return -EINVAL;
}
@@ -1449,11 +1449,9 @@ static void _tmc_disable_etr_sink(struct coresight_device *csdev, bool flush)
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- mutex_unlock(&drvdata->mem_lock);
return;
}
@@ -1504,31 +1502,40 @@ static void _tmc_disable_etr_sink(struct coresight_device *csdev, bool flush)
}
}
out:
- mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC-ETR disabled\n");
}
static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ mutex_lock(&drvdata->mem_lock);
_tmc_disable_etr_sink(csdev, true);
+ mutex_unlock(&drvdata->mem_lock);
}
int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
{
enum tmc_etr_out_mode new_mode, old_mode;
+ mutex_lock(&drvdata->mem_lock);
if (!strcmp(out_mode, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_MEM]))
new_mode = TMC_ETR_OUT_MODE_MEM;
else if (!strcmp(out_mode, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_USB]))
new_mode = TMC_ETR_OUT_MODE_USB;
- else
+ else {
+ mutex_unlock(&drvdata->mem_lock);
return -EINVAL;
+ }
- if (new_mode == drvdata->out_mode)
+ if (new_mode == drvdata->out_mode) {
+ mutex_unlock(&drvdata->mem_lock);
return 0;
+ }
if (drvdata->mode == CS_MODE_DISABLED) {
drvdata->out_mode = new_mode;
+ mutex_unlock(&drvdata->mem_lock);
return 0;
}
@@ -1541,8 +1548,11 @@ int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
dev_err(drvdata->dev, "Switch to %s failed. Fall back to %s.\n",
str_tmc_etr_out_mode[new_mode],
str_tmc_etr_out_mode[old_mode]);
+ mutex_unlock(&drvdata->mem_lock);
return -EINVAL;
}
+
+ mutex_unlock(&drvdata->mem_lock);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index d449e60..3ab671d 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -115,6 +115,7 @@ struct geni_i2c_dev {
struct msm_gpi_dma_async_tx_cb_param rx_cb;
enum i2c_se_mode se_mode;
bool cmd_done;
+ bool is_shared;
};
struct geni_i2c_err_log {
@@ -420,15 +421,6 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
int i, ret = 0, timeout = 0;
- ret = pinctrl_select_state(gi2c->i2c_rsc.geni_pinctrl,
- gi2c->i2c_rsc.geni_gpio_active);
- if (ret) {
- GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
- "%s: Error %d pinctrl_select_state active\n",
- __func__, ret);
- return ret;
- }
-
if (!gi2c->tx_c) {
gi2c->tx_c = dma_request_slave_channel(gi2c->dev, "tx");
if (!gi2c->tx_c) {
@@ -647,8 +639,6 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
geni_i2c_gsi_xfer_out:
if (!ret && gi2c->err)
ret = gi2c->err;
- pinctrl_select_state(gi2c->i2c_rsc.geni_pinctrl,
- gi2c->i2c_rsc.geni_gpio_sleep);
return ret;
}
@@ -907,6 +897,11 @@ static int geni_i2c_probe(struct platform_device *pdev)
return ret;
}
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,shared")) {
+ gi2c->is_shared = true;
+ dev_info(&pdev->dev, "Multi-EE usecase\n");
+ }
+
if (of_property_read_u32(pdev->dev.of_node, "qcom,clk-freq-out",
&gi2c->i2c_rsc.clk_freq_out)) {
dev_info(&pdev->dev,
@@ -984,12 +979,14 @@ static int geni_i2c_runtime_suspend(struct device *dev)
{
struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
- if (gi2c->se_mode == FIFO_SE_DMA) {
+ if (gi2c->se_mode == FIFO_SE_DMA)
disable_irq(gi2c->irq);
- se_geni_resources_off(&gi2c->i2c_rsc);
- } else {
- /* GPIO is set to sleep state already. So just clocks off */
+
+ if (gi2c->is_shared) {
+ /* Do not unconfigure GPIOs if shared se */
se_geni_clks_off(&gi2c->i2c_rsc);
+ } else {
+ se_geni_resources_off(&gi2c->i2c_rsc);
}
return 0;
}
@@ -1006,10 +1003,7 @@ static int geni_i2c_runtime_resume(struct device *dev)
gi2c->ipcl = ipc_log_context_create(2, ipc_name, 0);
}
- if (gi2c->se_mode != GSI_ONLY)
- ret = se_geni_resources_on(&gi2c->i2c_rsc);
- else
- ret = se_geni_clks_on(&gi2c->i2c_rsc);
+ ret = se_geni_resources_on(&gi2c->i2c_rsc);
if (ret)
return ret;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 5299eea..11f7366 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -856,6 +856,17 @@
To compile this driver as a module, choose M here: the
module will be called zylonite-wm97xx.
+config SECURE_TOUCH_SYNAPTICS_DSX
+ bool "Secure Touch"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_I2C
+ help
+	  Say Y here to enable Secure Touch in the supported driver.
+
+ If unsure, say N.
+
+	  To compile the supported driver with Secure Touch enabled,
+	  say Y here.
+
config TOUCHSCREEN_USB_COMPOSITE
tristate "USB Touchscreen Driver"
depends on USB_ARCH_HAS_HCD
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
index e4ff2ea..4e2dc13 100755
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.c
@@ -45,6 +45,16 @@
#ifdef KERNEL_ABOVE_2_6_38
#include <linux/input/mt.h>
#endif
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+#include <linux/errno.h>
+#include <soc/qcom/scm.h>
+enum subsystem {
+ TZ = 1,
+ APSS = 3
+};
+
+#define TZ_BLSP_MODIFY_OWNERSHIP_ID 3
+#endif
#include <linux/completion.h>
@@ -182,17 +192,18 @@ static ssize_t synaptics_rmi4_synad_pid_store(struct device *dev,
static ssize_t synaptics_rmi4_virtual_key_map_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf);
-struct synaptics_rmi4_f01_device_status {
- union {
- struct {
- unsigned char status_code:4;
- unsigned char reserved:2;
- unsigned char flash_prog:1;
- unsigned char unconfigured:1;
- } __packed;
- unsigned char data[1];
- };
-};
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data);
+
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+static ssize_t synaptics_rmi4_secure_touch_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_secure_touch_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_secure_touch_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+#endif
struct synaptics_rmi4_f11_query_0_5 {
union {
@@ -705,6 +716,14 @@ static struct device_attribute attrs[] = {
__ATTR(wake_gesture, 0664,
synaptics_rmi4_wake_gesture_show,
synaptics_rmi4_wake_gesture_store),
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+ __ATTR(secure_touch_enable, 0664,
+ synaptics_rmi4_secure_touch_enable_show,
+ synaptics_rmi4_secure_touch_enable_store),
+ __ATTR(secure_touch, 0444,
+ synaptics_rmi4_secure_touch_show,
+ NULL),
+#endif
#ifdef USE_DATA_SERVER
__ATTR(synad_pid, 0220,
synaptics_rmi4_show_error,
@@ -720,6 +739,224 @@ static struct kobj_attribute virtual_key_map_attr = {
.show = synaptics_rmi4_virtual_key_map_show,
};
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+static int synaptics_rmi4_i2c_change_pipe_owner(
+ struct synaptics_rmi4_data *rmi4_data, enum subsystem subsystem)
+{
+ struct scm_desc desc;
+ struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+ int ret = 0;
+
+ desc.arginfo = SCM_ARGS(2);
+ desc.args[0] = i2c->adapter->nr;
+ desc.args[1] = subsystem;
+
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_TZ, TZ_BLSP_MODIFY_OWNERSHIP_ID),
+ &desc);
+ if (ret) {
+ dev_err(rmi4_data->pdev->dev.parent, "%s: failed\n",
+ __func__);
+ return ret;
+ }
+
+ return desc.ret[0];
+}
+
+static void synaptics_rmi4_secure_touch_init(struct synaptics_rmi4_data *data)
+{
+ data->st_initialized = false;
+ init_completion(&data->st_powerdown);
+ init_completion(&data->st_irq_processed);
+
+ /* Get clocks */
+ data->core_clk = devm_clk_get(data->pdev->dev.parent, "core_clk");
+ if (IS_ERR(data->core_clk)) {
+ dev_dbg(data->pdev->dev.parent,
+ "%s: error on clk_get(core_clk): %ld\n",
+ __func__, PTR_ERR(data->core_clk));
+ data->core_clk = NULL;
+ }
+
+ data->iface_clk = devm_clk_get(data->pdev->dev.parent, "iface_clk");
+ if (IS_ERR(data->iface_clk)) {
+ dev_dbg(data->pdev->dev.parent,
+ "%s: error on clk_get(iface_clk): %ld\n",
+ __func__, PTR_ERR(data->iface_clk));
+ data->iface_clk = NULL;
+ }
+ data->st_initialized = true;
+}
+
+static void synaptics_rmi4_secure_touch_notify(
+ struct synaptics_rmi4_data *rmi4_data)
+{
+ sysfs_notify(&rmi4_data->input_dev->dev.kobj, NULL, "secure_touch");
+}
+
+static irqreturn_t synaptics_rmi4_filter_interrupt(
+ struct synaptics_rmi4_data *rmi4_data)
+{
+ if (atomic_read(&rmi4_data->st_enabled)) {
+ if (atomic_cmpxchg(&rmi4_data->st_pending_irqs, 0, 1) == 0) {
+ reinit_completion(&rmi4_data->st_irq_processed);
+ synaptics_rmi4_secure_touch_notify(rmi4_data);
+ wait_for_completion_interruptible(
+ &rmi4_data->st_irq_processed);
+ }
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/*
+ * The 'blocking' variable is set to 'true' when we want to prevent the
+ * driver from accessing the xPU/SMMU-protected HW resources while the
+ * session is active.
+ */
+static void synaptics_rmi4_secure_touch_stop(
+ struct synaptics_rmi4_data *rmi4_data, bool blocking)
+{
+ if (atomic_read(&rmi4_data->st_enabled)) {
+ atomic_set(&rmi4_data->st_pending_irqs, -1);
+ synaptics_rmi4_secure_touch_notify(rmi4_data);
+ if (blocking)
+ wait_for_completion_interruptible(
+ &rmi4_data->st_powerdown);
+ }
+}
+
+#else
+static void synaptics_rmi4_secure_touch_init(
+ struct synaptics_rmi4_data *rmi4_data)
+{
+}
+
+static irqreturn_t synaptics_rmi4_filter_interrupt(
+ struct synaptics_rmi4_data *rmi4_data)
+{
+ return IRQ_NONE;
+}
+
+static void synaptics_rmi4_secure_touch_stop(
+ struct synaptics_rmi4_data *rmi4_data, bool blocking)
+{
+}
+#endif
+
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+static ssize_t synaptics_rmi4_secure_touch_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d",
+ atomic_read(&rmi4_data->st_enabled));
+}
+
+/*
+ * Accept only "0" and "1" valid values.
+ * "0" will reset the st_enabled flag, then wake up the reading process and
+ * the interrupt handler.
+ * The bus driver is notified via pm_runtime that it is not required to stay
+ * awake anymore.
+ * It will also make sure the queue of events is emptied in the controller,
+ * in case a touch happened in between the secure touch being disabled and
+ * the local ISR being ungated.
+ * "1" will set the st_enabled flag and clear the st_pending_irqs flag.
+ * The bus driver is requested via pm_runtime to stay awake.
+ */
+static ssize_t synaptics_rmi4_secure_touch_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ unsigned long value;
+ int err = 0;
+
+ if (count > 2)
+ return -EINVAL;
+
+ if (!rmi4_data->st_initialized)
+ return -EIO;
+
+ err = kstrtoul(buf, 10, &value);
+ if (err)
+ return err;
+
+ err = count;
+
+ switch (value) {
+ case 0:
+ if (atomic_read(&rmi4_data->st_enabled) == 0)
+ break;
+ synaptics_rmi4_i2c_change_pipe_owner(rmi4_data, APSS);
+ synaptics_rmi4_bus_put(rmi4_data);
+ atomic_set(&rmi4_data->st_enabled, 0);
+ synaptics_rmi4_secure_touch_notify(rmi4_data);
+ complete(&rmi4_data->st_irq_processed);
+ synaptics_rmi4_irq(rmi4_data->irq, rmi4_data);
+ complete(&rmi4_data->st_powerdown);
+
+ break;
+ case 1:
+ if (atomic_read(&rmi4_data->st_enabled)) {
+ err = -EBUSY;
+ break;
+ }
+ synchronize_irq(rmi4_data->irq);
+
+ if (synaptics_rmi4_bus_get(rmi4_data) < 0) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "synaptics_rmi4_bus_get failed\n");
+ err = -EIO;
+ break;
+ }
+ synaptics_rmi4_i2c_change_pipe_owner(rmi4_data, TZ);
+ reinit_completion(&rmi4_data->st_powerdown);
+ reinit_completion(&rmi4_data->st_irq_processed);
+ atomic_set(&rmi4_data->st_enabled, 1);
+ atomic_set(&rmi4_data->st_pending_irqs, 0);
+ break;
+ default:
+ dev_err(rmi4_data->pdev->dev.parent,
+ "unsupported value: %lu\n", value);
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+/*
+ * This function returns whether there are pending interrupts, or
+ * other error conditions that need to be signaled to the userspace library,
+ * according to the following logic:
+ * - st_enabled is 0 if secure touch is not enabled, returning -EBADF
+ * - st_pending_irqs is -1 to signal that secure touch is being stopped,
+ * returning -EINVAL
+ * - st_pending_irqs is 1 to signal that there is a pending irq, returning
+ * the value "1" to the sysfs read operation
+ * - st_pending_irqs is 0 (only remaining case left) if the pending interrupt
+ * has been processed, so the interrupt handler can be allowed to continue.
+ */
+static ssize_t synaptics_rmi4_secure_touch_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ int val = 0;
+
+ if (atomic_read(&rmi4_data->st_enabled) == 0)
+ return -EBADF;
+ if (atomic_cmpxchg(&rmi4_data->st_pending_irqs, -1, 0) == -1)
+ return -EINVAL;
+ if (atomic_cmpxchg(&rmi4_data->st_pending_irqs, 1, 0) == 1)
+ val = 1;
+ else
+ complete(&rmi4_data->st_irq_processed);
+
+ return scnprintf(buf, PAGE_SIZE, "%u", val);
+}
+#endif
+
static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -1673,10 +1910,9 @@ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
bool report)
{
int retval;
- unsigned char data[MAX_INTR_REGISTERS + 1];
- unsigned char *intr = &data[1];
+ unsigned char *data = NULL;
+ unsigned char *intr;
bool was_in_bl_mode;
- struct synaptics_rmi4_f01_device_status status;
struct synaptics_rmi4_fn *fhandler;
struct synaptics_rmi4_exp_fhandler *exp_fhandler;
struct synaptics_rmi4_device_info *rmi;
@@ -1687,6 +1923,14 @@ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
* Get interrupt status information from F01 Data1 register to
* determine the source(s) that are flagging the interrupt.
*/
+ data = kcalloc((MAX_INTR_REGISTERS + 1), sizeof(char), GFP_KERNEL);
+ if (!data) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ intr = &data[1];
+
retval = synaptics_rmi4_reg_read(rmi4_data,
rmi4_data->f01_data_base_addr,
data,
@@ -1695,31 +1939,31 @@ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read interrupt status\n",
__func__);
- return retval;
+ goto exit;
}
- status.data[0] = data[0];
- if (status.status_code == STATUS_CRC_IN_PROGRESS) {
+ rmi4_data->status.data[0] = data[0];
+ if (rmi4_data->status.status_code == STATUS_CRC_IN_PROGRESS) {
retval = synaptics_rmi4_check_status(rmi4_data,
&was_in_bl_mode);
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to check status\n",
__func__);
- return retval;
+ goto exit;
}
retval = synaptics_rmi4_reg_read(rmi4_data,
rmi4_data->f01_data_base_addr,
- status.data,
- sizeof(status.data));
+ rmi4_data->status.data,
+ sizeof(rmi4_data->status.data));
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read device status\n",
__func__);
- return retval;
+ goto exit;
}
}
- if (status.unconfigured && !status.flash_prog) {
+ if (rmi4_data->status.unconfigured && !rmi4_data->status.flash_prog) {
pr_notice("%s: spontaneous reset detected\n", __func__);
retval = synaptics_rmi4_reinit_device(rmi4_data);
if (retval < 0) {
@@ -1730,7 +1974,7 @@ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
}
if (!report)
- return retval;
+ goto exit;
/*
* Traverse the function handler list and service the source(s)
@@ -1758,7 +2002,8 @@ static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data,
}
}
mutex_unlock(&exp_data.mutex);
-
+exit:
+ kfree(data);
return retval;
}
@@ -1768,6 +2013,9 @@ static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
const struct synaptics_dsx_board_data *bdata =
rmi4_data->hw_if->board_data;
+ if (synaptics_rmi4_filter_interrupt(data) == IRQ_HANDLED)
+ return IRQ_HANDLED;
+
if (gpio_get_value(bdata->irq_gpio) != bdata->irq_on_state)
goto exit;
@@ -1816,12 +2064,18 @@ static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
bool enable, bool attn_only)
{
int retval = 0;
- unsigned char data[MAX_INTR_REGISTERS];
+ unsigned char *data = NULL;
const struct synaptics_dsx_board_data *bdata =
rmi4_data->hw_if->board_data;
mutex_lock(&(rmi4_data->rmi4_irq_enable_mutex));
+ data = kcalloc(MAX_INTR_REGISTERS, sizeof(char), GFP_KERNEL);
+ if (!data) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
if (attn_only) {
retval = synaptics_rmi4_int_enable(rmi4_data, enable);
goto exit;
@@ -1875,8 +2129,8 @@ static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
}
exit:
+ kfree(data);
mutex_unlock(&(rmi4_data->rmi4_irq_enable_mutex));
-
return retval;
}
@@ -2280,8 +2534,8 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
unsigned char subpacket;
unsigned char ctrl_23_size;
unsigned char size_of_2d_data;
- unsigned char size_of_query5;
- unsigned char size_of_query8;
+ unsigned char *size_of_query5 = NULL;
+ unsigned char *size_of_query8 = NULL;
unsigned char ctrl_8_offset;
unsigned char ctrl_20_offset;
unsigned char ctrl_23_offset;
@@ -2311,6 +2565,18 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);
+ size_of_query5 = kcalloc(1, sizeof(char), GFP_KERNEL);
+ if (!size_of_query5) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ size_of_query8 = kcalloc(1, sizeof(char), GFP_KERNEL);
+ if (!size_of_query8) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
query_5 = kzalloc(sizeof(*query_5), GFP_KERNEL);
if (!query_5) {
dev_err(rmi4_data->pdev->dev.parent,
@@ -2367,19 +2633,19 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
retval = synaptics_rmi4_reg_read(rmi4_data,
fhandler->full_addr.query_base + 4,
- &size_of_query5,
- sizeof(size_of_query5));
+ size_of_query5,
+ sizeof(*size_of_query5));
if (retval < 0)
goto exit;
- if (size_of_query5 > sizeof(query_5->data))
- size_of_query5 = sizeof(query_5->data);
+ if (*size_of_query5 > sizeof(query_5->data))
+ *size_of_query5 = sizeof(query_5->data);
memset(query_5->data, 0x00, sizeof(query_5->data));
retval = synaptics_rmi4_reg_read(rmi4_data,
fhandler->full_addr.query_base + 5,
query_5->data,
- size_of_query5);
+ *size_of_query5);
if (retval < 0)
goto exit;
@@ -2494,26 +2760,26 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
retval = synaptics_rmi4_reg_read(rmi4_data,
fhandler->full_addr.query_base + 7,
- &size_of_query8,
- sizeof(size_of_query8));
+ size_of_query8,
+ sizeof(*size_of_query8));
if (retval < 0)
goto exit;
- if (size_of_query8 > sizeof(query_8->data))
- size_of_query8 = sizeof(query_8->data);
+ if (*size_of_query8 > sizeof(query_8->data))
+ *size_of_query8 = sizeof(query_8->data);
memset(query_8->data, 0x00, sizeof(query_8->data));
retval = synaptics_rmi4_reg_read(rmi4_data,
fhandler->full_addr.query_base + 8,
query_8->data,
- size_of_query8);
+ *size_of_query8);
if (retval < 0)
goto exit;
/* Determine the presence of the Data0 register */
extra_data->data1_offset = query_8->data0_is_present;
- if ((size_of_query8 >= 3) && (query_8->data15_is_present)) {
+ if ((*size_of_query8 >= 3) && (query_8->data15_is_present)) {
extra_data->data15_offset = query_8->data0_is_present +
query_8->data1_is_present +
query_8->data2_is_present +
@@ -2535,7 +2801,7 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
}
#ifdef REPORT_2D_PRESSURE
- if ((size_of_query8 >= 5) && (query_8->data29_is_present)) {
+ if ((*size_of_query8 >= 5) && (query_8->data29_is_present)) {
extra_data->data29_offset = query_8->data0_is_present +
query_8->data1_is_present +
query_8->data2_is_present +
@@ -2683,6 +2949,8 @@ static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
}
exit:
+ kfree(size_of_query5);
+ kfree(size_of_query8);
kfree(query_5);
kfree(query_8);
kfree(ctrl_8);
@@ -2908,16 +3176,15 @@ static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
{
int retval;
int timeout = CHECK_STATUS_TIMEOUT_MS;
- struct synaptics_rmi4_f01_device_status status;
retval = synaptics_rmi4_reg_read(rmi4_data,
rmi4_data->f01_data_base_addr,
- status.data,
- sizeof(status.data));
+ rmi4_data->status.data,
+ sizeof(rmi4_data->status.data));
if (retval < 0)
return retval;
- while (status.status_code == STATUS_CRC_IN_PROGRESS) {
+ while (rmi4_data->status.status_code == STATUS_CRC_IN_PROGRESS) {
if (timeout > 0)
msleep(20);
else
@@ -2925,8 +3192,8 @@ static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
retval = synaptics_rmi4_reg_read(rmi4_data,
rmi4_data->f01_data_base_addr,
- status.data,
- sizeof(status.data));
+ rmi4_data->status.data,
+ sizeof(rmi4_data->status.data));
if (retval < 0)
return retval;
@@ -2936,11 +3203,11 @@ static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
if (timeout != CHECK_STATUS_TIMEOUT_MS)
*was_in_bl_mode = true;
- if (status.flash_prog == 1) {
+ if (rmi4_data->status.flash_prog == 1) {
rmi4_data->flash_prog_mode = true;
pr_notice("%s: In flash prog mode, status = 0x%02x\n",
__func__,
- status.status_code);
+ rmi4_data->status.status_code);
} else {
rmi4_data->flash_prog_mode = false;
}
@@ -2951,32 +3218,39 @@ static int synaptics_rmi4_check_status(struct synaptics_rmi4_data *rmi4_data,
static int synaptics_rmi4_set_configured(struct synaptics_rmi4_data *rmi4_data)
{
int retval;
- unsigned char device_ctrl;
+ unsigned char *device_ctrl = NULL;
+
+ device_ctrl = kcalloc(1, sizeof(char), GFP_KERNEL);
+ if (!device_ctrl) {
+ retval = -ENOMEM;
+ goto exit;
+ }
retval = synaptics_rmi4_reg_read(rmi4_data,
rmi4_data->f01_ctrl_base_addr,
- &device_ctrl,
- sizeof(device_ctrl));
+ device_ctrl,
+ sizeof(*device_ctrl));
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to set configured\n",
__func__);
- return retval;
+ goto exit;
}
- rmi4_data->no_sleep_setting = device_ctrl & NO_SLEEP_ON;
- device_ctrl |= CONFIGURED;
+ rmi4_data->no_sleep_setting = *device_ctrl & NO_SLEEP_ON;
+ *device_ctrl |= CONFIGURED;
retval = synaptics_rmi4_reg_write(rmi4_data,
rmi4_data->f01_ctrl_base_addr,
- &device_ctrl,
- sizeof(device_ctrl));
+ device_ctrl,
+ sizeof(*device_ctrl));
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to set configured\n",
__func__);
}
-
+exit:
+ kfree(device_ctrl);
return retval;
}
@@ -3013,7 +3287,6 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
bool f01found;
bool f35found;
bool was_in_bl_mode;
- struct synaptics_rmi4_fn_desc rmi_fd;
struct synaptics_rmi4_fn *fhandler;
struct synaptics_rmi4_device_info *rmi;
@@ -3034,8 +3307,8 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
retval = synaptics_rmi4_reg_read(rmi4_data,
pdt_entry_addr,
- (unsigned char *)&rmi_fd,
- sizeof(rmi_fd));
+ (unsigned char *)&rmi4_data->rmi_fd,
+ sizeof(rmi4_data->rmi_fd));
if (retval < 0)
return retval;
@@ -3043,7 +3316,7 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
fhandler = NULL;
- if (rmi_fd.fn_number == 0) {
+ if (rmi4_data->rmi_fd.fn_number == 0) {
dev_dbg(rmi4_data->pdev->dev.parent,
"%s: Reached end of PDT\n",
__func__);
@@ -3052,28 +3325,30 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
dev_dbg(rmi4_data->pdev->dev.parent,
"%s: F%02x found (page %d)\n",
- __func__, rmi_fd.fn_number,
+ __func__, rmi4_data->rmi_fd.fn_number,
page_number);
- switch (rmi_fd.fn_number) {
+ switch (rmi4_data->rmi_fd.fn_number) {
case SYNAPTICS_RMI4_F01:
- if (rmi_fd.intr_src_count == 0)
+ if (rmi4_data->rmi_fd.intr_src_count == 0)
break;
f01found = true;
retval = synaptics_rmi4_alloc_fh(&fhandler,
- &rmi_fd, page_number);
+ &rmi4_data->rmi_fd,
+ page_number);
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
- "%s: Failed to alloc for F%d\n",
- __func__,
- rmi_fd.fn_number);
+ "%s: Failed to alloc for F%d\n",
+ __func__,
+ rmi4_data->rmi_fd.fn_number);
return retval;
}
retval = synaptics_rmi4_f01_init(rmi4_data,
- fhandler, &rmi_fd, intr_count);
+ fhandler, &rmi4_data->rmi_fd,
+ intr_count);
if (retval < 0)
return retval;
@@ -3097,59 +3372,65 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
break;
case SYNAPTICS_RMI4_F11:
- if (rmi_fd.intr_src_count == 0)
+ if (rmi4_data->rmi_fd.intr_src_count == 0)
break;
retval = synaptics_rmi4_alloc_fh(&fhandler,
- &rmi_fd, page_number);
+ &rmi4_data->rmi_fd,
+ page_number);
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
- "%s: Failed to alloc for F%d\n",
- __func__,
- rmi_fd.fn_number);
+ "%s: Failed to alloc for F%d\n",
+ __func__,
+ rmi4_data->rmi_fd.fn_number);
return retval;
}
retval = synaptics_rmi4_f11_init(rmi4_data,
- fhandler, &rmi_fd, intr_count);
+ fhandler, &rmi4_data->rmi_fd,
+ intr_count);
if (retval < 0)
return retval;
break;
case SYNAPTICS_RMI4_F12:
- if (rmi_fd.intr_src_count == 0)
+ if (rmi4_data->rmi_fd.intr_src_count == 0)
break;
retval = synaptics_rmi4_alloc_fh(&fhandler,
- &rmi_fd, page_number);
+ &rmi4_data->rmi_fd,
+ page_number);
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
- "%s: Failed to alloc for F%d\n",
- __func__,
- rmi_fd.fn_number);
+ "%s: Failed to alloc for F%d\n",
+ __func__,
+ rmi4_data->rmi_fd.fn_number);
return retval;
}
retval = synaptics_rmi4_f12_init(rmi4_data,
- fhandler, &rmi_fd, intr_count);
+ fhandler, &rmi4_data->rmi_fd,
+ intr_count);
if (retval < 0)
return retval;
break;
case SYNAPTICS_RMI4_F1A:
- if (rmi_fd.intr_src_count == 0)
+ if (rmi4_data->rmi_fd.intr_src_count == 0)
break;
retval = synaptics_rmi4_alloc_fh(&fhandler,
- &rmi_fd, page_number);
+ &rmi4_data->rmi_fd,
+ page_number);
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
- "%s: Failed to alloc for F%d\n",
- __func__,
- rmi_fd.fn_number);
+ "%s: Failed to alloc for F%d\n",
+ __func__,
+ rmi4_data->rmi_fd.fn_number);
return retval;
}
retval = synaptics_rmi4_f1a_init(rmi4_data,
- fhandler, &rmi_fd, intr_count);
+ fhandler, &rmi4_data->rmi_fd,
+ intr_count);
if (retval < 0) {
#ifdef IGNORE_FN_INIT_FAILURE
kfree(fhandler);
@@ -3161,25 +3442,27 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
break;
#ifdef USE_DATA_SERVER
case SYNAPTICS_RMI4_F21:
- if (rmi_fd.intr_src_count == 0)
+ if (rmi4_data->rmi_fd.intr_src_count == 0)
break;
retval = synaptics_rmi4_alloc_fh(&fhandler,
- &rmi_fd, page_number);
+ &rmi4_data->rmi_fd,
+ page_number);
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
- "%s: Failed to alloc for F%d\n",
- __func__,
- rmi_fd.fn_number);
+ "%s: Failed to alloc for F%d\n",
+ __func__,
+ rmi4_data->rmi_fd.fn_number);
return retval;
}
- fhandler->fn_number = rmi_fd.fn_number;
+ fhandler->fn_number =
+ rmi4_data->rmi_fd.fn_number;
fhandler->num_of_data_sources =
- rmi_fd.intr_src_count;
+ rmi4_data->rmi_fd.intr_src_count;
- synaptics_rmi4_set_intr_mask(fhandler, &rmi_fd,
- intr_count);
+ synaptics_rmi4_set_intr_mask(fhandler,
+ &rmi4_data->rmi_fd, intr_count);
break;
#endif
case SYNAPTICS_RMI4_F35:
@@ -3188,16 +3471,16 @@ static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
#ifdef F51_DISCRETE_FORCE
case SYNAPTICS_RMI4_F51:
rmi4_data->f51_query_base_addr =
- rmi_fd.query_base_addr |
- (page_number << 8);
+ rmi4_data->rmi_fd.query_base_addr |
+ (page_number << 8);
break;
#endif
}
/* Accumulate the interrupt count */
- intr_count += rmi_fd.intr_src_count;
+ intr_count += rmi4_data->rmi_fd.intr_src_count;
- if (fhandler && rmi_fd.intr_src_count) {
+ if (fhandler && rmi4_data->rmi_fd.intr_src_count) {
list_add_tail(&fhandler->link,
&rmi->support_fn_list);
}
@@ -4114,39 +4397,47 @@ static int synaptics_rmi4_sleep_enable(struct synaptics_rmi4_data *rmi4_data,
bool enable)
{
int retval;
- unsigned char device_ctrl;
+ unsigned char *device_ctrl = NULL;
unsigned char no_sleep_setting = rmi4_data->no_sleep_setting;
+ device_ctrl = kcalloc(1, sizeof(char), GFP_KERNEL);
+ if (!device_ctrl) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
retval = synaptics_rmi4_reg_read(rmi4_data,
rmi4_data->f01_ctrl_base_addr,
- &device_ctrl,
- sizeof(device_ctrl));
+ device_ctrl,
+ sizeof(*device_ctrl));
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read device control\n",
__func__);
- return retval;
+ goto exit;
}
- device_ctrl = device_ctrl & ~MASK_3BIT;
+ *device_ctrl = *device_ctrl & ~MASK_3BIT;
if (enable)
- device_ctrl = device_ctrl | SENSOR_SLEEP;
+ *device_ctrl = *device_ctrl | SENSOR_SLEEP;
else
- device_ctrl = device_ctrl | no_sleep_setting | NORMAL_OPERATION;
+ *device_ctrl = *device_ctrl | no_sleep_setting |
+ NORMAL_OPERATION;
retval = synaptics_rmi4_reg_write(rmi4_data,
rmi4_data->f01_ctrl_base_addr,
- &device_ctrl,
- sizeof(device_ctrl));
+ device_ctrl,
+ sizeof(*device_ctrl));
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to write device control\n",
__func__);
- return retval;
+ goto exit;
}
rmi4_data->sensor_sleep = enable;
-
+exit:
+ kfree(device_ctrl);
return retval;
}
@@ -4291,6 +4582,11 @@ static int synaptics_rmi4_probe(struct platform_device *pdev)
goto err_drm_reg;
}
}
+
+ /* Initialize secure touch */
+ synaptics_rmi4_secure_touch_init(rmi4_data);
+ synaptics_rmi4_secure_touch_stop(rmi4_data, true);
+
rmi4_data->rmi4_probe_wq = create_singlethread_workqueue(
"Synaptics_rmi4_probe_wq");
if (!rmi4_data->rmi4_probe_wq) {
@@ -4641,6 +4937,7 @@ static int synaptics_rmi4_dsi_panel_notifier_cb(struct notifier_block *self,
if (evdata && evdata->data && rmi4_data) {
if (event == DRM_PANEL_EARLY_EVENT_BLANK) {
+ synaptics_rmi4_secure_touch_stop(rmi4_data, false);
transition = *(int *)evdata->data;
if (transition == DRM_PANEL_BLANK_POWERDOWN) {
if (rmi4_data->initialized)
@@ -4679,6 +4976,13 @@ static int synaptics_rmi4_early_suspend(struct early_suspend *h)
if (rmi4_data->stay_awake)
return retval;
+ /*
+ * During early suspend/late resume, the driver does not access xPU/SMMU-
+ * protected HW resources, so there is no compelling need to block;
+ * notifying userspace that a power event has occurred is enough.
+ * Hence the 'blocking' variable can be set to false.
+ */
+ synaptics_rmi4_secure_touch_stop(rmi4_data, false);
if (rmi4_data->enable_wakeup_gesture) {
if (rmi4_data->no_sleep_setting) {
@@ -4743,6 +5047,8 @@ static int synaptics_rmi4_late_resume(struct early_suspend *h)
if (rmi4_data->stay_awake)
return retval;
+ synaptics_rmi4_secure_touch_stop(rmi4_data, false);
+
if (rmi4_data->enable_wakeup_gesture) {
disable_irq_wake(rmi4_data->irq);
goto exit;
@@ -4791,6 +5097,8 @@ static int synaptics_rmi4_suspend(struct device *dev)
if (rmi4_data->stay_awake)
return 0;
+ synaptics_rmi4_secure_touch_stop(rmi4_data, true);
+
if (rmi4_data->enable_wakeup_gesture) {
if (rmi4_data->no_sleep_setting) {
synaptics_rmi4_reg_read(rmi4_data,
@@ -4862,6 +5170,7 @@ static int synaptics_rmi4_resume(struct device *dev)
rmi4_data->hw_if->board_data;
if (rmi4_data->stay_awake)
return 0;
+ synaptics_rmi4_secure_touch_stop(rmi4_data, true);
if (rmi4_data->enable_wakeup_gesture) {
disable_irq_wake(rmi4_data->irq);
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
index 8f175e3..0a778b0 100755
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_core.h
@@ -51,6 +51,13 @@
#include <drm/drm_panel.h>
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#endif
+
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
#define KERNEL_ABOVE_2_6_38
#endif
@@ -282,6 +289,17 @@ struct synaptics_rmi4_device_info {
struct list_head support_fn_list;
};
+struct synaptics_rmi4_f01_device_status {
+ union {
+ struct {
+ unsigned char status_code:4;
+ unsigned char reserved:2;
+ unsigned char flash_prog:1;
+ unsigned char unconfigured:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
/*
* struct synaptics_rmi4_data - RMI4 device instance data
* @pdev: pointer to platform device
@@ -356,6 +374,8 @@ struct synaptics_rmi4_data {
const struct synaptics_dsx_hw_interface *hw_if;
struct synaptics_rmi4_device_info rmi4_mod_info;
struct synaptics_rmi4_input_settings input_settings;
+ struct synaptics_rmi4_fn_desc rmi_fd;
+ struct synaptics_rmi4_f01_device_status status;
struct kobject *board_prop_dir;
struct regulator *pwr_reg;
struct regulator *bus_reg;
@@ -431,6 +451,15 @@ struct synaptics_rmi4_data {
bool enable);
void (*report_touch)(struct synaptics_rmi4_data *rmi4_data,
struct synaptics_rmi4_fn *fhandler);
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+ atomic_t st_enabled;
+ atomic_t st_pending_irqs;
+ struct completion st_powerdown;
+ struct completion st_irq_processed;
+ bool st_initialized;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+#endif
};
struct synaptics_dsx_bus_access {
@@ -439,6 +468,8 @@ struct synaptics_dsx_bus_access {
unsigned char *data, unsigned int length);
int (*write)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
unsigned char *data, unsigned int length);
+ int (*get)(struct synaptics_rmi4_data *rmi4_data);
+ void (*put)(struct synaptics_rmi4_data *rmi4_data);
};
struct synaptics_dsx_hw_interface {
@@ -489,6 +520,16 @@ static inline int synaptics_rmi4_reg_write(
return rmi4_data->hw_if->bus_access->write(rmi4_data, addr, data, len);
}
+static inline int synaptics_rmi4_bus_get(struct synaptics_rmi4_data *rmi4_data)
+{
+ return rmi4_data->hw_if->bus_access->get(rmi4_data);
+}
+
+static inline void synaptics_rmi4_bus_put(struct synaptics_rmi4_data *rmi4_data)
+{
+ rmi4_data->hw_if->bus_access->put(rmi4_data);
+}
+
static inline ssize_t synaptics_rmi4_show_error(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
index 2372368..6e78ebb 100755
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
@@ -1388,30 +1388,42 @@ static int fwu_parse_image_info(void)
static int fwu_read_flash_status(void)
{
- int retval;
- unsigned char status;
- unsigned char command;
+ int retval = 0;
+ unsigned char *status = NULL;
+ unsigned char *command = NULL;
struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+ status = kcalloc(1, sizeof(char), GFP_KERNEL);
+ if (!status) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ command = kcalloc(1, sizeof(char), GFP_KERNEL);
+ if (!command) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
retval = synaptics_rmi4_reg_read(rmi4_data,
fwu->f34_fd.data_base_addr + fwu->off.flash_status,
- &status,
- sizeof(status));
+ status,
+ sizeof(*status));
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read flash status\n",
__func__);
- return retval;
+ goto exit;
}
- fwu->in_bl_mode = status >> 7;
+ fwu->in_bl_mode = *status >> 7;
if (fwu->bl_version == BL_V5)
- fwu->flash_status = (status >> 4) & MASK_3BIT;
+ fwu->flash_status = (*status >> 4) & MASK_3BIT;
else if (fwu->bl_version == BL_V6)
- fwu->flash_status = status & MASK_3BIT;
+ fwu->flash_status = *status & MASK_3BIT;
else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
- fwu->flash_status = status & MASK_5BIT;
+ fwu->flash_status = *status & MASK_5BIT;
if (fwu->write_bootloader)
fwu->flash_status = 0x00;
@@ -1429,26 +1441,28 @@ static int fwu_read_flash_status(void)
retval = synaptics_rmi4_reg_read(rmi4_data,
fwu->f34_fd.data_base_addr + fwu->off.flash_cmd,
- &command,
- sizeof(command));
+ command,
+ sizeof(*command));
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read flash command\n",
__func__);
- return retval;
+ goto exit;
}
if (fwu->bl_version == BL_V5)
- fwu->command = command & MASK_4BIT;
+ fwu->command = *command & MASK_4BIT;
else if (fwu->bl_version == BL_V6)
- fwu->command = command & MASK_6BIT;
+ fwu->command = *command & MASK_6BIT;
else if (fwu->bl_version == BL_V7 || fwu->bl_version == BL_V8)
- fwu->command = command;
+ fwu->command = *command;
if (fwu->write_bootloader)
fwu->command = 0x00;
-
- return 0;
+exit:
+ kfree(status);
+ kfree(command);
+ return retval;
}
static int fwu_wait_for_idle(int timeout_ms, bool poll)
@@ -2062,10 +2076,22 @@ static int fwu_read_f34_v5v6_queries(void)
unsigned char count;
unsigned char base;
unsigned char offset;
- unsigned char buf[10];
- struct f34_v5v6_flash_properties_2 properties_2;
+ unsigned char *buf = NULL;
+ struct f34_v5v6_flash_properties_2 *properties_2 = NULL;
struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+ buf = kcalloc(10, sizeof(char), GFP_KERNEL);
+ if (!buf) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
+ properties_2 = kzalloc(sizeof(*properties_2), GFP_KERNEL);
+ if (!properties_2) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
base = fwu->f34_fd.query_base_addr;
retval = synaptics_rmi4_reg_read(rmi4_data,
@@ -2076,7 +2102,7 @@ static int fwu_read_f34_v5v6_queries(void)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read bootloader ID\n",
__func__);
- return retval;
+ goto exit;
}
if (fwu->bl_version == BL_V5) {
@@ -2103,7 +2129,7 @@ static int fwu_read_f34_v5v6_queries(void)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read block size info\n",
__func__);
- return retval;
+ goto exit;
}
batohs(&fwu->block_size, &(buf[0]));
@@ -2124,7 +2150,7 @@ static int fwu_read_f34_v5v6_queries(void)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read flash properties\n",
__func__);
- return retval;
+ goto exit;
}
count = 4;
@@ -2146,7 +2172,7 @@ static int fwu_read_f34_v5v6_queries(void)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read block count info\n",
__func__);
- return retval;
+ goto exit;
}
batohs(&fwu->blkcount.ui_firmware, &(buf[0]));
@@ -2178,17 +2204,17 @@ static int fwu_read_f34_v5v6_queries(void)
if (fwu->flash_properties.has_query4) {
retval = synaptics_rmi4_reg_read(rmi4_data,
base + fwu->off.properties_2,
- properties_2.data,
- sizeof(properties_2.data));
+ properties_2->data,
+ sizeof(properties_2->data));
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read flash properties 2\n",
__func__);
- return retval;
+ goto exit;
}
offset = fwu->off.properties_2 + 1;
count = 0;
- if (properties_2.has_guest_code) {
+ if (properties_2->has_guest_code) {
retval = synaptics_rmi4_reg_read(rmi4_data,
base + offset + count,
buf,
@@ -2197,7 +2223,7 @@ static int fwu_read_f34_v5v6_queries(void)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read guest code block count\n",
__func__);
- return retval;
+ goto exit;
}
batohs(&fwu->blkcount.guest_code, &(buf[0]));
@@ -2205,7 +2231,7 @@ static int fwu_read_f34_v5v6_queries(void)
fwu->has_guest_code = true;
}
#ifdef SYNA_TDDI
- if (properties_2.has_force_config) {
+ if (properties_2->has_force_config) {
retval = synaptics_rmi4_reg_read(rmi4_data,
base + offset + count,
buf,
@@ -2214,13 +2240,13 @@ static int fwu_read_f34_v5v6_queries(void)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read tddi force block count\n",
__func__);
- return retval;
+ goto exit;
}
batohs(&fwu->blkcount.tddi_force_config, &(buf[0]));
count++;
fwu->has_force_config = true;
}
- if (properties_2.has_lockdown_data) {
+ if (properties_2->has_lockdown_data) {
retval = synaptics_rmi4_reg_read(rmi4_data,
base + offset + count,
buf,
@@ -2229,13 +2255,13 @@ static int fwu_read_f34_v5v6_queries(void)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read tddi lockdown block count\n",
__func__);
- return retval;
+ goto exit;
}
batohs(&fwu->blkcount.tddi_lockdown_data, &(buf[0]));
count++;
fwu->has_lockdown_data = true;
}
- if (properties_2.has_lcm_data) {
+ if (properties_2->has_lcm_data) {
retval = synaptics_rmi4_reg_read(rmi4_data,
base + offset + count,
buf,
@@ -2244,13 +2270,13 @@ static int fwu_read_f34_v5v6_queries(void)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read tddi lcm block count\n",
__func__);
- return retval;
+ goto exit;
}
batohs(&fwu->blkcount.tddi_lcm_data, &(buf[0]));
count++;
fwu->has_lcm_data = true;
}
- if (properties_2.has_oem_data) {
+ if (properties_2->has_oem_data) {
retval = synaptics_rmi4_reg_read(rmi4_data,
base + offset + count,
buf,
@@ -2259,7 +2285,7 @@ static int fwu_read_f34_v5v6_queries(void)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read tddi oem block count\n",
__func__);
- return retval;
+ goto exit;
}
batohs(&fwu->blkcount.tddi_oem_data, &(buf[0]));
fwu->has_oem_data = true;
@@ -2268,8 +2294,10 @@ static int fwu_read_f34_v5v6_queries(void)
}
fwu->has_utility_param = false;
-
- return 0;
+exit:
+ kfree(properties_2);
+ kfree(buf);
+ return retval;
}
static int fwu_read_f34_queries(void)
@@ -2790,7 +2818,6 @@ static int fwu_scan_pdt(void)
bool f01found = false;
bool f34found = false;
bool f35found = false;
- struct synaptics_rmi4_fn_desc rmi_fd;
struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
fwu->in_ub_mode = false;
@@ -2798,38 +2825,38 @@ static int fwu_scan_pdt(void)
for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
retval = synaptics_rmi4_reg_read(rmi4_data,
addr,
- (unsigned char *)&rmi_fd,
- sizeof(rmi_fd));
+ (unsigned char *)&rmi4_data->rmi_fd,
+ sizeof(rmi4_data->rmi_fd));
if (retval < 0)
return retval;
- if (rmi_fd.fn_number) {
+ if (rmi4_data->rmi_fd.fn_number) {
dev_dbg(rmi4_data->pdev->dev.parent,
"%s: Found F%02x\n",
- __func__, rmi_fd.fn_number);
- switch (rmi_fd.fn_number) {
+ __func__, rmi4_data->rmi_fd.fn_number);
+ switch (rmi4_data->rmi_fd.fn_number) {
case SYNAPTICS_RMI4_F01:
f01found = true;
rmi4_data->f01_query_base_addr =
- rmi_fd.query_base_addr;
+ rmi4_data->rmi_fd.query_base_addr;
rmi4_data->f01_ctrl_base_addr =
- rmi_fd.ctrl_base_addr;
+ rmi4_data->rmi_fd.ctrl_base_addr;
rmi4_data->f01_data_base_addr =
- rmi_fd.data_base_addr;
+ rmi4_data->rmi_fd.data_base_addr;
rmi4_data->f01_cmd_base_addr =
- rmi_fd.cmd_base_addr;
+ rmi4_data->rmi_fd.cmd_base_addr;
break;
case SYNAPTICS_RMI4_F34:
f34found = true;
fwu->f34_fd.query_base_addr =
- rmi_fd.query_base_addr;
+ rmi4_data->rmi_fd.query_base_addr;
fwu->f34_fd.ctrl_base_addr =
- rmi_fd.ctrl_base_addr;
+ rmi4_data->rmi_fd.ctrl_base_addr;
fwu->f34_fd.data_base_addr =
- rmi_fd.data_base_addr;
+ rmi4_data->rmi_fd.data_base_addr;
- switch (rmi_fd.fn_version) {
+ switch (rmi4_data->rmi_fd.fn_version) {
case F34_V0:
fwu->bl_version = BL_V5;
break;
@@ -2847,7 +2874,7 @@ static int fwu_scan_pdt(void)
}
fwu->intr_mask = 0;
- intr_src = rmi_fd.intr_src_count;
+ intr_src = rmi4_data->rmi_fd.intr_src_count;
intr_off = intr_count % 8;
for (ii = intr_off;
ii < (intr_src + intr_off);
@@ -2858,20 +2885,20 @@ static int fwu_scan_pdt(void)
case SYNAPTICS_RMI4_F35:
f35found = true;
fwu->f35_fd.query_base_addr =
- rmi_fd.query_base_addr;
+ rmi4_data->rmi_fd.query_base_addr;
fwu->f35_fd.ctrl_base_addr =
- rmi_fd.ctrl_base_addr;
+ rmi4_data->rmi_fd.ctrl_base_addr;
fwu->f35_fd.data_base_addr =
- rmi_fd.data_base_addr;
+ rmi4_data->rmi_fd.data_base_addr;
fwu->f35_fd.cmd_base_addr =
- rmi_fd.cmd_base_addr;
+ rmi4_data->rmi_fd.cmd_base_addr;
break;
}
} else {
break;
}
- intr_count += rmi_fd.intr_src_count;
+ intr_count += rmi4_data->rmi_fd.intr_src_count;
}
if (!f01found || !f34found) {
@@ -5568,7 +5595,7 @@ static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
{
int retval;
unsigned char attr_count;
- struct pdt_properties pdt_props;
+ struct pdt_properties *pdt_props = NULL;
if (fwu) {
dev_dbg(rmi4_data->pdev->dev.parent,
@@ -5577,6 +5604,12 @@ static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
return 0;
}
+ pdt_props = kzalloc(sizeof(*pdt_props), GFP_KERNEL);
+ if (!pdt_props) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
fwu = kzalloc(sizeof(*fwu), GFP_KERNEL);
if (!fwu) {
dev_err(rmi4_data->pdev->dev.parent,
@@ -5599,13 +5632,13 @@ static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
retval = synaptics_rmi4_reg_read(rmi4_data,
PDT_PROPS,
- pdt_props.data,
- sizeof(pdt_props.data));
+ pdt_props->data,
+ sizeof(pdt_props->data));
if (retval < 0) {
dev_dbg(rmi4_data->pdev->dev.parent,
"%s: Failed to read PDT properties, assuming 0x00\n",
__func__);
- } else if (pdt_props.has_bsr) {
+ } else if (pdt_props->has_bsr) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Reflash for LTS not currently supported\n",
__func__);
@@ -5697,6 +5730,7 @@ static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
fwu = NULL;
exit:
+ kfree(pdt_props);
return retval;
}
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
index 00ea777..9b75990 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
@@ -273,11 +273,17 @@ static int synaptics_rmi4_i2c_set_page(struct synaptics_rmi4_data *rmi4_data,
{
int retval = 0;
unsigned char retry;
- unsigned char buf[PAGE_SELECT_LEN];
+ unsigned char *buf = NULL;
unsigned char page;
struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
struct i2c_msg msg[2];
+ buf = kcalloc(PAGE_SELECT_LEN, sizeof(char), GFP_KERNEL);
+ if (!buf) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
msg[0].addr = hw_if.board_data->i2c_addr;
msg[0].flags = 0;
msg[0].len = PAGE_SELECT_LEN;
@@ -308,6 +314,8 @@ static int synaptics_rmi4_i2c_set_page(struct synaptics_rmi4_data *rmi4_data,
retval = PAGE_SELECT_LEN;
}
+exit:
+ kfree(buf);
return retval;
}
@@ -316,7 +324,7 @@ static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
{
int retval = 0;
unsigned char retry;
- unsigned char buf;
+ unsigned char *buf = NULL;
unsigned char index = 0;
unsigned char xfer_msgs;
unsigned char remaining_msgs;
@@ -329,6 +337,12 @@ static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+ buf = kcalloc(1, sizeof(char), GFP_KERNEL);
+ if (!buf) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
retval = synaptics_rmi4_i2c_set_page(rmi4_data, addr);
if (retval != PAGE_SELECT_LEN) {
retval = -EIO;
@@ -338,13 +352,13 @@ static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
msg[0].addr = hw_if.board_data->i2c_addr;
msg[0].flags = 0;
msg[0].len = 1;
- msg[0].buf = &buf;
+ msg[0].buf = buf;
msg[rd_msgs].addr = hw_if.board_data->i2c_addr;
msg[rd_msgs].flags = I2C_M_RD;
msg[rd_msgs].len = (unsigned short)remaining_length;
msg[rd_msgs].buf = &data[data_offset];
- buf = addr & MASK_8BIT;
+ buf[0] = addr & MASK_8BIT;
remaining_msgs = rd_msgs + 1;
@@ -383,8 +397,8 @@ static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
retval = length;
exit:
+ kfree(buf);
mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
-
return retval;
}
@@ -504,10 +518,73 @@ static int check_default_tp(struct device_node *dt, const char *prop)
return ret;
}
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+static int synaptics_rmi4_clk_prepare_enable(
+ struct synaptics_rmi4_data *rmi4_data)
+{
+ int ret;
+
+ ret = clk_prepare_enable(rmi4_data->iface_clk);
+ if (ret) {
+ dev_err(rmi4_data->pdev->dev.parent,
+ "error on clk_prepare_enable(iface_clk):%d\n",
+ ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(rmi4_data->core_clk);
+ if (ret) {
+ clk_disable_unprepare(rmi4_data->iface_clk);
+ dev_err(rmi4_data->pdev->dev.parent,
+ "error clk_prepare_enable(core_clk):%d\n", ret);
+ }
+ return ret;
+}
+
+static void synaptics_rmi4_clk_disable_unprepare(
+ struct synaptics_rmi4_data *rmi4_data)
+{
+ clk_disable_unprepare(rmi4_data->core_clk);
+ clk_disable_unprepare(rmi4_data->iface_clk);
+}
+
+static int synaptics_rmi4_i2c_get(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+ mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+ retval = pm_runtime_get_sync(i2c->adapter->dev.parent);
+ if (retval >= 0 && rmi4_data->core_clk != NULL &&
+ rmi4_data->iface_clk != NULL) {
+ retval = synaptics_rmi4_clk_prepare_enable(rmi4_data);
+ if (retval)
+ pm_runtime_put_sync(i2c->adapter->dev.parent);
+ }
+ mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+
+ return retval;
+}
+
+static void synaptics_rmi4_i2c_put(struct synaptics_rmi4_data *rmi4_data)
+{
+ struct i2c_client *i2c = to_i2c_client(rmi4_data->pdev->dev.parent);
+
+ mutex_lock(&rmi4_data->rmi4_io_ctrl_mutex);
+ if (rmi4_data->core_clk != NULL && rmi4_data->iface_clk != NULL)
+ synaptics_rmi4_clk_disable_unprepare(rmi4_data);
+ pm_runtime_put_sync(i2c->adapter->dev.parent);
+ mutex_unlock(&rmi4_data->rmi4_io_ctrl_mutex);
+}
+#endif
+
static struct synaptics_dsx_bus_access bus_access = {
.type = BUS_I2C,
.read = synaptics_rmi4_i2c_read,
.write = synaptics_rmi4_i2c_write,
+#if defined(CONFIG_SECURE_TOUCH_SYNAPTICS_DSX)
+ .get = synaptics_rmi4_i2c_get,
+ .put = synaptics_rmi4_i2c_put,
+#endif
};
static void synaptics_rmi4_i2c_dev_release(struct device *dev)
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
index c17b692..3a0be3c 100755
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_test_reporting.c
@@ -4673,25 +4673,31 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
unsigned char ii;
unsigned char rx_electrodes;
unsigned char tx_electrodes;
- struct f55_control_43 ctrl_43;
+ struct f55_control_43 *ctrl_43 = NULL;
+
+ ctrl_43 = kzalloc(sizeof(*ctrl_43), GFP_KERNEL);
+ if (!ctrl_43) {
+ retval = -ENOMEM;
+ goto exit;
+ }
retval = test_f55_set_queries();
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read F55 query registers\n",
__func__);
- return;
+ goto exit;
}
if (!f55->query.has_sensor_assignment)
- return;
+ goto exit;
retval = test_f55_set_controls();
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to set up F55 control registers\n",
__func__);
- return;
+ goto exit;
}
tx_electrodes = f55->query.num_of_tx_electrodes;
@@ -4708,7 +4714,7 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read F55 tx assignment\n",
__func__);
- return;
+ goto exit;
}
retval = synaptics_rmi4_reg_read(rmi4_data,
@@ -4719,7 +4725,7 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read F55 rx assignment\n",
__func__);
- return;
+ goto exit;
}
f54->tx_assigned = 0;
@@ -4742,17 +4748,17 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
if (f55->extended_amp) {
retval = synaptics_rmi4_reg_read(rmi4_data,
f55->control_base_addr + f55->afe_mux_offset,
- ctrl_43.data,
- sizeof(ctrl_43.data));
+ ctrl_43->data,
+ sizeof(ctrl_43->data));
if (retval < 0) {
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read F55 AFE mux sizes\n",
__func__);
- return;
+ goto exit;
}
- f54->tx_assigned = ctrl_43.afe_l_mux_size +
- ctrl_43.afe_r_mux_size;
+ f54->tx_assigned = ctrl_43->afe_l_mux_size +
+ ctrl_43->afe_r_mux_size;
}
/* force mapping */
@@ -4768,7 +4774,7 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read F55 force tx assignment\n",
__func__);
- return;
+ goto exit;
}
retval = synaptics_rmi4_reg_read(rmi4_data,
@@ -4779,7 +4785,7 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
dev_err(rmi4_data->pdev->dev.parent,
"%s: Failed to read F55 force rx assignment\n",
__func__);
- return;
+ goto exit;
}
for (ii = 0; ii < tx_electrodes; ii++) {
@@ -4792,6 +4798,10 @@ static void test_f55_init(struct synaptics_rmi4_data *rmi4_data)
f54->rx_assigned++;
}
}
+
+exit:
+ kfree(ctrl_43);
+ return;
}
static void test_f55_set_regs(struct synaptics_rmi4_data *rmi4_data,
@@ -4981,7 +4991,6 @@ static int test_scan_pdt(void)
unsigned short addr;
bool f54found = false;
bool f55found = false;
- struct synaptics_rmi4_fn_desc rmi_fd;
struct synaptics_rmi4_data *rmi4_data = f54->rmi4_data;
for (page = 0; page < PAGES_TO_SERVICE; page++) {
@@ -4990,30 +4999,31 @@ static int test_scan_pdt(void)
retval = synaptics_rmi4_reg_read(rmi4_data,
addr,
- (unsigned char *)&rmi_fd,
- sizeof(rmi_fd));
+ (unsigned char *)&rmi4_data->rmi_fd,
+ sizeof(rmi4_data->rmi_fd));
if (retval < 0)
return retval;
addr &= ~(MASK_8BIT << 8);
- if (!rmi_fd.fn_number)
+ if (!rmi4_data->rmi_fd.fn_number)
break;
- switch (rmi_fd.fn_number) {
+ switch (rmi4_data->rmi_fd.fn_number) {
case SYNAPTICS_RMI4_F54:
test_f54_set_regs(rmi4_data,
- &rmi_fd, intr_count, page);
+ &rmi4_data->rmi_fd, intr_count,
+ page);
f54found = true;
break;
case SYNAPTICS_RMI4_F55:
test_f55_set_regs(rmi4_data,
- &rmi_fd, page);
+ &rmi4_data->rmi_fd, page);
f55found = true;
break;
case SYNAPTICS_RMI4_F21:
test_f21_set_regs(rmi4_data,
- &rmi_fd, page);
+ &rmi4_data->rmi_fd, page);
break;
default:
break;
@@ -5022,7 +5032,7 @@ static int test_scan_pdt(void)
if (f54found && f55found)
goto pdt_done;
- intr_count += rmi_fd.intr_src_count;
+ intr_count += rmi4_data->rmi_fd.intr_src_count;
}
}
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index 417d220..04676c70 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -10,6 +10,7 @@
struct cvp_power_level {
unsigned long core_sum;
+ unsigned long op_core_sum;
unsigned long bw_sum;
};
@@ -674,7 +675,8 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
__func__, inst->session_queue.state,
inst->session_queue.msg_count);
- if (inst->state >= MSM_CVP_CLOSE_DONE) {
+ if (inst->state >= MSM_CVP_CLOSE_DONE ||
+ sq->state != QUEUE_ACTIVE) {
rc = -ECONNRESET;
goto exit;
}
@@ -1070,6 +1072,10 @@ static int msm_cvp_thread_fence_run(void *data)
"%s: Failed in call_hfi_op %d, %x\n",
__func__, in_pkt->pkt_data[0],
in_pkt->pkt_data[1]);
+
+ if (rc == -ECONNRESET)
+ goto exit;
+
synx_state = SYNX_STATE_SIGNALED_ERROR;
}
@@ -1080,6 +1086,10 @@ static int msm_cvp_thread_fence_run(void *data)
dprintk(CVP_ERR,
"%s: wait for signal failed, rc %d\n",
__func__, rc);
+
+ if (rc == -ECONNRESET)
+ goto exit;
+
synx_state = SYNX_STATE_SIGNALED_ERROR;
}
}
@@ -1175,16 +1185,24 @@ static int msm_cvp_thread_fence_run(void *data)
"%s: Failed in call_hfi_op %d, %x\n",
__func__, in_pkt->pkt_data[0],
in_pkt->pkt_data[1]);
+
+ if (rc == -ECONNRESET)
+ goto exit;
+
synx_state = SYNX_STATE_SIGNALED_ERROR;
}
if (synx_state != SYNX_STATE_SIGNALED_ERROR) {
rc = wait_for_sess_signal_receipt(inst,
HAL_SESSION_ICA_FRAME_CMD_DONE);
- if (rc) {
+ if (rc) {
dprintk(CVP_ERR,
"%s: wait for signal failed, rc %d\n",
__func__, rc);
+
+ if (rc == -ECONNRESET)
+ goto exit;
+
synx_state = SYNX_STATE_SIGNALED_ERROR;
}
}
@@ -1246,16 +1264,24 @@ static int msm_cvp_thread_fence_run(void *data)
"%s: Failed in call_hfi_op %d, %x\n",
__func__, in_pkt->pkt_data[0],
in_pkt->pkt_data[1]);
+
+ if (rc == -ECONNRESET)
+ goto exit;
+
synx_state = SYNX_STATE_SIGNALED_ERROR;
}
if (synx_state != SYNX_STATE_SIGNALED_ERROR) {
rc = wait_for_sess_signal_receipt(inst,
HAL_SESSION_FD_FRAME_CMD_DONE);
- if (rc) {
+ if (rc) {
dprintk(CVP_ERR,
"%s: wait for signal failed, rc %d\n",
__func__, rc);
+
+ if (rc == -ECONNRESET)
+ goto exit;
+
synx_state = SYNX_STATE_SIGNALED_ERROR;
}
}
@@ -1433,6 +1459,89 @@ static inline int max_3(unsigned int a, unsigned int b, unsigned int c)
return (a >= b) ? ((a >= c) ? a : c) : ((b >= c) ? b : c);
}
+static bool is_subblock_profile_existed(struct msm_cvp_inst *inst)
+{
+ return (inst->prop.od_cycles ||
+ inst->prop.mpu_cycles ||
+ inst->prop.fdu_cycles);
+}
+
+static void aggregate_power_update(struct msm_cvp_core *core,
+ struct cvp_power_level *nrt_pwr,
+ struct cvp_power_level *rt_pwr,
+ unsigned int max_clk_rate)
+{
+ struct msm_cvp_inst *inst;
+ int i;
+ unsigned long fdu_sum[2] = {0}, od_sum[2] = {0}, mpu_sum[2] = {0};
+ unsigned long ica_sum[2] = {0}, fw_sum[2] = {0};
+ unsigned long op_fdu_max[2] = {0}, op_od_max[2] = {0};
+ unsigned long op_mpu_max[2] = {0}, op_ica_max[2] = {0};
+ unsigned long op_fw_max[2] = {0}, bw_sum[2] = {0}, op_bw_max[2] = {0};
+
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->state == MSM_CVP_CORE_INVALID ||
+ inst->state == MSM_CVP_CORE_UNINIT ||
+ !is_subblock_profile_existed(inst))
+ continue;
+ if (inst->prop.priority <= CVP_RT_PRIO_THRESHOLD) {
+ /* Non-realtime session use index 0 */
+ i = 0;
+ } else {
+ i = 1;
+ }
+ fdu_sum[i] += inst->prop.fdu_cycles;
+ od_sum[i] += inst->prop.od_cycles;
+ mpu_sum[i] += inst->prop.mpu_cycles;
+ ica_sum[i] += inst->prop.ica_cycles;
+ fw_sum[i] += inst->prop.fw_cycles;
+ op_fdu_max[i] =
+ (op_fdu_max[i] >= inst->prop.fdu_op_cycles) ?
+ op_fdu_max[i] : inst->prop.fdu_op_cycles;
+ op_od_max[i] =
+ (op_od_max[i] >= inst->prop.od_op_cycles) ?
+ op_od_max[i] : inst->prop.od_op_cycles;
+ op_mpu_max[i] =
+ (op_mpu_max[i] >= inst->prop.mpu_op_cycles) ?
+ op_mpu_max[i] : inst->prop.mpu_op_cycles;
+ op_ica_max[i] =
+ (op_ica_max[i] >= inst->prop.ica_op_cycles) ?
+ op_ica_max[i] : inst->prop.ica_op_cycles;
+ op_fw_max[i] =
+ (op_fw_max[i] >= inst->prop.fw_op_cycles) ?
+ op_fw_max[i] : inst->prop.fw_op_cycles;
+ bw_sum[i] += inst->prop.ddr_bw;
+ op_bw_max[i] =
+ (op_bw_max[i] >= inst->prop.ddr_op_bw) ?
+ op_bw_max[i] : inst->prop.ddr_op_bw;
+ }
+
+ for (i = 0; i < 2; i++) {
+ fdu_sum[i] = max_3(fdu_sum[i], od_sum[i], mpu_sum[i]);
+ fdu_sum[i] = max_3(fdu_sum[i], ica_sum[i], fw_sum[i]);
+
+ op_fdu_max[i] = max_3(op_fdu_max[i], op_od_max[i],
+ op_mpu_max[i]);
+ op_fdu_max[i] = max_3(op_fdu_max[i],
+ op_ica_max[i], op_fw_max[i]);
+ op_fdu_max[i] =
+ (op_fdu_max[i] > max_clk_rate) ?
+ max_clk_rate : op_fdu_max[i];
+ bw_sum[i] = (bw_sum[i] >= op_bw_max[i]) ?
+ bw_sum[i] : op_bw_max[i];
+ }
+
+ nrt_pwr->core_sum += fdu_sum[0];
+ nrt_pwr->op_core_sum = (nrt_pwr->op_core_sum >= op_fdu_max[0]) ?
+ nrt_pwr->op_core_sum : op_fdu_max[0];
+ nrt_pwr->bw_sum += bw_sum[0];
+ rt_pwr->core_sum += fdu_sum[1];
+ rt_pwr->op_core_sum = (rt_pwr->op_core_sum >= op_fdu_max[1]) ?
+ rt_pwr->op_core_sum : op_fdu_max[1];
+ rt_pwr->bw_sum += bw_sum[1];
+}
+
+
static void aggregate_power_request(struct msm_cvp_core *core,
struct cvp_power_level *nrt_pwr,
struct cvp_power_level *rt_pwr,
@@ -1446,7 +1555,8 @@ static void aggregate_power_request(struct msm_cvp_core *core,
list_for_each_entry(inst, &core->instances, list) {
if (inst->state == MSM_CVP_CORE_INVALID ||
- inst->state == MSM_CVP_CORE_UNINIT)
+ inst->state == MSM_CVP_CORE_UNINIT ||
+ is_subblock_profile_existed(inst))
continue;
if (inst->prop.priority <= CVP_RT_PRIO_THRESHOLD) {
/* Non-realtime session use index 0 */
@@ -1479,17 +1589,18 @@ static void aggregate_power_request(struct msm_cvp_core *core,
op_core_max[i] =
(op_core_max[i] > max_clk_rate) ?
max_clk_rate : op_core_max[i];
- core_sum[i] =
- (core_sum[i] >= op_core_max[i]) ?
- core_sum[i] : op_core_max[i];
bw_sum[i] = (bw_sum[i] >= op_bw_max[i]) ?
bw_sum[i] : op_bw_max[i];
}
- nrt_pwr->core_sum = core_sum[0];
- nrt_pwr->bw_sum = bw_sum[0];
- rt_pwr->core_sum = core_sum[1];
- rt_pwr->bw_sum = bw_sum[1];
+ nrt_pwr->core_sum += core_sum[0];
+ nrt_pwr->op_core_sum = (nrt_pwr->op_core_sum >= op_core_max[0]) ?
+ nrt_pwr->op_core_sum : op_core_max[0];
+ nrt_pwr->bw_sum += bw_sum[0];
+ rt_pwr->core_sum += core_sum[1];
+ rt_pwr->op_core_sum = (rt_pwr->op_core_sum >= op_core_max[1]) ?
+ rt_pwr->op_core_sum : op_core_max[1];
+ rt_pwr->bw_sum += bw_sum[1];
}
/**
@@ -1513,8 +1624,8 @@ static int adjust_bw_freqs(void)
struct allowed_clock_rates_table *tbl = NULL;
unsigned int tbl_size;
unsigned int cvp_min_rate, cvp_max_rate, max_bw;
- struct cvp_power_level rt_pwr, nrt_pwr;
- unsigned long tmp, core_sum, bw_sum;
+ struct cvp_power_level rt_pwr = {0}, nrt_pwr = {0};
+ unsigned long tmp, core_sum, op_core_sum, bw_sum;
int i, rc = 0;
core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
@@ -1530,6 +1641,13 @@ static int adjust_bw_freqs(void)
max_bw = bus->range[1];
aggregate_power_request(core, &nrt_pwr, &rt_pwr, cvp_max_rate);
+ dprintk(CVP_DBG, "PwrReq nrt %lu %lu rt %lu %lu\n",
+ nrt_pwr.core_sum, nrt_pwr.op_core_sum,
+ rt_pwr.core_sum, rt_pwr.op_core_sum);
+ aggregate_power_update(core, &nrt_pwr, &rt_pwr, cvp_max_rate);
+ dprintk(CVP_DBG, "PwrUpdate nrt %lu %lu rt %lu %lu\n",
+ nrt_pwr.core_sum, nrt_pwr.op_core_sum,
+ rt_pwr.core_sum, rt_pwr.op_core_sum);
if (rt_pwr.core_sum > cvp_max_rate) {
dprintk(CVP_WARN, "%s clk vote out of range %lld\n",
@@ -1538,6 +1656,11 @@ static int adjust_bw_freqs(void)
}
core_sum = rt_pwr.core_sum + nrt_pwr.core_sum;
+ op_core_sum = (rt_pwr.op_core_sum >= nrt_pwr.op_core_sum) ?
+ rt_pwr.op_core_sum : nrt_pwr.op_core_sum;
+
+ core_sum = (core_sum >= op_core_sum) ?
+ core_sum : op_core_sum;
if (core_sum > cvp_max_rate) {
core_sum = cvp_max_rate;
@@ -1622,7 +1745,6 @@ static int msm_cvp_request_power(struct msm_cvp_inst *inst,
inst->power.reserved[0] = div_by_1dot5(inst->power.reserved[0]);
inst->power.reserved[2] = div_by_1dot5(inst->power.reserved[2]);
inst->power.reserved[3] = div_by_1dot5(inst->power.reserved[3]);
- inst->power.reserved[4] = div_by_1dot5(inst->power.reserved[4]);
/* Convert bps to KBps */
inst->power.ddr_bw = inst->power.ddr_bw >> 10;
@@ -1640,6 +1762,33 @@ static int msm_cvp_request_power(struct msm_cvp_inst *inst,
return rc;
}
+static int msm_cvp_update_power(struct msm_cvp_inst *inst)
+{
+ int rc = 0;
+ struct msm_cvp_core *core;
+ struct msm_cvp_inst *s;
+
+ if (!inst) {
+ dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ s = cvp_get_inst_validate(inst->core, inst);
+ if (!s)
+ return -ECONNRESET;
+
+ inst->cur_cmd_type = CVP_KMD_UPDATE_POWER;
+ core = inst->core;
+
+ mutex_lock(&core->lock);
+ rc = adjust_bw_freqs();
+ mutex_unlock(&core->lock);
+ inst->cur_cmd_type = 0;
+ cvp_put_inst(s);
+
+ return rc;
+}
+
static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
struct cvp_kmd_buffer *buf)
{
@@ -1874,6 +2023,12 @@ static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
return -EINVAL;
}
+ if (props->prop_num >= MAX_KMD_PROP_NUM) {
+ dprintk(CVP_ERR, "Too many properties %d to set\n",
+ props->prop_num);
+ return -E2BIG;
+ }
+
prop_array = &arg->data.sys_properties.prop_data;
session_prop = &inst->prop;
@@ -1894,6 +2049,52 @@ static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
case CVP_KMD_PROP_SESSION_DSPMASK:
session_prop->dsp_mask = prop_array[i].data;
break;
+ case CVP_KMD_PROP_PWR_FDU:
+ session_prop->fdu_cycles = prop_array[i].data;
+ break;
+ case CVP_KMD_PROP_PWR_ICA:
+ session_prop->ica_cycles =
+ div_by_1dot5(prop_array[i].data);
+ break;
+ case CVP_KMD_PROP_PWR_OD:
+ session_prop->od_cycles = prop_array[i].data;
+ break;
+ case CVP_KMD_PROP_PWR_MPU:
+ session_prop->mpu_cycles = prop_array[i].data;
+ break;
+ case CVP_KMD_PROP_PWR_FW:
+ session_prop->fw_cycles =
+ div_by_1dot5(prop_array[i].data);
+ break;
+ case CVP_KMD_PROP_PWR_DDR:
+ session_prop->ddr_bw = prop_array[i].data;
+ break;
+ case CVP_KMD_PROP_PWR_SYSCACHE:
+ session_prop->ddr_cache = prop_array[i].data;
+ break;
+ case CVP_KMD_PROP_PWR_FDU_OP:
+ session_prop->fdu_op_cycles = prop_array[i].data;
+ break;
+ case CVP_KMD_PROP_PWR_ICA_OP:
+ session_prop->ica_op_cycles =
+ div_by_1dot5(prop_array[i].data);
+ break;
+ case CVP_KMD_PROP_PWR_OD_OP:
+ session_prop->od_op_cycles = prop_array[i].data;
+ break;
+ case CVP_KMD_PROP_PWR_MPU_OP:
+ session_prop->mpu_op_cycles = prop_array[i].data;
+ break;
+ case CVP_KMD_PROP_PWR_FW_OP:
+ session_prop->fw_op_cycles =
+ div_by_1dot5(prop_array[i].data);
+ break;
+ case CVP_KMD_PROP_PWR_DDR_OP:
+ session_prop->ddr_op_bw = prop_array[i].data;
+ break;
+ case CVP_KMD_PROP_PWR_SYSCACHE_OP:
+ session_prop->ddr_op_cache = prop_array[i].data;
+ break;
default:
dprintk(CVP_ERR,
"unrecognized sys property to set %d\n",
@@ -1944,6 +2145,11 @@ int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)
rc = msm_cvp_request_power(inst, power);
break;
}
+ case CVP_KMD_UPDATE_POWER:
+ {
+ rc = msm_cvp_update_power(inst);
+ break;
+ }
case CVP_KMD_REGISTER_BUFFER:
{
struct cvp_kmd_buffer *buf =
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c
index 2fb9850..0422546 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c
@@ -395,11 +395,25 @@ int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
msecs_to_jiffies(
inst->core->resources.msm_cvp_hw_rsp_timeout));
if (!rc) {
+ enum cvp_event_t event;
+ unsigned long flags = 0;
+
dprintk(CVP_WARN, "Wait interrupted or timed out: %d\n",
SESSION_MSG_INDEX(cmd));
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
dump_hfi_queue(hdev->hfi_device_data);
rc = -EIO;
+
+ spin_lock_irqsave(&inst->event_handler.lock, flags);
+ event = inst->event_handler.event;
+ spin_unlock_irqrestore(
+ &inst->event_handler.lock, flags);
+ if (event == CVP_SSR_EVENT) {
+ dprintk(CVP_WARN, "%s: SSR triggered\n",
+ __func__);
+ rc = -ECONNRESET;
+ }
+
} else {
rc = 0;
}
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_internal.h b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
index 139b322..571a506 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_internal.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
@@ -259,6 +259,20 @@ struct cvp_session_prop {
u32 priority;
u32 is_secure;
u32 dsp_mask;
+ u32 fdu_cycles;
+ u32 od_cycles;
+ u32 mpu_cycles;
+ u32 ica_cycles;
+ u32 fw_cycles;
+ u32 fdu_op_cycles;
+ u32 od_op_cycles;
+ u32 mpu_op_cycles;
+ u32 ica_op_cycles;
+ u32 fw_op_cycles;
+ u32 ddr_bw;
+ u32 ddr_op_bw;
+ u32 ddr_cache;
+ u32 ddr_op_cache;
};
enum cvp_event_t {
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_private.c b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
index 4deafae..b2ad749 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_private.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
@@ -423,6 +423,8 @@ static int convert_from_user(struct cvp_kmd_arg *kp,
}
break;
}
+ case CVP_KMD_UPDATE_POWER:
+ break;
default:
dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
__func__, kp->type);
@@ -594,6 +596,8 @@ static int convert_to_user(struct cvp_kmd_arg *kp, unsigned long arg)
}
case CVP_KMD_SET_SYS_PROPERTY:
break;
+ case CVP_KMD_UPDATE_POWER:
+ break;
default:
dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
__func__, kp->type);
diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h
index 219041e..885ea54 100644
--- a/drivers/media/platform/msm/npu/npu_common.h
+++ b/drivers/media/platform/msm/npu/npu_common.h
@@ -48,6 +48,8 @@
#define NPU_MAX_PATCH_NUM 160
#define NPU_MAX_BW_DEVS 4
+#define PERF_MODE_DEFAULT 0
+
enum npu_power_level {
NPU_PWRLEVEL_MINSVS = 0,
NPU_PWRLEVEL_LOWSVS,
@@ -164,6 +166,8 @@ struct npu_reg {
* @uc_pwrlevel - power level from user driver setting
* @perf_mode_override - perf mode from sysfs to override perf mode
* settings from user driver
+ * @dcvs_mode - dcvs mode from sysfs to turn on dcvs mode, overriding the
+ * setting from user driver
* @devbw - bw device
*/
struct npu_pwrctrl {
@@ -183,6 +187,8 @@ struct npu_pwrctrl {
uint32_t cdsprm_pwrlevel;
uint32_t fmax_pwrlevel;
uint32_t perf_mode_override;
+ uint32_t dcvs_mode;
+ uint32_t cur_dcvs_activity;
};
/**
diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c
index 4c46f77..51866c8 100644
--- a/drivers/media/platform/msm/npu/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu/npu_debugfs.c
@@ -26,8 +26,6 @@
static int npu_debug_open(struct inode *inode, struct file *file);
static int npu_debug_release(struct inode *inode, struct file *file);
static int npu_debug_reg_release(struct inode *inode, struct file *file);
-static ssize_t npu_debug_reg_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos);
static ssize_t npu_debug_reg_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos);
static ssize_t npu_debug_off_write(struct file *file,
@@ -43,13 +41,12 @@ static ssize_t npu_debug_ctrl_write(struct file *file,
* Variables
* -------------------------------------------------------------------------
*/
-struct npu_device *g_npu_dev;
+static struct npu_device *g_npu_dev;
static const struct file_operations npu_reg_fops = {
.open = npu_debug_open,
.release = npu_debug_reg_release,
.read = npu_debug_reg_read,
- .write = npu_debug_reg_write,
};
static const struct file_operations npu_off_fops = {
@@ -107,41 +104,6 @@ static int npu_debug_reg_release(struct inode *inode, struct file *file)
* Function Implementations - Reg Read/Write
* -------------------------------------------------------------------------
*/
-static ssize_t npu_debug_reg_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- size_t off;
- uint32_t data, cnt;
- struct npu_device *npu_dev = file->private_data;
- char buf[24];
-
- if (count >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- buf[count] = 0; /* end of string */
-
- cnt = sscanf(buf, "%zx %x", &off, &data);
- NPU_DBG("%s 0x%zx, 0x%08x\n", buf, off, data);
-
- return count;
- if (cnt < 2)
- return -EINVAL;
-
- if (npu_enable_core_power(npu_dev))
- return -EPERM;
-
- REGW(npu_dev, off, data);
-
- npu_disable_core_power(npu_dev);
-
- NPU_DBG("write: addr=%zx data=%x\n", off, data);
-
- return count;
-}
-
static ssize_t npu_debug_reg_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
@@ -261,6 +223,7 @@ static ssize_t npu_debug_off_read(struct file *file,
len = scnprintf(buf, sizeof(buf), "offset=0x%08x cnt=%d\n",
debugfs->reg_off, debugfs->reg_cnt);
+ len = min(len, count);
if (copy_to_user(user_buf, buf, len)) {
NPU_ERR("failed to copy to user\n");
@@ -290,49 +253,21 @@ static ssize_t npu_debug_log_read(struct file *file,
mutex_lock(&debugfs->log_lock);
if (debugfs->log_num_bytes_buffered != 0) {
- if ((debugfs->log_read_index +
- debugfs->log_num_bytes_buffered) >
- debugfs->log_buf_size) {
- /* Wrap around case */
- uint32_t remaining_to_end = debugfs->log_buf_size -
- debugfs->log_read_index;
- uint8_t *src_addr = debugfs->log_buf +
- debugfs->log_read_index;
- uint8_t *dst_addr = user_buf;
-
- if (copy_to_user(dst_addr, src_addr,
- remaining_to_end)) {
- NPU_ERR("failed to copy to user\n");
- mutex_unlock(&debugfs->log_lock);
- return -EFAULT;
- }
- src_addr = debugfs->log_buf;
- dst_addr = user_buf + remaining_to_end;
- if (copy_to_user(dst_addr, src_addr,
- debugfs->log_num_bytes_buffered -
- remaining_to_end)) {
- NPU_ERR("failed to copy to user\n");
- mutex_unlock(&debugfs->log_lock);
- return -EFAULT;
- }
- debugfs->log_read_index =
- debugfs->log_num_bytes_buffered -
- remaining_to_end;
- } else {
- if (copy_to_user(user_buf, (debugfs->log_buf +
- debugfs->log_read_index),
- debugfs->log_num_bytes_buffered)) {
- NPU_ERR("failed to copy to user\n");
- mutex_unlock(&debugfs->log_lock);
- return -EFAULT;
- }
- debugfs->log_read_index +=
- debugfs->log_num_bytes_buffered;
- if (debugfs->log_read_index == debugfs->log_buf_size)
- debugfs->log_read_index = 0;
+ len = min(debugfs->log_num_bytes_buffered,
+ debugfs->log_buf_size - debugfs->log_read_index);
+ len = min(count, len);
+ if (copy_to_user(user_buf, (debugfs->log_buf +
+ debugfs->log_read_index), len)) {
+ NPU_ERR("failed to copy to user\n");
+ mutex_unlock(&debugfs->log_lock);
+ return -EFAULT;
}
- len = debugfs->log_num_bytes_buffered;
- debugfs->log_num_bytes_buffered = 0;
+ debugfs->log_read_index += len;
+ if (debugfs->log_read_index == debugfs->log_buf_size)
+ debugfs->log_read_index = 0;
+
+ debugfs->log_num_bytes_buffered -= len;
+ *ppos += len;
}
/* mutex log unlock */
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index b94bc4d..b37564e 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -30,7 +30,6 @@
#define DDR_MAPPED_START_ADDR 0x80000000
#define DDR_MAPPED_SIZE 0x60000000
-#define PERF_MODE_DEFAULT 0
#define MBOX_OP_TIMEOUTMS 1000
/* -------------------------------------------------------------------------
@@ -58,6 +57,12 @@ static ssize_t perf_mode_override_show(struct device *dev,
static ssize_t perf_mode_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count);
+static ssize_t dcvs_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static ssize_t dcvs_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
static ssize_t boot_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count);
@@ -85,6 +90,11 @@ static int npu_exec_network_v2(struct npu_client *client,
unsigned long arg);
static int npu_receive_event(struct npu_client *client,
unsigned long arg);
+static int npu_set_fw_state(struct npu_client *client, uint32_t enable);
+static int npu_set_property(struct npu_client *client,
+ unsigned long arg);
+static int npu_get_property(struct npu_client *client,
+ unsigned long arg);
static long npu_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
static unsigned int npu_poll(struct file *filp, struct poll_table_struct *p);
@@ -155,11 +165,13 @@ static DEVICE_ATTR_RO(caps);
static DEVICE_ATTR_RW(pwr);
static DEVICE_ATTR_RW(perf_mode_override);
static DEVICE_ATTR_WO(boot);
+static DEVICE_ATTR_RW(dcvs_mode);
static struct attribute *npu_fs_attrs[] = {
&dev_attr_caps.attr,
&dev_attr_pwr.attr,
&dev_attr_perf_mode_override.attr,
+ &dev_attr_dcvs_mode.attr,
&dev_attr_boot.attr,
NULL
};
@@ -278,8 +290,8 @@ static ssize_t perf_mode_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct npu_client client;
struct npu_device *npu_dev = dev_get_drvdata(dev);
- struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
uint32_t val;
int rc;
@@ -290,15 +302,56 @@ static ssize_t perf_mode_override_store(struct device *dev,
}
val = min(val, npu_dev->pwrctrl.num_pwrlevels);
- mutex_lock(&host_ctx->lock);
- npu_dev->pwrctrl.perf_mode_override = val;
- NPU_INFO("setting uc_pwrlevel_override to %d\n", val);
- npu_set_power_level(npu_dev, true);
- mutex_unlock(&host_ctx->lock);
+ NPU_INFO("setting perf mode to %d\n", val);
+ client.npu_dev = npu_dev;
+ npu_host_set_perf_mode(&client, 0, val);
return count;
}
+static ssize_t dcvs_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct npu_device *npu_dev = dev_get_drvdata(dev);
+ struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", pwr->dcvs_mode);
+}
+
+static ssize_t dcvs_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct npu_device *npu_dev = dev_get_drvdata(dev);
+ struct msm_npu_property prop;
+ uint32_t val;
+ int ret = 0;
+
+ ret = kstrtou32(buf, 10, &val);
+ if (ret) {
+ NPU_ERR("Invalid input for dcvs mode setting\n");
+ return -EINVAL;
+ }
+
+ val = min(val, (uint32_t)(npu_dev->pwrctrl.num_pwrlevels - 1));
+ NPU_DBG("sysfs: setting dcvs_mode to %d\n", val);
+
+ prop.prop_id = MSM_NPU_PROP_ID_DCVS_MODE;
+ prop.num_of_params = 1;
+ prop.network_hdl = 0;
+ prop.prop_param[0] = val;
+
+ ret = npu_host_set_fw_property(npu_dev, &prop);
+ if (ret) {
+ NPU_ERR("npu_host_set_fw_property failed %d\n", ret);
+ return ret;
+ }
+
+ npu_dev->pwrctrl.dcvs_mode = val;
+
+ return count;
+}
/* -------------------------------------------------------------------------
* SysFS - npu_boot
* -------------------------------------------------------------------------
@@ -386,6 +439,7 @@ void npu_disable_core_power(struct npu_device *npu_dev)
pwr->active_pwrlevel = pwr->default_pwrlevel;
pwr->uc_pwrlevel = pwr->max_pwrlevel;
pwr->cdsprm_pwrlevel = pwr->max_pwrlevel;
+ pwr->cur_dcvs_activity = pwr->num_pwrlevels;
NPU_DBG("setting back to power level=%d\n",
pwr->active_pwrlevel);
}
@@ -446,14 +500,6 @@ static uint32_t npu_calc_power_level(struct npu_device *npu_dev)
uint32_t uc_pwr_level = npu_dev->pwrctrl.uc_pwrlevel;
/*
- * if perf_mode_override is not 0, use it to override
- * uc_pwrlevel
- */
- if (npu_dev->pwrctrl.perf_mode_override > 0)
- uc_pwr_level = npu_power_level_from_index(npu_dev,
- npu_dev->pwrctrl.perf_mode_override - 1);
-
- /*
* pick the lowest power level between thermal power and usecase power
* settings
*/
@@ -559,11 +605,8 @@ int npu_set_uc_power_level(struct npu_device *npu_dev,
struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
uint32_t uc_pwrlevel_to_set;
- if (perf_mode == PERF_MODE_DEFAULT)
- uc_pwrlevel_to_set = pwr->default_pwrlevel;
- else
- uc_pwrlevel_to_set = npu_power_level_from_index(npu_dev,
- perf_mode - 1);
+ uc_pwrlevel_to_set = npu_power_level_from_index(npu_dev,
+ perf_mode - 1);
if (uc_pwrlevel_to_set > pwr->max_pwrlevel)
uc_pwrlevel_to_set = pwr->max_pwrlevel;
@@ -1327,6 +1370,43 @@ static int npu_receive_event(struct npu_client *client,
return ret;
}
+static int npu_set_fw_state(struct npu_client *client, uint32_t enable)
+{
+ struct npu_device *npu_dev = client->npu_dev;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ int rc = 0;
+
+ if (host_ctx->network_num > 0) {
+ NPU_ERR("Need to unload network first\n");
+ mutex_unlock(&npu_dev->dev_lock);
+ return -EINVAL;
+ }
+
+ if (enable) {
+ NPU_DBG("enable fw\n");
+ rc = enable_fw(npu_dev);
+ if (rc) {
+ NPU_ERR("enable fw failed\n");
+ } else {
+ host_ctx->npu_init_cnt++;
+ NPU_DBG("npu_init_cnt %d\n",
+ host_ctx->npu_init_cnt);
+ /* set npu to lowest power level */
+ if (npu_set_uc_power_level(npu_dev, 1))
+ NPU_WARN("Failed to set uc power level\n");
+ }
+ } else if (host_ctx->npu_init_cnt > 0) {
+ NPU_DBG("disable fw\n");
+ disable_fw(npu_dev);
+ host_ctx->npu_init_cnt--;
+ NPU_DBG("npu_init_cnt %d\n", host_ctx->npu_init_cnt);
+ } else {
+ NPU_ERR("can't disable fw %d\n", host_ctx->npu_init_cnt);
+ }
+
+ return rc;
+}
+
static int npu_set_property(struct npu_client *client,
unsigned long arg)
{
@@ -1341,9 +1421,19 @@ static int npu_set_property(struct npu_client *client,
}
switch (prop.prop_id) {
+ case MSM_NPU_PROP_ID_FW_STATE:
+ ret = npu_set_fw_state(client,
+ (uint32_t)prop.prop_param[0]);
+ break;
+ case MSM_NPU_PROP_ID_PERF_MODE:
+ ret = npu_host_set_perf_mode(client,
+ (uint32_t)prop.network_hdl,
+ (uint32_t)prop.prop_param[0]);
+ break;
default:
- NPU_ERR("Not supported property %d\n", prop.prop_id);
- ret = -EINVAL;
+ ret = npu_host_set_fw_property(client->npu_dev, &prop);
+ if (ret)
+ NPU_ERR("npu_host_set_fw_property failed\n");
break;
}
@@ -1369,6 +1459,10 @@ static int npu_get_property(struct npu_client *client,
case MSM_NPU_PROP_ID_FW_STATE:
prop.prop_param[0] = host_ctx->fw_state;
break;
+ case MSM_NPU_PROP_ID_PERF_MODE:
+ prop.prop_param[0] = npu_host_get_perf_mode(client,
+ (uint32_t)prop.network_hdl);
+ break;
case MSM_NPU_PROP_ID_PERF_MODE_MAX:
prop.prop_param[0] = npu_dev->pwrctrl.num_pwrlevels;
break;
@@ -1379,13 +1473,17 @@ static int npu_get_property(struct npu_client *client,
prop.prop_param[0] = npu_dev->hw_version;
break;
default:
- NPU_ERR("Not supported property %d\n", prop.prop_id);
- return -EINVAL;
+ ret = npu_host_get_fw_property(client->npu_dev, &prop);
+ if (ret) {
+ NPU_ERR("npu_host_get_fw_property failed\n");
+ return ret;
+ }
+ break;
}
ret = copy_to_user(argp, &prop, sizeof(prop));
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("failed to copy to user\n");
return -EFAULT;
}
@@ -1745,6 +1843,7 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
pwr->uc_pwrlevel = pwr->max_pwrlevel;
pwr->perf_mode_override = 0;
pwr->cdsprm_pwrlevel = pwr->max_pwrlevel;
+ pwr->cur_dcvs_activity = pwr->num_pwrlevels;
return 0;
}
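
The new npu_set_fw_state() keeps a simple enable/disable reference count in npu_init_cnt: a successful enable bumps the count, a disable is honoured only while the count is positive. A minimal stand-alone model of that discipline (hypothetical names, not the driver code):

	#include <stdio.h>

	struct fw_state {
		int init_cnt;      /* outstanding enable requests */
	};

	static int fw_power_up(void)    { return 0; }  /* stands in for enable_fw() */
	static void fw_power_down(void) { }            /* stands in for disable_fw() */

	static int fw_state_set(struct fw_state *fw, int enable)
	{
		int rc = 0;

		if (enable) {
			rc = fw_power_up();
			if (!rc)
				fw->init_cnt++;        /* count only successful enables */
		} else if (fw->init_cnt > 0) {
			fw_power_down();
			fw->init_cnt--;
		} else {
			fprintf(stderr, "disable without a matching enable\n");
			rc = -1;
		}

		return rc;
	}
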
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.h b/drivers/media/platform/msm/npu/npu_host_ipc.h
index 991d769..2a336c2 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.h
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.h
@@ -33,6 +33,10 @@
#define NPU_IPC_CMD_EXECUTE_V2 0x0000000A
/* npu_ipc_cmd_notify_pwr_packet_t */
#define NPU_IPC_CMD_NOTIFY_PWR 0x0000000B
+/* ipc_cmd_set_property_packet */
+#define NPU_IPC_CMD_SET_PROPERTY 0x0000000C
+/* ipc_cmd_get_property_packet */
+#define NPU_IPC_CMD_GET_PROPERTY 0x0000000D
/* Messages sent **from** NPU */
/* IPC Message Response -- uint32_t */
@@ -52,6 +56,15 @@
#define NPU_IPC_MSG_EXECUTE_V2_DONE 0x00010006
/* struct ipc_msg_notify_pwr_pkt */
#define NPU_IPC_MSG_NOTIFY_PWR_DONE 0x00010007
+/* ipc_msg_set_property_packet */
+#define NPU_IPC_MSG_SET_PROPERTY_DONE 0x00010008
+/* ipc_msg_get_property_packet */
+#define NPU_IPC_MSG_GET_PROPERTY_DONE 0x00010009
+/* ipc_msg_general_notify_pkt */
+#define NPU_IPC_MSG_GENERAL_NOTIFY 0x00010010
+
+/* IPC Notify Message Type -- uint32_t */
+#define NPU_NOTIFY_DCVS_MODE 0x00002000
/* Logging message size */
/* Number 32-bit elements for the maximum log message size */
@@ -100,6 +113,9 @@
/* Debug stats */
#define NUM_LAYER_STATS_PER_EXE_MSG_MAX 110
+/* DCVS */
+#define NPU_DCVS_ACTIVITY_MAX_PERF 0x100
+
/* -------------------------------------------------------------------------
* Data Structures
* -------------------------------------------------------------------------
@@ -269,6 +285,40 @@ struct ipc_cmd_loopback_pkt {
};
/*
+ * Generic property definition
+ */
+struct ipc_cmd_prop_pkt {
+ struct ipc_cmd_header_pkt header;
+ uint32_t prop_id;
+ uint32_t num_params;
+ uint32_t network_hdl;
+ uint32_t prop_param[0];
+};
+
+/*
+ * Generic property response packet definition
+ */
+struct ipc_msg_prop_pkt {
+ struct ipc_msg_header_pkt header;
+ uint32_t prop_id;
+ uint32_t num_params;
+ uint32_t network_hdl;
+ uint32_t prop_param[0];
+};
+
+/*
+ * Generic notify message packet definition
+ */
+struct ipc_msg_general_notify_pkt {
+ struct ipc_msg_header_pkt header;
+ uint32_t notify_id;
+ uint32_t num_params;
+ uint32_t network_hdl;
+ uint32_t notify_param[0];
+};
+
+
+/*
* LOAD response packet definition
*/
struct ipc_msg_load_pkt {
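
The new property packets above end in a zero-length parameter array, so the sender sizes each allocation as the fixed header plus N parameter words. A stand-alone C99 sketch of that allocation pattern (hypothetical names, IPC header fields omitted):

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct prop_pkt {
		uint32_t prop_id;
		uint32_t num_params;
		uint32_t network_hdl;
		uint32_t prop_param[];   /* flexible array member */
	};

	static struct prop_pkt *prop_pkt_alloc(uint32_t prop_id, uint32_t hdl,
					       const uint32_t *params, uint32_t n)
	{
		/* header plus n trailing parameter words */
		struct prop_pkt *pkt = calloc(1, sizeof(*pkt) + n * sizeof(uint32_t));

		if (!pkt)
			return NULL;

		pkt->prop_id = prop_id;
		pkt->num_params = n;
		pkt->network_hdl = hdl;
		memcpy(pkt->prop_param, params, n * sizeof(uint32_t));
		return pkt;
	}
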
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.c b/drivers/media/platform/msm/npu/npu_hw_access.c
index 9288045..cef5a96 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.c
+++ b/drivers/media/platform/msm/npu/npu_hw_access.c
@@ -20,67 +20,93 @@
* Functions - Register
* -------------------------------------------------------------------------
*/
+static uint32_t npu_reg_read(void __iomem *base, size_t size, uint32_t off)
+{
+ if (!base) {
+ NPU_ERR("NULL base address\n");
+ return 0;
+ }
+
+ if ((off % 4) != 0) {
+ NPU_ERR("offset %x is not aligned\n", off);
+ return 0;
+ }
+
+ if (off >= size) {
+ NPU_ERR("offset exceeds io region %x:%zx\n", off, size);
+ return 0;
+ }
+
+ return readl_relaxed(base + off);
+}
+
+static void npu_reg_write(void __iomem *base, size_t size, uint32_t off,
+ uint32_t val)
+{
+ if (!base) {
+ NPU_ERR("NULL base address\n");
+ return;
+ }
+
+ if ((off % 4) != 0) {
+ NPU_ERR("offset %x is not aligned\n", off);
+ return;
+ }
+
+ if (off >= size) {
+ NPU_ERR("offset exceeds io region %x:%zx\n", off, size);
+ return;
+ }
+
+ writel_relaxed(val, base + off);
+ __iowmb();
+}
+
uint32_t npu_core_reg_read(struct npu_device *npu_dev, uint32_t off)
{
- uint32_t ret = 0;
-
- ret = readl(npu_dev->core_io.base + off);
- return ret;
+ return npu_reg_read(npu_dev->core_io.base, npu_dev->core_io.size, off);
}
void npu_core_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val)
{
- writel_relaxed(val, npu_dev->core_io.base + off);
- __iowmb();
+ npu_reg_write(npu_dev->core_io.base, npu_dev->core_io.size,
+ off, val);
}
uint32_t npu_tcsr_reg_read(struct npu_device *npu_dev, uint32_t off)
{
- uint32_t ret = 0;
-
- ret = readl_relaxed(npu_dev->tcsr_io.base + off);
- return ret;
+ return npu_reg_read(npu_dev->tcsr_io.base, npu_dev->tcsr_io.size, off);
}
uint32_t npu_apss_shared_reg_read(struct npu_device *npu_dev, uint32_t off)
{
- uint32_t ret = 0;
-
- ret = readl(npu_dev->apss_shared_io.base + off);
- return ret;
+ return npu_reg_read(npu_dev->apss_shared_io.base,
+ npu_dev->apss_shared_io.size, off);
}
void npu_apss_shared_reg_write(struct npu_device *npu_dev, uint32_t off,
uint32_t val)
{
- writel_relaxed(val, npu_dev->apss_shared_io.base + off);
- __iowmb();
+ npu_reg_write(npu_dev->apss_shared_io.base,
+ npu_dev->apss_shared_io.size, off, val);
}
uint32_t npu_cc_reg_read(struct npu_device *npu_dev, uint32_t off)
{
- uint32_t ret = 0;
-
- ret = readl_relaxed(npu_dev->cc_io.base + off);
-
- return ret;
+ return npu_reg_read(npu_dev->cc_io.base, npu_dev->cc_io.size, off);
}
void npu_cc_reg_write(struct npu_device *npu_dev, uint32_t off,
uint32_t val)
{
- writel_relaxed(val, npu_dev->cc_io.base + off);
- __iowmb();
+ npu_reg_write(npu_dev->cc_io.base, npu_dev->cc_io.size,
+ off, val);
}
uint32_t npu_qfprom_reg_read(struct npu_device *npu_dev, uint32_t off)
{
- uint32_t ret = 0;
-
- if (npu_dev->qfprom_io.base)
- ret = readl(npu_dev->qfprom_io.base + off);
-
- return ret;
+ return npu_reg_read(npu_dev->qfprom_io.base,
+ npu_dev->qfprom_io.size, off);
}
/* -------------------------------------------------------------------------
@@ -96,6 +122,13 @@ void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
uint32_t i = 0;
uint32_t num = 0;
+ if (dst_off >= npu_dev->tcm_io.size ||
+ (npu_dev->tcm_io.size - dst_off) < size) {
+ NPU_ERR("memory write exceeds io region %x:%x:%x\n",
+ dst_off, size, npu_dev->tcm_io.size);
+ return;
+ }
+
num = size/4;
for (i = 0; i < num; i++) {
writel_relaxed(src_ptr32[i], npu_dev->tcm_io.base + dst_off);
@@ -122,6 +155,13 @@ int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
uint32_t i = 0;
uint32_t num = 0;
+ if (src_off >= npu_dev->tcm_io.size ||
+ (npu_dev->tcm_io.size - src_off) < size) {
+ NPU_ERR("memory read exceeds io region %x:%x:%x\n",
+ src_off, size, npu_dev->tcm_io.size);
+ return 0;
+ }
+
num = size/4;
for (i = 0; i < num; i++) {
out32[i] = readl_relaxed(npu_dev->tcm_io.base + src_off);
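
The range checks added in this file deliberately test the offset against the region size before subtracting, which avoids the integer wrap-around a naive off + len > size comparison can hit with large values. A minimal stand-alone sketch of the same check (hypothetical names):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	static bool io_range_ok(size_t region_size, uint32_t off, uint32_t len)
	{
		if (off >= region_size)
			return false;            /* start is already outside */
		if (region_size - off < len)
			return false;            /* not enough room for len bytes */
		return true;
	}
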
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index 0223496..7963e1d 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -25,7 +25,7 @@
#define LOG_MSG_TOTAL_SIZE_INDEX 0
#define LOG_MSG_MSG_ID_INDEX 1
-#define NPU_FW_TIMEOUT_POLL_INTERVAL_MS 20
+#define NPU_FW_TIMEOUT_POLL_INTERVAL_MS 10
#define NPU_FW_TIMEOUT_MS 5000
/* -------------------------------------------------------------------------
@@ -39,7 +39,7 @@ static void npu_disable_fw_work(struct work_struct *work);
static void npu_update_pwr_work(struct work_struct *work);
static void turn_off_fw_logging(struct npu_device *npu_dev);
static int wait_for_status_ready(struct npu_device *npu_dev,
- uint32_t status_reg, uint32_t status_bits);
+ uint32_t status_reg, uint32_t status_bits, bool poll);
static int wait_npu_cpc_power_off(struct npu_device *npu_dev);
static struct npu_network *alloc_network(struct npu_host_ctx *ctx,
struct npu_client *client);
@@ -66,6 +66,7 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
uint32_t pwr_level, bool post);
static int load_fw_nolock(struct npu_device *npu_dev, bool enable);
static void disable_fw_nolock(struct npu_device *npu_dev);
+static int update_dcvs_activity(struct npu_device *npu_dev, uint32_t activity);
/* -------------------------------------------------------------------------
* Function Definitions - Init / Deinit
@@ -79,17 +80,21 @@ static int wait_npu_cpc_power_off(struct npu_device *npu_dev)
max_wait_ms = NPU_FW_TIMEOUT_MS;
- while (reg_val & NPU_CPC_PWR_ON) {
+ do {
+ reg_val = npu_tcsr_reg_read(npu_dev, TCSR_NPU_CPC_PWR_ON);
+ if (!(reg_val & NPU_CPC_PWR_ON)) {
+ NPU_DBG("npu cpc powers off\n");
+ break;
+ }
+
wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
- if (wait_cnt >= max_wait_ms) {
+ if (wait_cnt > max_wait_ms) {
NPU_ERR("timeout wait for cpc power off\n");
return -EPERM;
}
msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
- reg_val = npu_tcsr_reg_read(npu_dev, TCSR_NPU_CPC_PWR_ON);
- };
+ } while (1);
- NPU_DBG("npu cpc powers off\n");
return 0;
}
@@ -123,7 +128,7 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
/* Keep reading ctrl status until NPU is ready */
if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
- FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
+ FW_CTRL_STATUS_MAIN_THREAD_READY_VAL, false)) {
ret = -EPERM;
goto load_fw_fail;
}
@@ -455,7 +460,7 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
NPU_ERR("NPU_IPC_CMD_NOTIFY_PWR sent failed: %d\n", ret);
} else {
ret = wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
- FW_CTRL_STATUS_PWR_NOTIFY_DONE_VAL);
+ FW_CTRL_STATUS_PWR_NOTIFY_DONE_VAL, true);
if (!ret) {
reg_val = REGR(npu_dev, REG_NPU_FW_CTRL_STATUS);
if (reg_val & FW_CTRL_STATUS_PWR_NOTIFY_ERR_VAL) {
@@ -622,8 +627,10 @@ int npu_host_init(struct npu_device *npu_dev)
goto fail;
}
- host_ctx->wq = create_workqueue("npu_irq_hdl");
- if (!host_ctx->wq) {
+ host_ctx->wq = create_workqueue("npu_general_wq");
+ host_ctx->wq_pri =
+ alloc_workqueue("npu_ipc_wq", WQ_HIGHPRI | WQ_UNBOUND, 0);
+ if (!host_ctx->wq || !host_ctx->wq_pri) {
sts = -EPERM;
goto fail;
} else {
@@ -643,12 +650,22 @@ int npu_host_init(struct npu_device *npu_dev)
goto fail;
}
+ host_ctx->prop_buf = kzalloc(sizeof(struct msm_npu_property),
+ GFP_KERNEL);
+ if (!host_ctx->prop_buf) {
+ sts = -ENOMEM;
+ goto fail;
+ }
+
host_ctx->auto_pil_disable = false;
return sts;
fail:
+ kfree(host_ctx->ipc_msg_buf);
if (host_ctx->wq)
destroy_workqueue(host_ctx->wq);
+ if (host_ctx->wq_pri)
+ destroy_workqueue(host_ctx->wq_pri);
if (host_ctx->notif_hdle)
subsys_notif_unregister_notifier(host_ctx->notif_hdle,
&host_ctx->nb);
@@ -660,8 +677,10 @@ void npu_host_deinit(struct npu_device *npu_dev)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ kfree(host_ctx->prop_buf);
kfree(host_ctx->ipc_msg_buf);
destroy_workqueue(host_ctx->wq);
+ destroy_workqueue(host_ctx->wq_pri);
subsys_notif_unregister_notifier(host_ctx->notif_hdle, &host_ctx->nb);
mutex_destroy(&host_ctx->lock);
}
@@ -679,7 +698,7 @@ irqreturn_t npu_ipc_intr_hdlr(int irq, void *ptr)
/* Check that the event thread currently is running */
if (host_ctx->wq)
- queue_work(host_ctx->wq, &host_ctx->ipc_irq_work);
+ queue_work(host_ctx->wq_pri, &host_ctx->ipc_irq_work);
return IRQ_HANDLED;
}
@@ -728,7 +747,7 @@ irqreturn_t npu_err_intr_hdlr(int irq, void *ptr)
NPU_ERR("err_irq_sts %x\n", host_ctx->err_irq_sts);
if (host_ctx->wq)
- queue_work(host_ctx->wq, &host_ctx->wdg_err_irq_work);
+ queue_work(host_ctx->wq_pri, &host_ctx->wdg_err_irq_work);
return IRQ_HANDLED;
}
@@ -743,7 +762,7 @@ irqreturn_t npu_wdg_intr_hdlr(int irq, void *ptr)
NPU_ERR("wdg_irq_sts %x\n", host_ctx->wdg_irq_sts);
if (host_ctx->wq)
- queue_work(host_ctx->wq, &host_ctx->wdg_err_irq_work);
+ queue_work(host_ctx->wq_pri, &host_ctx->wdg_err_irq_work);
return IRQ_HANDLED;
}
@@ -813,7 +832,7 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
/* Keep reading ctrl status until NPU is ready */
if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
- FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
+ FW_CTRL_STATUS_MAIN_THREAD_READY_VAL, false)) {
NPU_ERR("wait for fw status ready timedout\n");
ret = -EPERM;
goto fw_start_done;
@@ -997,7 +1016,7 @@ static void turn_off_fw_logging(struct npu_device *npu_dev)
}
static int wait_for_status_ready(struct npu_device *npu_dev,
- uint32_t status_reg, uint32_t status_bits)
+ uint32_t status_reg, uint32_t status_bits, bool poll)
{
uint32_t ctrl_sts = 0;
uint32_t wait_cnt = 0, max_wait_ms;
@@ -1005,19 +1024,34 @@ static int wait_for_status_ready(struct npu_device *npu_dev,
max_wait_ms = (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
NW_DEBUG_TIMEOUT_MS : NPU_FW_TIMEOUT_MS;
+ if (poll)
+ wait_cnt = max_wait_ms * 10;
+ else
+ wait_cnt = max_wait_ms / NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
/* keep reading status register until bits are set */
- while ((ctrl_sts & status_bits) != status_bits) {
+ do {
ctrl_sts = REGR(npu_dev, status_reg);
- msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
- wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
- if (wait_cnt >= max_wait_ms) {
+ if ((ctrl_sts & status_bits) == status_bits) {
+ NPU_DBG("status %x[reg %x] ready received\n",
+ status_bits, status_reg);
+ break;
+ }
+
+ if (!wait_cnt) {
NPU_ERR("timeout wait for status %x[%x] in reg %x\n",
status_bits, ctrl_sts, status_reg);
return -EPERM;
}
- }
- NPU_DBG("status %x[reg %x] ready received\n", status_bits, status_reg);
+
+ if (poll)
+ udelay(100);
+ else
+ msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
+
+ wait_cnt--;
+ } while (1);
+
return 0;
}
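
Both waiting styles in wait_for_status_ready() budget the same wall-clock timeout; only the step size differs (100 us busy-poll versus 10 ms sleep), which is why the poll path multiplies by ten while the sleep path divides by the interval. A stand-alone sketch of the iteration-count computation (hypothetical names):

	#include <stdbool.h>
	#include <stdint.h>

	#define POLL_STEP_US  100u
	#define SLEEP_STEP_MS 10u

	static uint32_t wait_budget(uint32_t timeout_ms, bool busy_poll)
	{
		if (busy_poll)
			return timeout_ms * (1000u / POLL_STEP_US);  /* 10 steps per ms */

		return timeout_ms / SLEEP_STEP_MS;                   /* 1 step per 10 ms */
	}
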
@@ -1207,6 +1241,7 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
uint32_t msg_id;
struct npu_network *network = NULL;
struct npu_kevent kevt;
+ struct npu_device *npu_dev = host_ctx->npu_dev;
msg_id = msg[1];
switch (msg_id) {
@@ -1394,6 +1429,73 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
complete_all(&host_ctx->misc_cmd_done);
break;
}
+ case NPU_IPC_MSG_SET_PROPERTY_DONE:
+ {
+ struct ipc_msg_prop_pkt *prop_rsp_pkt =
+ (struct ipc_msg_prop_pkt *)msg;
+ uint32_t *param = (uint32_t *)((uint8_t *)prop_rsp_pkt +
+ sizeof(struct ipc_msg_prop_pkt));
+ NPU_DBG("NPU_IPC_MSG_SET_PROPERTY_DONE %d:0x%x:%d\n",
+ prop_rsp_pkt->network_hdl,
+ prop_rsp_pkt->prop_id,
+ param[0]);
+
+ host_ctx->misc_cmd_result = prop_rsp_pkt->header.status;
+ host_ctx->misc_cmd_pending = false;
+
+ complete_all(&host_ctx->misc_cmd_done);
+ break;
+ }
+ case NPU_IPC_MSG_GET_PROPERTY_DONE:
+ {
+ struct ipc_msg_prop_pkt *prop_rsp_pkt =
+ (struct ipc_msg_prop_pkt *)msg;
+ uint32_t prop_size = 0;
+ uint32_t *prop_data = (uint32_t *)((uint8_t *)prop_rsp_pkt +
+ sizeof(struct ipc_msg_header_pkt));
+
+ NPU_DBG("NPU_IPC_MSG_GET_PROPERTY_DONE %d:0x%x:%d:%d\n",
+ prop_rsp_pkt->network_hdl,
+ prop_rsp_pkt->prop_id,
+ prop_rsp_pkt->num_params,
+ prop_rsp_pkt->prop_param[0]);
+
+ host_ctx->misc_cmd_result = prop_rsp_pkt->header.status;
+ host_ctx->misc_cmd_pending = false;
+
+ if (prop_rsp_pkt->num_params > 0) {
+ /* Copy prop data to kernel buffer */
+ prop_size = prop_rsp_pkt->header.size -
+ sizeof(struct ipc_msg_header_pkt);
+ memcpy(host_ctx->prop_buf, prop_data, prop_size);
+ }
+
+ complete_all(&host_ctx->misc_cmd_done);
+ break;
+ }
+ case NPU_IPC_MSG_GENERAL_NOTIFY:
+ {
+ struct ipc_msg_general_notify_pkt *notify_msg_pkt =
+ (struct ipc_msg_general_notify_pkt *)msg;
+
+ NPU_DBG("NPU_IPC_MSG_GENERAL_NOTIFY %d:0x%x:%d\n",
+ notify_msg_pkt->network_hdl,
+ notify_msg_pkt->notify_id,
+ notify_msg_pkt->notify_param[0]);
+
+ switch (notify_msg_pkt->notify_id) {
+ case NPU_NOTIFY_DCVS_MODE:
+ NPU_DBG("NPU_IPC_MSG_GENERAL_NOTIFY DCVS_MODE %d\n",
+ notify_msg_pkt->notify_param[0]);
+ update_dcvs_activity(npu_dev,
+ notify_msg_pkt->notify_param[0]);
+ break;
+ default:
+ NPU_ERR("unsupported notify id 0x%x\n", notify_msg_pkt->notify_id);
+ break;
+ }
+ break;
+ }
default:
NPU_ERR("Not supported apps response received %d\n",
msg_id);
@@ -1616,9 +1718,9 @@ static uint32_t find_networks_perf_mode(struct npu_host_ctx *host_ctx)
} else {
/* find the max level among all the networks */
for (i = 0; i < host_ctx->network_num; i++) {
- if ((network->perf_mode != 0) &&
- (network->perf_mode > max_perf_mode))
- max_perf_mode = network->perf_mode;
+ if ((network->cur_perf_mode != 0) &&
+ (network->cur_perf_mode > max_perf_mode))
+ max_perf_mode = network->cur_perf_mode;
network++;
}
}
@@ -1635,6 +1737,12 @@ static int set_perf_mode(struct npu_device *npu_dev)
networks_perf_mode = find_networks_perf_mode(host_ctx);
+ if (npu_dev->pwrctrl.perf_mode_override)
+ networks_perf_mode = npu_dev->pwrctrl.perf_mode_override;
+
+ if (npu_dev->pwrctrl.cur_dcvs_activity != NPU_DCVS_ACTIVITY_MAX_PERF)
+ networks_perf_mode = min_t(uint32_t, networks_perf_mode,
+ npu_dev->pwrctrl.cur_dcvs_activity);
ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
if (ret)
NPU_ERR("set uc power level %d failed\n", networks_perf_mode);
@@ -1642,12 +1750,182 @@ static int set_perf_mode(struct npu_device *npu_dev)
return ret;
}
+static int update_dcvs_activity(struct npu_device *npu_dev, uint32_t activity)
+{
+ npu_dev->pwrctrl.cur_dcvs_activity = activity;
+ NPU_DBG("update dcvs activity to %d\n", activity);
+
+ return set_perf_mode(npu_dev);
+}
+
+int32_t npu_host_set_fw_property(struct npu_device *npu_dev,
+ struct msm_npu_property *property)
+{
+ int ret = 0, i;
+ uint32_t prop_param, prop_id;
+ struct ipc_cmd_prop_pkt *prop_packet = NULL;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ uint32_t num_of_params, pkt_size;
+
+ prop_id = property->prop_id;
+ num_of_params = min_t(uint32_t, property->num_of_params,
+ (uint32_t)PROP_PARAM_MAX_SIZE);
+ pkt_size = sizeof(*prop_packet) + num_of_params * sizeof(uint32_t);
+ prop_packet = kzalloc(pkt_size, GFP_KERNEL);
+
+ if (!prop_packet)
+ return -ENOMEM;
+
+ switch (prop_id) {
+ case MSM_NPU_PROP_ID_DCVS_MODE:
+ prop_param = min_t(uint32_t, property->prop_param[0],
+ (uint32_t)(npu_dev->pwrctrl.num_pwrlevels - 1));
+ property->prop_param[0] = prop_param;
+ NPU_DBG("setting dcvs_mode to %d[%d:%d]\n", prop_param,
+ property->prop_param[0],
+ (uint32_t)(npu_dev->pwrctrl.num_pwrlevels - 1));
+
+ if (property->network_hdl == 0) {
+ npu_dev->pwrctrl.dcvs_mode = prop_param;
+ NPU_DBG("Set global dcvs mode %d\n", prop_param);
+ }
+ break;
+ default:
+ NPU_ERR("unsupported property %d\n", property->prop_id);
+ goto set_prop_exit;
+ }
+
+ prop_packet->header.cmd_type = NPU_IPC_CMD_SET_PROPERTY;
+ prop_packet->header.size = pkt_size;
+ prop_packet->header.trans_id =
+ atomic_add_return(1, &host_ctx->ipc_trans_id);
+ prop_packet->header.flags = 0;
+
+ prop_packet->prop_id = prop_id;
+ prop_packet->num_params = num_of_params;
+ prop_packet->network_hdl = property->network_hdl;
+ for (i = 0; i < num_of_params; i++)
+ prop_packet->prop_param[i] = property->prop_param[i];
+
+ mutex_lock(&host_ctx->lock);
+ ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC,
+ prop_packet);
+ NPU_DBG("NPU_IPC_CMD_SET_PROPERTY sent status: %d\n", ret);
+
+ if (ret) {
+ NPU_ERR("NPU_IPC_CMD_SET_PROPERTY failed\n");
+ goto set_prop_exit;
+ }
+ mutex_unlock(&host_ctx->lock);
+
+ ret = wait_for_completion_interruptible_timeout(
+ &host_ctx->misc_cmd_done,
+ (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
+ NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
+
+ mutex_lock(&host_ctx->lock);
+ if (!ret) {
+ NPU_ERR("NPU_IPC_CMD_SET_PROPERTY time out\n");
+ ret = -ETIMEDOUT;
+ goto set_prop_exit;
+ } else if (ret < 0) {
+ NPU_ERR("Wait for set_property done interrupted by signal\n");
+ goto set_prop_exit;
+ }
+
+ ret = host_ctx->misc_cmd_result;
+ if (ret)
+ NPU_ERR("set fw property failed %d\n", ret);
+
+set_prop_exit:
+ mutex_unlock(&host_ctx->lock);
+ kfree(prop_packet);
+ return ret;
+}
+
+int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
+ struct msm_npu_property *property)
+{
+ int ret = 0, i;
+ struct ipc_cmd_prop_pkt *prop_packet = NULL;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ struct msm_npu_property *prop_from_fw;
+ uint32_t num_of_params, pkt_size;
+
+ num_of_params = min_t(uint32_t, property->num_of_params,
+ (uint32_t)PROP_PARAM_MAX_SIZE);
+ pkt_size = sizeof(*prop_packet) + num_of_params * sizeof(uint32_t);
+ prop_packet = kzalloc(pkt_size, GFP_KERNEL);
+
+ if (!prop_packet)
+ return -ENOMEM;
+
+ prop_packet->header.cmd_type = NPU_IPC_CMD_GET_PROPERTY;
+ prop_packet->header.size = pkt_size;
+ prop_packet->header.trans_id =
+ atomic_add_return(1, &host_ctx->ipc_trans_id);
+ prop_packet->header.flags = 0;
+
+ prop_packet->prop_id = property->prop_id;
+ prop_packet->num_params = num_of_params;
+ prop_packet->network_hdl = property->network_hdl;
+ for (i = 0; i < num_of_params; i++)
+ prop_packet->prop_param[i] = property->prop_param[i];
+
+ mutex_lock(&host_ctx->lock);
+ ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC,
+ prop_packet);
+ NPU_DBG("NPU_IPC_CMD_GET_PROPERTY sent status: %d\n", ret);
+
+ if (ret) {
+ NPU_ERR("NPU_IPC_CMD_GET_PROPERTY failed\n");
+ goto get_prop_exit;
+ }
+ mutex_unlock(&host_ctx->lock);
+
+ ret = wait_for_completion_interruptible_timeout(
+ &host_ctx->misc_cmd_done,
+ (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
+ NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
+
+ mutex_lock(&host_ctx->lock);
+ if (!ret) {
+ pr_err_ratelimited("npu: NPU_IPC_CMD_GET_PROPERTY time out\n");
+ ret = -ETIMEDOUT;
+ goto get_prop_exit;
+ } else if (ret < 0) {
+ NPU_ERR("Wait for get_property done interrupted by signal\n");
+ goto get_prop_exit;
+ }
+
+ ret = host_ctx->misc_cmd_result;
+ if (!ret) {
+ /* Return prop data retrieved from fw to user */
+ prop_from_fw = (struct msm_npu_property *)(host_ctx->prop_buf);
+ if (property->prop_id == prop_from_fw->prop_id &&
+ property->network_hdl == prop_from_fw->network_hdl) {
+ property->num_of_params = num_of_params;
+ for (i = 0; i < num_of_params; i++)
+ property->prop_param[i] =
+ prop_from_fw->prop_param[i];
+ }
+ } else {
+ NPU_ERR("get fw property failed %d\n", ret);
+ }
+
+get_prop_exit:
+ mutex_unlock(&host_ctx->lock);
+ kfree(prop_packet);
+ return ret;
+}
+
int32_t npu_host_load_network_v2(struct npu_client *client,
struct msm_npu_load_network_ioctl_v2 *load_ioctl,
struct msm_npu_patch_info_v2 *patch_info)
{
int ret = 0, i;
struct npu_device *npu_dev = client->npu_dev;
+ struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
struct npu_network *network;
struct ipc_cmd_load_pkt_v2 *load_packet = NULL;
struct ipc_cmd_unload_pkt unload_packet;
@@ -1685,7 +1963,9 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
network->phy_add = load_ioctl->buf_phys_addr;
network->first_block_size = load_ioctl->first_block_size;
network->priority = load_ioctl->priority;
- network->perf_mode = load_ioctl->perf_mode;
+ network->cur_perf_mode = network->init_perf_mode =
+ (load_ioctl->perf_mode == PERF_MODE_DEFAULT) ?
+ pwr->num_pwrlevels : load_ioctl->perf_mode;
network->num_layers = load_ioctl->num_layers;
/* verify mapped physical address */
@@ -1868,9 +2148,10 @@ int32_t npu_host_unload_network(struct npu_client *client,
*/
network_put(network);
free_network(host_ctx, client, network->id);
- /* update perf mode */
- if (set_perf_mode(npu_dev))
- NPU_WARN("set_perf_mode failed\n");
+
+ /* recalculate uc_power_level after unload network */
+ if (npu_dev->pwrctrl.cur_dcvs_activity)
+ set_perf_mode(npu_dev);
mutex_unlock(&host_ctx->lock);
@@ -2107,3 +2388,85 @@ void npu_host_cleanup_networks(struct npu_client *client)
npu_host_unmap_buf(client, &unmap_req);
}
}
+
+/*
+ * set network or global perf_mode
+ * if network_hdl is 0, set global perf_mode_override
+ * otherwise set network perf_mode: if perf_mode is 0,
+ * change network perf_mode to initial perf_mode from
+ * load_network
+ */
+int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl,
+ uint32_t perf_mode)
+{
+ int ret = 0;
+ struct npu_device *npu_dev = client->npu_dev;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ struct npu_network *network = NULL;
+
+ mutex_lock(&host_ctx->lock);
+
+ if (network_hdl == 0) {
+ NPU_DBG("change perf_mode_override to %d\n", perf_mode);
+ npu_dev->pwrctrl.perf_mode_override = perf_mode;
+ } else {
+ network = get_network_by_hdl(host_ctx, client, network_hdl);
+ if (!network) {
+ NPU_ERR("invalid network handle %x\n", network_hdl);
+ mutex_unlock(&host_ctx->lock);
+ return -EINVAL;
+ }
+
+ if (perf_mode == 0) {
+ network->cur_perf_mode = network->init_perf_mode;
+ NPU_DBG("change network %d perf_mode back to %d\n",
+ network_hdl, network->cur_perf_mode);
+ } else {
+ network->cur_perf_mode = perf_mode;
+ NPU_DBG("change network %d perf_mode to %d\n",
+ network_hdl, network->cur_perf_mode);
+ }
+ }
+
+ ret = set_perf_mode(npu_dev);
+ if (ret)
+ NPU_ERR("set_perf_mode failed\n");
+
+ if (network)
+ network_put(network);
+ mutex_unlock(&host_ctx->lock);
+
+ return ret;
+}
+
+/*
+ * get the currently set network or global perf_mode
+ * if network_hdl is 0, get global perf_mode_override
+ * otherwise get network perf_mode
+ */
+int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl)
+{
+ int param_val = 0;
+ struct npu_device *npu_dev = client->npu_dev;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ struct npu_network *network = NULL;
+
+ mutex_lock(&host_ctx->lock);
+
+ if (network_hdl == 0) {
+ param_val = npu_dev->pwrctrl.perf_mode_override;
+ } else {
+ network = get_network_by_hdl(host_ctx, client, network_hdl);
+ if (!network) {
+ NPU_ERR("invalid network handle %x\n", network_hdl);
+ mutex_unlock(&host_ctx->lock);
+ return -EINVAL;
+ }
+ param_val = network->cur_perf_mode;
+ network_put(network);
+ }
+
+ mutex_unlock(&host_ctx->lock);
+
+ return param_val;
+}
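
After this change set_perf_mode() combines three inputs: the highest mode across loaded networks, the global sysfs override, and the DCVS activity ceiling reported by firmware. A stand-alone sketch of that selection (hypothetical names, assuming 0x100 remains the "no ceiling" sentinel):

	#include <stdint.h>

	#define DCVS_ACTIVITY_MAX_PERF 0x100u

	static uint32_t pick_perf_mode(uint32_t networks_max, uint32_t override,
				       uint32_t dcvs_activity)
	{
		uint32_t mode = networks_max;

		if (override)                      /* sysfs override wins ... */
			mode = override;

		if (dcvs_activity != DCVS_ACTIVITY_MAX_PERF &&
		    dcvs_activity < mode)          /* ... but DCVS can cap it */
			mode = dcvs_activity;

		return mode;
	}
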
diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h
index 72976cb..9ef7883 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.h
+++ b/drivers/media/platform/msm/npu/npu_mgr.h
@@ -44,7 +44,8 @@ struct npu_network {
uint32_t first_block_size;
uint32_t network_hdl;
uint32_t priority;
- uint32_t perf_mode;
+ uint32_t cur_perf_mode;
+ uint32_t init_perf_mode;
uint32_t num_layers;
void *stats_buf;
void __user *stats_buf_u;
@@ -73,6 +74,7 @@ struct npu_host_ctx {
void *subsystem_handle;
enum fw_state fw_state;
int32_t fw_ref_cnt;
+ int32_t npu_init_cnt;
int32_t power_vote_num;
struct work_struct ipc_irq_work;
struct work_struct wdg_err_irq_work;
@@ -81,11 +83,13 @@ struct npu_host_ctx {
struct work_struct update_pwr_work;
struct delayed_work disable_fw_work;
struct workqueue_struct *wq;
+ struct workqueue_struct *wq_pri;
struct completion misc_cmd_done;
struct completion fw_deinit_done;
struct completion fw_bringup_done;
struct completion fw_shutdown_done;
struct completion npu_power_up_done;
+ void *prop_buf;
int32_t network_num;
struct npu_network networks[MAX_LOADED_NETWORK];
bool sys_cache_disable;
@@ -141,11 +145,17 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
struct msm_npu_exec_network_ioctl_v2 *exec_ioctl,
struct msm_npu_patch_buf_info *patch_buf_info);
int32_t npu_host_loopback_test(struct npu_device *npu_dev);
+int32_t npu_host_set_fw_property(struct npu_device *npu_dev,
+ struct msm_npu_property *property);
+int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
+ struct msm_npu_property *property);
void npu_host_cleanup_networks(struct npu_client *client);
int npu_host_notify_fw_pwr_state(struct npu_device *npu_dev,
uint32_t pwr_level, bool post);
int npu_host_update_power(struct npu_device *npu_dev);
-
+int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl,
+ uint32_t perf_mode);
+int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl);
void npu_dump_debug_info(struct npu_device *npu_dev);
void npu_dump_ipc_packet(struct npu_device *npu_dev, void *cmd_ptr);
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 3c7a511..b94402a 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -102,6 +102,10 @@ static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
struct mmc_host *mmc = cq_host->mmc;
+ int offset = 0;
+
+ if (cq_host->offset_changed)
+ offset = CQE_V5_VENDOR_CFG;
mmc_log_string(mmc,
"CQHCI_CTL=0x%08x CQHCI_IS=0x%08x CQHCI_ISTE=0x%08x CQHCI_ISGE=0x%08x CQHCI_TDBR=0x%08x CQHCI_TCN=0x%08x CQHCI_DQS=0x%08x CQHCI_DPT=0x%08x CQHCI_TERRI=0x%08x CQHCI_CRI=0x%08x CQHCI_CRA=0x%08x CQHCI_CRDCT=0x%08x\n",
@@ -147,6 +151,8 @@ static void cqhci_dumpregs(struct cqhci_host *cq_host)
CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
cqhci_readl(cq_host, CQHCI_CRI),
cqhci_readl(cq_host, CQHCI_CRA));
+ CQHCI_DUMP("Vendor cfg 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_VENDOR_CFG + offset));
if (cq_host->ops->dumpregs)
cq_host->ops->dumpregs(mmc);
@@ -279,6 +285,12 @@ static void __cqhci_enable(struct cqhci_host *cq_host)
cq_host->caps |= CQHCI_CAP_CRYPTO_SUPPORT |
CQHCI_TASK_DESC_SZ_128;
cqcfg |= CQHCI_ICE_ENABLE;
+ /*
+ * For SDHC v5.0 onwards, ICE 3.0 specific registers are added
+ * in CQ register space, due to which a few CQ registers are
+ * shifted. Set offset_changed boolean to use updated address.
+ */
+ cq_host->offset_changed = true;
}
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
@@ -698,8 +710,12 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
cq_host->qcnt += 1;
+ /* Ensure the task descriptor list is flushed before ringing doorbell */
+ wmb();
mmc_log_string(mmc, "tag: %d\n", tag);
cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
+ /* Commit the doorbell write immediately */
+ wmb();
if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
pr_debug("%s: cqhci: doorbell not set for tag %d\n",
mmc_hostname(mmc), tag);
@@ -809,8 +825,10 @@ static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
struct cqhci_slot *slot = &cq_host->slot[tag];
struct mmc_request *mrq = slot->mrq;
struct mmc_data *data;
- int err = 0;
+ int err = 0, offset = 0;
+ if (cq_host->offset_changed)
+ offset = CQE_V5_VENDOR_CFG;
if (!mrq) {
WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
mmc_hostname(mmc), tag);
@@ -840,6 +858,11 @@ static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
data->bytes_xfered = 0;
else
data->bytes_xfered = data->blksz * data->blocks;
+ } else {
+ cqhci_writel(cq_host, cqhci_readl(cq_host,
+ CQHCI_VENDOR_CFG + offset) |
+ CMDQ_SEND_STATUS_TRIGGER,
+ CQHCI_VENDOR_CFG + offset);
}
if (!(cq_host->caps & CQHCI_CAP_CRYPTO_SUPPORT) &&
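
The vendor config register sits at 0x100 on older controllers and moves up by 0x900 once the ICE 3.0 registers appear in the CQ space on SDHC v5.0, which is what the offset_changed flag tracks. A stand-alone sketch of the address selection (hypothetical names):

	#include <stdbool.h>
	#include <stdint.h>

	#define VENDOR_CFG_BASE   0x100u   /* CQHCI_VENDOR_CFG */
	#define VENDOR_CFG_V5_OFF 0x900u   /* CQE_V5_VENDOR_CFG */

	static uint32_t vendor_cfg_addr(bool offset_changed)
	{
		return VENDOR_CFG_BASE + (offset_changed ? VENDOR_CFG_V5_OFF : 0);
	}
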
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
index e87e9bc..da4324b 100644
--- a/drivers/mmc/host/cqhci.h
+++ b/drivers/mmc/host/cqhci.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -116,6 +117,14 @@
/* command response argument */
#define CQHCI_CRA 0x5C
+/*
+ * Macros for the updated CQ vendor-specific register
+ * address used on SDHC v5.0 onwards.
+ */
+#define CQE_V5_VENDOR_CFG 0x900
+#define CQHCI_VENDOR_CFG 0x100
+#define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
+
#define CQHCI_INT_ALL 0xF
#define CQHCI_IC_DEFAULT_ICCTH 31
#define CQHCI_IC_DEFAULT_ICTOVAL 1
@@ -183,6 +192,7 @@ struct cqhci_host {
bool activated;
bool waiting_for_idle;
bool recovery_halt;
+ bool offset_changed;
size_t desc_size;
size_t data_size;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index ee219c7..f25c9cd 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2400,6 +2400,7 @@ static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
msm_host->mmc->caps2 |= MMC_CAP2_CQE;
cq_host->ops = &sdhci_msm_cqhci_ops;
+ msm_host->cq_host = cq_host;
dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
if (dma64)
@@ -3950,6 +3951,46 @@ static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
sizeof(struct sdhci_host));
}
+#define MAX_TEST_BUS 60
+#define DRV_NAME "cqhci-host"
+static void sdhci_msm_cqe_dump_debug_ram(struct sdhci_host *host)
+{
+ int i = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ struct cqhci_host *cq_host;
+ u32 version;
+ u16 minor;
+ int offset;
+
+ if (msm_host->cq_host)
+ cq_host = msm_host->cq_host;
+ else
+ return;
+
+ version = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_VERSION);
+ minor = version & CORE_VERSION_TARGET_MASK;
+ /* registers offset changed starting from 4.2.0 */
+ offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
+
+ if (cq_host->offset_changed)
+ offset += CQE_V5_VENDOR_CFG;
+ pr_err("---- Debug RAM dump ----\n");
+ pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
+ cqhci_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
+ cqhci_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
+
+ while (i < 16) {
+ pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
+ cqhci_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
+ i++;
+ }
+ pr_err("-------------------------\n");
+}
+
void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -3964,6 +4005,8 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
sdhci_msm_cache_debug_data(host);
pr_info("----------- VENDOR REGISTER DUMP -----------\n");
+ if (msm_host->cq_host)
+ sdhci_msm_cqe_dump_debug_ram(host);
mmc_log_string(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
sdhci_msm_readl_relaxed(host,
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 58f5632a..051dfa8 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -225,6 +225,7 @@ struct sdhci_msm_host {
atomic_t clks_on; /* Set if clocks are enabled */
struct sdhci_msm_pltfm_data *pdata;
struct mmc_host *mmc;
+ struct cqhci_host *cq_host;
struct sdhci_msm_debug_data cached_data;
struct sdhci_pltfm_data sdhci_msm_pdata;
u32 curr_pwr_state;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
index 40238e6..e39a151 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
@@ -34,8 +34,9 @@ rmnet_get_frag_descriptor(struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
struct rmnet_frag_descriptor *frag_desc;
+ unsigned long flags;
- spin_lock(&port->desc_pool_lock);
+ spin_lock_irqsave(&port->desc_pool_lock, flags);
if (!list_empty(&pool->free_list)) {
frag_desc = list_first_entry(&pool->free_list,
struct rmnet_frag_descriptor,
@@ -52,7 +53,7 @@ rmnet_get_frag_descriptor(struct rmnet_port *port)
}
out:
- spin_unlock(&port->desc_pool_lock);
+ spin_unlock_irqrestore(&port->desc_pool_lock, flags);
return frag_desc;
}
EXPORT_SYMBOL(rmnet_get_frag_descriptor);
@@ -62,6 +63,7 @@ void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
{
struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
struct page *page = skb_frag_page(&frag_desc->frag);
+ unsigned long flags;
list_del(&frag_desc->list);
if (page)
@@ -70,9 +72,9 @@ void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
memset(frag_desc, 0, sizeof(*frag_desc));
INIT_LIST_HEAD(&frag_desc->list);
INIT_LIST_HEAD(&frag_desc->sub_frags);
- spin_lock(&port->desc_pool_lock);
+ spin_lock_irqsave(&port->desc_pool_lock, flags);
list_add_tail(&frag_desc->list, &pool->free_list);
- spin_unlock(&port->desc_pool_lock);
+ spin_unlock_irqrestore(&port->desc_pool_lock, flags);
}
EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);
@@ -670,6 +672,12 @@ static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
new_frag->tcp_seq_set = 1;
new_frag->tcp_seq = htonl(ntohl(th->seq) +
coal_desc->data_offset);
+ } else if (coal_desc->trans_proto == IPPROTO_UDP) {
+ struct udphdr *uh;
+
+ uh = (struct udphdr *)(hdr_start + coal_desc->ip_len);
+ if (coal_desc->ip_proto == 4 && !uh->check)
+ csum_valid = true;
}
if (coal_desc->ip_proto == 4) {
@@ -734,6 +742,7 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
u8 pkt, total_pkt = 0;
u8 nlo;
bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
+ bool zero_csum = false;
/* Pull off the headers we no longer need */
if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header)))
@@ -794,7 +803,12 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
th = (struct tcphdr *)((u8 *)iph + coal_desc->ip_len);
coal_desc->trans_len = th->doff * 4;
} else if (coal_desc->trans_proto == IPPROTO_UDP) {
- coal_desc->trans_len = sizeof(struct udphdr);
+ struct udphdr *uh;
+
+ uh = (struct udphdr *)((u8 *)iph + coal_desc->ip_len);
+ coal_desc->trans_len = sizeof(*uh);
+ if (coal_desc->ip_proto == 4 && !uh->check)
+ zero_csum = true;
} else {
priv->stats.coal.coal_trans_invalid++;
return;
@@ -802,7 +816,7 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
coal_desc->hdrs_valid = 1;
- if (rmnet_map_v5_csum_buggy(coal_hdr)) {
+ if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
/* Mark the checksum as valid if it checks out */
if (rmnet_frag_validate_csum(coal_desc))
coal_desc->csum_valid = true;
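
The zero_csum handling relies on the rule that an IPv4 UDP checksum of zero means the sender computed none (RFC 768), so such segments can be treated as checksum-valid without verification. A stand-alone sketch of the test (hypothetical names):

	#include <stdbool.h>
	#include <stdint.h>

	struct udp_hdr {
		uint16_t source;
		uint16_t dest;
		uint16_t len;
		uint16_t check;
	};

	static bool udp_zero_csum_ok(int ip_version, const struct udp_hdr *uh)
	{
		/* only IPv4 allows an all-zero (absent) UDP checksum */
		return ip_version == 4 && uh->check == 0;
	}
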
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 84a42c9..da5e47c 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -716,6 +716,7 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
__sum16 *check = NULL;
u32 alloc_len;
+ bool zero_csum = false;
/* We can avoid copying the data if the SKB we got from the lower-level
* drivers was nonlinear.
@@ -747,6 +748,8 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
uh->len = htons(skbn->len);
check = &uh->check;
+ if (coal_meta->ip_proto == 4 && !uh->check)
+ zero_csum = true;
}
/* Push IP header and update necessary fields */
@@ -767,7 +770,7 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
}
/* Handle checksum status */
- if (likely(csum_valid)) {
+ if (likely(csum_valid) || zero_csum) {
/* Set the partial checksum information */
rmnet_map_partial_csum(skbn, coal_meta);
} else if (check) {
@@ -865,6 +868,7 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
u8 pkt, total_pkt = 0;
u8 nlo;
bool gro = coal_skb->dev->features & NETIF_F_GRO_HW;
+ bool zero_csum = false;
memset(&coal_meta, 0, sizeof(coal_meta));
@@ -926,12 +930,15 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
uh = (struct udphdr *)((u8 *)iph + coal_meta.ip_len);
coal_meta.trans_len = sizeof(*uh);
coal_meta.trans_header = uh;
+ /* Check for v4 zero checksum */
+ if (coal_meta.ip_proto == 4 && !uh->check)
+ zero_csum = true;
} else {
priv->stats.coal.coal_trans_invalid++;
return;
}
- if (rmnet_map_v5_csum_buggy(coal_hdr)) {
+ if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
rmnet_map_move_headers(coal_skb);
/* Mark as valid if it checks out */
if (rmnet_map_validate_csum(coal_skb, &coal_meta))
diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c
index 5601883..5aa68e3 100644
--- a/drivers/net/wireless/cnss2/bus.c
+++ b/drivers/net/wireless/cnss2/bus.c
@@ -186,6 +186,36 @@ int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv)
}
}
+int cnss_bus_qmi_send_get(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_qmi_send_get(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+int cnss_bus_qmi_send_put(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_qmi_send_put(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
void cnss_bus_fw_boot_timeout_hdlr(struct timer_list *t)
{
struct cnss_plat_data *plat_priv =
diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h
index 8ec4887..5248eb5 100644
--- a/drivers/net/wireless/cnss2/bus.h
+++ b/drivers/net/wireless/cnss2/bus.h
@@ -30,6 +30,8 @@ int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv);
void cnss_bus_free_qdss_mem(struct cnss_plat_data *plat_priv);
u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv);
int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv);
+int cnss_bus_qmi_send_get(struct cnss_plat_data *plat_priv);
+int cnss_bus_qmi_send_put(struct cnss_plat_data *plat_priv);
void cnss_bus_fw_boot_timeout_hdlr(struct timer_list *t);
void cnss_bus_collect_dump_info(struct cnss_plat_data *plat_priv,
bool in_panic);
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index a1e0dc5..bb55c31 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -477,6 +477,10 @@ static ssize_t cnss_runtime_pm_debug_write(struct file *fp,
cnss_pci_pm_runtime_put_noidle(pci_priv);
} else if (sysfs_streq(cmd, "mark_last_busy")) {
cnss_pci_pm_runtime_mark_last_busy(pci_priv);
+ } else if (sysfs_streq(cmd, "resume_bus")) {
+ cnss_pci_resume_bus(pci_priv);
+ } else if (sysfs_streq(cmd, "suspend_bus")) {
+ cnss_pci_suspend_bus(pci_priv);
} else {
cnss_pr_err("Runtime PM debugfs command is invalid\n");
ret = -EINVAL;
@@ -500,6 +504,8 @@ static int cnss_runtime_pm_debug_show(struct seq_file *s, void *data)
seq_puts(s, "put_noidle: do runtime PM put noidle\n");
seq_puts(s, "put_autosuspend: do runtime PM put autosuspend\n");
seq_puts(s, "mark_last_busy: do runtime PM mark last busy\n");
+ seq_puts(s, "resume_bus: do bus resume only\n");
+ seq_puts(s, "suspend_bus: do bus suspend only\n");
return 0;
}
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index f64d31d..5beeaa3 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -1210,13 +1210,23 @@ EXPORT_SYMBOL(cnss_force_collect_rddm);
int cnss_qmi_send_get(struct device *dev)
{
- return 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
+ return 0;
+
+ return cnss_bus_qmi_send_get(plat_priv);
}
EXPORT_SYMBOL(cnss_qmi_send_get);
int cnss_qmi_send_put(struct device *dev)
{
- return 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
+ return 0;
+
+ return cnss_bus_qmi_send_put(plat_priv);
}
EXPORT_SYMBOL(cnss_qmi_send_put);
@@ -1224,7 +1234,25 @@ int cnss_qmi_send(struct device *dev, int type, void *cmd,
int cmd_len, void *cb_ctx,
int (*cb)(void *ctx, void *event, int event_len))
{
- return -EINVAL;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ int ret;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
+ return -EINVAL;
+
+ plat_priv->get_info_cb = cb;
+ plat_priv->get_info_cb_ctx = cb_ctx;
+
+ ret = cnss_wlfw_get_info_send_sync(plat_priv, type, cmd, cmd_len);
+ if (ret) {
+ plat_priv->get_info_cb = NULL;
+ plat_priv->get_info_cb_ctx = NULL;
+ }
+
+ return ret;
}
EXPORT_SYMBOL(cnss_qmi_send);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index 8009799..9e9068a 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -353,6 +353,8 @@ struct cnss_plat_data {
struct qmi_handle ims_qmi;
struct qmi_txn txn;
u64 dynamic_feature;
+ void *get_info_cb_ctx;
+ int (*get_info_cb)(void *ctx, void *event, int event_len);
};
#ifdef CONFIG_ARCH_QCOM
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index c3b89ea..3b7e5c70 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -55,6 +55,7 @@
static DEFINE_SPINLOCK(pci_link_down_lock);
static DEFINE_SPINLOCK(pci_reg_window_lock);
+static DEFINE_SPINLOCK(time_sync_lock);
#define MHI_TIMEOUT_OVERWRITE_MS (plat_priv->ctrl_params.mhi_timeout)
#define MHI_M2_TIMEOUT_MS (plat_priv->ctrl_params.mhi_m2_timeout)
@@ -1009,6 +1010,7 @@ static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
{
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ unsigned long flags = 0;
u64 host_time_us, device_time_us, offset;
u32 low, high;
int ret;
@@ -1021,8 +1023,10 @@ static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
if (ret)
return ret;
+ spin_lock_irqsave(&time_sync_lock, flags);
host_time_us = cnss_get_host_timestamp(plat_priv);
ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
+ spin_unlock_irqrestore(&time_sync_lock, flags);
if (ret)
goto force_wake_put;
@@ -1219,6 +1223,9 @@ int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
}
}
+ plat_priv->get_info_cb_ctx = NULL;
+ plat_priv->get_info_cb = NULL;
+
return 0;
}
@@ -2011,7 +2018,7 @@ static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
return ret;
}
-static int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
+int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
{
struct pci_dev *pci_dev = pci_priv->pci_dev;
int ret = 0;
@@ -2056,7 +2063,7 @@ static int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
return ret;
}
-static int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
+int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
{
struct pci_dev *pci_dev = pci_priv->pci_dev;
int ret = 0;
@@ -2482,11 +2489,16 @@ int cnss_auto_suspend(struct device *dev)
if (!plat_priv)
return -ENODEV;
+ mutex_lock(&pci_priv->bus_lock);
ret = cnss_pci_suspend_bus(pci_priv);
- if (ret)
+ if (ret) {
+ mutex_unlock(&pci_priv->bus_lock);
return ret;
+ }
cnss_pci_set_auto_suspended(pci_priv, 1);
+ mutex_unlock(&pci_priv->bus_lock);
+
cnss_pci_set_monitor_wake_intr(pci_priv, true);
bus_bw_info = &plat_priv->bus_bw_info;
@@ -2512,11 +2524,15 @@ int cnss_auto_resume(struct device *dev)
if (!plat_priv)
return -ENODEV;
+ mutex_lock(&pci_priv->bus_lock);
ret = cnss_pci_resume_bus(pci_priv);
- if (ret)
+ if (ret) {
+ mutex_unlock(&pci_priv->bus_lock);
return ret;
+ }
cnss_pci_set_auto_suspended(pci_priv, 0);
+ mutex_unlock(&pci_priv->bus_lock);
bus_bw_info = &plat_priv->bus_bw_info;
msm_bus_scale_client_update_request(bus_bw_info->bus_client,
@@ -2609,6 +2625,46 @@ int cnss_pci_force_wake_release(struct device *dev)
}
EXPORT_SYMBOL(cnss_pci_force_wake_release);
+int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ mutex_lock(&pci_priv->bus_lock);
+ if (!cnss_pci_get_auto_suspended(pci_priv))
+ goto out;
+
+ cnss_pr_vdbg("Starting to handle get info prepare\n");
+
+ ret = cnss_pci_resume_bus(pci_priv);
+
+out:
+ mutex_unlock(&pci_priv->bus_lock);
+ return ret;
+}
+
+int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ mutex_lock(&pci_priv->bus_lock);
+ if (!cnss_pci_get_auto_suspended(pci_priv))
+ goto out;
+
+ cnss_pr_vdbg("Starting to handle get info done\n");
+
+ ret = cnss_pci_suspend_bus(pci_priv);
+
+out:
+ mutex_unlock(&pci_priv->bus_lock);
+ return ret;
+}
+
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
{
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
@@ -3712,6 +3768,7 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
plat_priv->bus_priv = pci_priv;
snprintf(plat_priv->firmware_name, sizeof(plat_priv->firmware_name),
DEFAULT_FW_FILE_NAME);
+ mutex_init(&pci_priv->bus_lock);
ret = cnss_register_subsys(plat_priv);
if (ret)
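
The new qmi_send get/put pair brackets a QMI exchange: wake the PCIe link only if runtime PM left it auto-suspended, put it back afterwards, and serialize against auto suspend/resume with bus_lock. A stand-alone pthread model of that bracket (hypothetical names, link helpers stubbed):

	#include <pthread.h>
	#include <stdbool.h>

	struct qmi_bus {
		pthread_mutex_t lock;     /* serializes suspend/resume of the link */
		bool auto_suspended;      /* set by the runtime-PM auto-suspend path */
	};

	/* stand-ins for the real PCIe link suspend/resume calls */
	static int link_resume(struct qmi_bus *bus)  { (void)bus; return 0; }
	static int link_suspend(struct qmi_bus *bus) { (void)bus; return 0; }

	static int qmi_send_get(struct qmi_bus *bus)
	{
		int ret = 0;

		pthread_mutex_lock(&bus->lock);
		if (bus->auto_suspended)          /* wake the link only if needed */
			ret = link_resume(bus);
		pthread_mutex_unlock(&bus->lock);
		return ret;
	}

	static int qmi_send_put(struct qmi_bus *bus)
	{
		int ret = 0;

		pthread_mutex_lock(&bus->lock);
		if (bus->auto_suspended)          /* drop the link back to suspend */
			ret = link_suspend(bus);
		pthread_mutex_unlock(&bus->lock);
		return ret;
	}
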
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 8dcb14a6..e7da860 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -92,6 +92,7 @@ struct cnss_pci_data {
struct timer_list dev_rddm_timer;
struct delayed_work time_sync_work;
u8 disable_pc;
+ struct mutex bus_lock; /* mutex for suspend and resume bus */
struct cnss_pci_debug_reg *debug_reg;
struct cnss_misc_reg *wcss_reg;
u32 wcss_reg_size;
@@ -173,6 +174,8 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic);
void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv);
int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv);
+int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv);
+int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv);
void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv);
int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv);
int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv);
@@ -196,5 +199,7 @@ void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv);
int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
enum cnss_driver_status status);
int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv);
+int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv);
+int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv);
#endif /* _CNSS_PCI_H */
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index 02178b3..2ffb1be 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -92,6 +92,8 @@ static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
req->qdss_trace_save_enable = 1;
req->qdss_trace_free_enable_valid = 1;
req->qdss_trace_free_enable = 1;
+ req->respond_get_info_enable_valid = 1;
+ req->respond_get_info_enable = 1;
ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
wlfw_ind_register_resp_msg_v01_ei, resp);
@@ -1458,6 +1460,77 @@ int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv)
return ret;
}
+int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type,
+ void *cmd, int cmd_len)
+{
+ struct wlfw_get_info_req_msg_v01 *req;
+ struct wlfw_get_info_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ cnss_pr_vdbg("Sending get info message, type: %d, cmd length: %d, state: 0x%lx\n",
+ type, cmd_len, plat_priv->driver_state);
+
+ if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01)
+ return -EINVAL;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->type = type;
+ req->data_len = cmd_len;
+ memcpy(req->data, cmd, req->data_len);
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_get_info_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Failed to initialize txn for get info request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_GET_INFO_REQ_V01,
+ WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_get_info_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Failed to send get info request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Failed to wait for response of get info request, err: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Get info request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
{
cnss_pr_dbg("QMI timeout is %u ms\n", QMI_WLFW_TIMEOUT_MS);
@@ -1717,6 +1790,31 @@ static void cnss_wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi_wlfw,
0, NULL);
}
+static void cnss_wlfw_respond_get_info_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ const struct wlfw_respond_get_info_ind_msg_v01 *ind_msg = data;
+
+ cnss_pr_vdbg("Received QMI WLFW respond get info indication\n");
+
+ if (!txn) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+
+ cnss_pr_vdbg("Extract message with event length: %d, type: %d, is last: %d, seq no: %d\n",
+ ind_msg->data_len, ind_msg->type,
+ ind_msg->is_last, ind_msg->seq_no);
+
+ if (plat_priv->get_info_cb_ctx && plat_priv->get_info_cb)
+ plat_priv->get_info_cb(plat_priv->get_info_cb_ctx,
+ (void *)ind_msg->data,
+ ind_msg->data_len);
+}
static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
{
.type = QMI_INDICATION,
@@ -1785,6 +1883,14 @@ static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
sizeof(struct wlfw_qdss_trace_free_ind_msg_v01),
.fn = cnss_wlfw_qdss_trace_free_ind_cb
},
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_RESPOND_GET_INFO_IND_V01,
+ .ei = wlfw_respond_get_info_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct wlfw_respond_get_info_ind_msg_v01),
+ .fn = cnss_wlfw_respond_get_info_ind_cb
+ },
{}
};
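For reference, the get-info plumbing added above pairs a synchronous QMI request (cnss_wlfw_get_info_send_sync) with an asynchronous indication (cnss_wlfw_respond_get_info_ind_cb) delivered through plat_priv->get_info_cb. A minimal caller sketch, assuming the callback fields follow the signature used by the indication handler; the helper names here are hypothetical and not part of this patch:

    static void wlan_get_info_event(void *ctx, void *event, int event_len)
    {
            /* event/event_len are copied verbatim from the
             * respond_get_info indication; ctx is whatever was stored
             * in get_info_cb_ctx.
             */
    }

    static int wlan_query_fw_info(struct cnss_plat_data *plat_priv)
    {
            u8 cmd[4] = { 0 };      /* opaque payload, defined by firmware */

            plat_priv->get_info_cb_ctx = plat_priv;
            plat_priv->get_info_cb = wlan_get_info_event;

            /* type is also firmware-defined; 0 is only a placeholder here */
            return cnss_wlfw_get_info_send_sync(plat_priv, 0, cmd, sizeof(cmd));
    }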
diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h
index a064660..fc2a2c6 100644
--- a/drivers/net/wireless/cnss2/qmi.h
+++ b/drivers/net/wireless/cnss2/qmi.h
@@ -59,6 +59,8 @@ int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv);
int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv);
int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type,
+ void *cmd, int cmd_len);
int cnss_register_coex_service(struct cnss_plat_data *plat_priv);
void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv);
int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv);
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
index 03a418e..682a20b 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
@@ -704,6 +704,24 @@ struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
qdss_trace_free_enable),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ respond_get_info_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ respond_get_info_enable),
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -3608,3 +3626,143 @@ struct qmi_elem_info wlfw_wfc_call_status_resp_msg_v01_ei[] = {
},
};
+struct qmi_elem_info wlfw_get_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_get_info_req_msg_v01,
+ type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_get_info_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_get_info_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info wlfw_get_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_get_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct qmi_elem_info wlfw_respond_get_info_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ is_last_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ is_last),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ seq_no_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_respond_get_info_ind_msg_v01,
+ seq_no),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
index dacdfdb..0e18d80 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
@@ -13,6 +13,7 @@
#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
#define QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01 0x0044
+#define QMI_WLFW_GET_INFO_REQ_V01 0x004A
#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
#define QMI_WLFW_CAL_DONE_IND_V01 0x003E
#define QMI_WLFW_WFC_CALL_STATUS_RESP_V01 0x0049
@@ -23,6 +24,7 @@
#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
#define QMI_WLFW_ANTENNA_GRANT_RESP_V01 0x0048
#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_RESPOND_GET_INFO_IND_V01 0x004B
#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
@@ -49,6 +51,7 @@
#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_GET_INFO_RESP_V01 0x004A
#define QMI_WLFW_QDSS_TRACE_MODE_RESP_V01 0x0045
#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
#define QMI_WLFW_FW_READY_IND_V01 0x0021
@@ -272,9 +275,11 @@ struct wlfw_ind_register_req_msg_v01 {
u8 qdss_trace_save_enable;
u8 qdss_trace_free_enable_valid;
u8 qdss_trace_free_enable;
+ u8 respond_get_info_enable_valid;
+ u8 respond_get_info_enable;
};
-#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 66
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 70
extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
struct wlfw_ind_register_resp_msg_v01 {
@@ -949,4 +954,34 @@ struct wlfw_wfc_call_status_resp_msg_v01 {
#define WLFW_WFC_CALL_STATUS_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_wfc_call_status_resp_msg_v01_ei[];
+struct wlfw_get_info_req_msg_v01 {
+ u8 type;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+};
+
+#define WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN 6153
+extern struct qmi_elem_info wlfw_get_info_req_msg_v01_ei[];
+
+struct wlfw_get_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_GET_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct qmi_elem_info wlfw_get_info_resp_msg_v01_ei[];
+
+struct wlfw_respond_get_info_ind_msg_v01 {
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 type_valid;
+ u8 type;
+ u8 is_last_valid;
+ u8 is_last;
+ u8 seq_no_valid;
+ u32 seq_no;
+};
+
+#define WLFW_RESPOND_GET_INFO_IND_MSG_V01_MAX_MSG_LEN 6164
+extern struct qmi_elem_info wlfw_respond_get_info_ind_msg_v01_ei[];
+
#endif
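The new maximum message lengths follow directly from the QMI wire format: every element is a TLV with a 1-byte type and a 2-byte length, optional elements only occupy space when present (the *_valid flags are not encoded), and a variable-length array carries an extra 2-byte count. Taking QMI_WLFW_MAX_DATA_SIZE_V01 as 6144 (the value these totals imply):

    WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN         = (3 + 1) + (3 + 2 + 6144)                     = 6153
    WLFW_GET_INFO_RESP_MSG_V01_MAX_MSG_LEN        = 3 + 4                                        = 7
    WLFW_RESPOND_GET_INFO_IND_MSG_V01_MAX_MSG_LEN = (3 + 2 + 6144) + (3 + 1) + (3 + 1) + (3 + 4) = 6164

where 4 is the size of struct qmi_response_type_v01 (two u16 fields). The same accounting explains the ind_register bump from 66 to 70: the optional respond_get_info_enable element adds one 4-byte TLV.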
diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c
index 43ecc57..864d5c9 100644
--- a/drivers/pci/controller/pci-msm.c
+++ b/drivers/pci/controller/pci-msm.c
@@ -44,6 +44,7 @@
#define PCIE20_PARF_DBI_BASE_ADDR (0x350)
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE (0x358)
+#define PCIE_GEN3_PRESET_DEFAULT (0x55555555)
#define PCIE_GEN3_SPCIE_CAP (0x0154)
#define PCIE_GEN3_GEN2_CTRL (0x080c)
#define PCIE_GEN3_RELATED (0x0890)
@@ -720,6 +721,7 @@ struct msm_pcie_dev_t {
uint32_t phy_status_offset;
uint32_t phy_status_bit;
uint32_t phy_power_down_offset;
+ uint32_t core_preset;
uint32_t cpl_timeout;
uint32_t current_bdf;
uint32_t perst_delay_us_min;
@@ -1377,6 +1379,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
dev->phy_status_bit);
PCIE_DBG_FS(dev, "phy_power_down_offset: 0x%x\n",
dev->phy_power_down_offset);
+ PCIE_DBG_FS(dev, "core_preset: 0x%x\n",
+ dev->core_preset);
PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
dev->cpl_timeout);
PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
@@ -4093,7 +4097,7 @@ static int msm_pcie_link_train(struct msm_pcie_dev_t *dev)
msm_pcie_write_reg_field(dev->dm_core,
PCIE_GEN3_MISC_CONTROL, BIT(0), 1);
msm_pcie_write_reg(dev->dm_core,
- PCIE_GEN3_SPCIE_CAP, 0x77777777);
+ PCIE_GEN3_SPCIE_CAP, dev->core_preset);
msm_pcie_write_reg_field(dev->dm_core,
PCIE_GEN3_MISC_CONTROL, BIT(0), 0);
@@ -6144,6 +6148,13 @@ static int msm_pcie_probe(struct platform_device *pdev)
PCIE_DBG(pcie_dev, "RC%d: phy-power-down-offset: 0x%x.\n",
pcie_dev->rc_idx, pcie_dev->phy_power_down_offset);
+ pcie_dev->core_preset = PCIE_GEN3_PRESET_DEFAULT;
+ of_property_read_u32(pdev->dev.of_node,
+ "qcom,core-preset",
+ &pcie_dev->core_preset);
+ PCIE_DBG(pcie_dev, "RC%d: core-preset: 0x%x.\n",
+ pcie_dev->rc_idx, pcie_dev->core_preset);
+
of_property_read_u32(of_node, "qcom,cpl-timeout",
&pcie_dev->cpl_timeout);
PCIE_DBG(pcie_dev, "RC%d: cpl-timeout: 0x%x.\n",
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f06ca58..cd0e278 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -908,6 +908,9 @@ static int pci_pm_resume(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
+ if (pci_dev->no_d3hot)
+ goto skip_pci_pm_restore;
+
/*
* This is necessary for the suspend error path in which resume is
* called without restoring the standard config registers of the device.
@@ -915,6 +918,7 @@ static int pci_pm_resume(struct device *dev)
if (pci_dev->state_saved)
pci_restore_standard_config(pci_dev);
+skip_pci_pm_restore:
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
index de03667..a8dc7a4 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
@@ -87,6 +87,9 @@
#define QSERDES_TX0_PWM_GEAR_3_DIVIDER_BAND0_1 TX_OFF(0, 0x170)
#define QSERDES_TX0_PWM_GEAR_4_DIVIDER_BAND0_1 TX_OFF(0, 0x174)
#define QSERDES_TX0_LANE_MODE_1 TX_OFF(0, 0x84)
+#define QSERDES_TX0_LANE_MODE_3 TX_OFF(0, 0x8C)
+#define QSERDES_TX0_RES_CODE_LANE_OFFSET_TX TX_OFF(0, 0x3C)
+#define QSERDES_TX0_RES_CODE_LANE_OFFSET_RX TX_OFF(0, 0x40)
#define QSERDES_TX0_TRAN_DRVR_EMP_EN TX_OFF(0, 0xC0)
#define QSERDES_TX1_PWM_GEAR_1_DIVIDER_BAND0_1 TX_OFF(1, 0x168)
@@ -94,6 +97,9 @@
#define QSERDES_TX1_PWM_GEAR_3_DIVIDER_BAND0_1 TX_OFF(1, 0x170)
#define QSERDES_TX1_PWM_GEAR_4_DIVIDER_BAND0_1 TX_OFF(1, 0x174)
#define QSERDES_TX1_LANE_MODE_1 TX_OFF(1, 0x84)
+#define QSERDES_TX1_LANE_MODE_3 TX_OFF(1, 0x8C)
+#define QSERDES_TX1_RES_CODE_LANE_OFFSET_TX TX_OFF(1, 0x3C)
+#define QSERDES_TX1_RES_CODE_LANE_OFFSET_RX TX_OFF(1, 0x40)
#define QSERDES_TX1_TRAN_DRVR_EMP_EN TX_OFF(1, 0xC0)
/* UFS PHY RX registers */
@@ -132,6 +138,8 @@
#define QSERDES_RX0_RX_MODE_10_HIGH3 RX_OFF(0, 0x190)
#define QSERDES_RX0_RX_MODE_10_HIGH4 RX_OFF(0, 0x194)
#define QSERDES_RX0_DCC_CTRL1 RX_OFF(0, 0x1A8)
+#define QSERDES_RX0_VGA_CAL_CNTRL2 RX_OFF(0, 0xD8)
+
#define QSERDES_RX0_GM_CAL RX_OFF(0, 0xDC)
#define QSERDES_RX0_AC_JTAG_ENABLE RX_OFF(0, 0x68)
#define QSERDES_RX0_UCDR_FO_GAIN RX_OFF(0, 0x08)
@@ -172,6 +180,7 @@
#define QSERDES_RX1_RX_MODE_10_HIGH3 RX_OFF(1, 0x190)
#define QSERDES_RX1_RX_MODE_10_HIGH4 RX_OFF(1, 0x194)
#define QSERDES_RX1_DCC_CTRL1 RX_OFF(1, 0x1A8)
+#define QSERDES_RX1_VGA_CAL_CNTRL2 RX_OFF(1, 0xD8)
#define QSERDES_RX1_GM_CAL RX_OFF(1, 0xDC)
#define QSERDES_RX1_AC_JTAG_ENABLE RX_OFF(1, 0x68)
#define QSERDES_RX1_UCDR_FO_GAIN RX_OFF(1, 0x08)
@@ -221,7 +230,10 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_1, 0x35),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_1, 0xF5),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_3, 0x3F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_RES_CODE_LANE_OFFSET_TX, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_RES_CODE_LANE_OFFSET_RX, 0x09),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_TRAN_DRVR_EMP_EN, 0x0C),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_LVL, 0x24),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_CNTRL, 0x0F),
@@ -232,7 +244,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0xF1),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW, 0x80),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CTRL2, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FO_GAIN, 0x0E),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FO_GAIN, 0x0C),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_GAIN, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_TERM_BW, 0x1B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2, 0x06),
@@ -242,11 +254,11 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_IDAC_MEASURE_TIME, 0x10),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_IDAC_TSETTLE_LOW, 0xC0),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_IDAC_TSETTLE_HIGH, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_LOW, 0x6D),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH, 0x6D),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH2, 0xED),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH3, 0x3B),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH4, 0x3C),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_LOW, 0x64),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH, 0x64),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH2, 0x24),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH3, 0x3F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00_HIGH4, 0x1F),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_01_LOW, 0xE0),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_01_HIGH, 0xC8),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_01_HIGH2, 0xC8),
@@ -258,6 +270,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_10_HIGH3, 0x3B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_10_HIGH4, 0xB1),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_DCC_CTRL1, 0x0C),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_VGA_CAL_CNTRL2, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6D),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
@@ -279,7 +292,10 @@ static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_1, 0x35),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_1, 0xF5),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_3, 0x3F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_RES_CODE_LANE_OFFSET_TX, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_RES_CODE_LANE_OFFSET_RX, 0x09),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_TRAN_DRVR_EMP_EN, 0x0C),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_LVL, 0x24),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_CNTRL, 0x0F),
@@ -290,7 +306,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CONTROLS, 0xF1),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW, 0x80),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CTRL2, 0x80),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FO_GAIN, 0x0E),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FO_GAIN, 0x0C),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_GAIN, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_TERM_BW, 0x1B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2, 0x06),
@@ -300,11 +316,11 @@ static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_IDAC_MEASURE_TIME, 0x10),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_IDAC_TSETTLE_LOW, 0xC0),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_IDAC_TSETTLE_HIGH, 0x00),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_LOW, 0x6D),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH, 0x6D),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH2, 0xED),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH3, 0x3B),
- UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH4, 0x3C),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_LOW, 0x64),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH, 0x64),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH2, 0x24),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH3, 0x3F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_00_HIGH4, 0x1F),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_01_LOW, 0xE0),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_01_HIGH, 0xC8),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_01_HIGH2, 0xC8),
@@ -316,6 +332,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_10_HIGH3, 0x3B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_10_HIGH4, 0xB1),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_DCC_CTRL1, 0x0C),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_VGA_CAL_CNTRL2, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02),
};
diff --git a/drivers/pinctrl/qcom/pinctrl-bengal.c b/drivers/pinctrl/qcom/pinctrl-bengal.c
index cd20603..a93ddca 100644
--- a/drivers/pinctrl/qcom/pinctrl-bengal.c
+++ b/drivers/pinctrl/qcom/pinctrl-bengal.c
@@ -1,15 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -32,7 +23,7 @@
#define EAST 0x00900000
#define DUMMY 0x0
#define REG_SIZE 0x1000
-#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9, wake_off, bit) \
{ \
.name = "gpio" #id, \
.pins = gpio##id##_pins, \
@@ -71,6 +62,8 @@
.intr_polarity_bit = 1, \
.intr_detection_bit = 2, \
.intr_detection_width = 2, \
+ .wake_reg = base + wake_off, \
+ .wake_bit = bit, \
}
#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
@@ -372,14 +365,14 @@ static const unsigned int sdc2_data_pins[] = { 119 };
static const unsigned int ufs_reset_pins[] = { 120 };
enum bengal_functions {
- msm_mux_ddr_bist,
- msm_mux_m_voc,
- msm_mux_gpio,
msm_mux_qup0,
+ msm_mux_gpio,
+ msm_mux_ddr_bist,
msm_mux_phase_flag0,
msm_mux_qdss_gpio8,
msm_mux_atest_tsens,
msm_mux_mpm_pwr,
+ msm_mux_m_voc,
msm_mux_phase_flag1,
msm_mux_qdss_gpio9,
msm_mux_atest_tsens2,
@@ -555,11 +548,8 @@ enum bengal_functions {
msm_mux_NA,
};
-static const char * const ddr_bist_groups[] = {
- "gpio0", "gpio1", "gpio2", "gpio3",
-};
-static const char * const m_voc_groups[] = {
- "gpio0",
+static const char * const qup0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio82", "gpio86",
};
static const char * const gpio_groups[] = {
"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
@@ -580,8 +570,8 @@ static const char * const gpio_groups[] = {
"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
"gpio111", "gpio112",
};
-static const char * const qup0_groups[] = {
- "gpio0", "gpio1", "gpio2", "gpio3", "gpio82", "gpio86",
+static const char * const ddr_bist_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
};
static const char * const phase_flag0_groups[] = {
"gpio0",
@@ -595,6 +585,9 @@ static const char * const atest_tsens_groups[] = {
static const char * const mpm_pwr_groups[] = {
"gpio1",
};
+static const char * const m_voc_groups[] = {
+ "gpio0",
+};
static const char * const phase_flag1_groups[] = {
"gpio1",
};
@@ -1114,14 +1107,14 @@ static const char * const dac_calib25_groups[] = {
};
static const struct msm_function bengal_functions[] = {
- FUNCTION(ddr_bist),
- FUNCTION(m_voc),
- FUNCTION(gpio),
FUNCTION(qup0),
+ FUNCTION(gpio),
+ FUNCTION(ddr_bist),
FUNCTION(phase_flag0),
FUNCTION(qdss_gpio8),
FUNCTION(atest_tsens),
FUNCTION(mpm_pwr),
+ FUNCTION(m_voc),
FUNCTION(phase_flag1),
FUNCTION(qdss_gpio9),
FUNCTION(atest_tsens2),
@@ -1303,182 +1296,215 @@ static const struct msm_function bengal_functions[] = {
*/
static const struct msm_pingroup bengal_groups[] = {
[0] = PINGROUP(0, WEST, qup0, m_voc, ddr_bist, NA, phase_flag0,
- qdss_gpio8, atest_tsens, NA, NA),
+ qdss_gpio8, atest_tsens, NA, NA, 0x71000, 1),
[1] = PINGROUP(1, WEST, qup0, mpm_pwr, ddr_bist, NA, phase_flag1,
- qdss_gpio9, atest_tsens2, NA, NA),
+ qdss_gpio9, atest_tsens2, NA, NA, 0, -1),
[2] = PINGROUP(2, WEST, qup0, ddr_bist, NA, phase_flag2, qdss_gpio10,
- dac_calib0, atest_usb10, NA, NA),
+ dac_calib0, atest_usb10, NA, NA, 0, -1),
[3] = PINGROUP(3, WEST, qup0, ddr_bist, NA, phase_flag3, qdss_gpio11,
- dac_calib1, atest_usb11, NA, NA),
+ dac_calib1, atest_usb11, NA, NA, 0x71000, 2),
[4] = PINGROUP(4, WEST, qup1, CRI_TRNG0, NA, phase_flag4, dac_calib2,
- atest_usb12, NA, NA, NA),
+ atest_usb12, NA, NA, NA, 0x71000, 3),
[5] = PINGROUP(5, WEST, qup1, CRI_TRNG1, NA, phase_flag5, dac_calib3,
- atest_usb13, NA, NA, NA),
+ atest_usb13, NA, NA, NA, 0, -1),
[6] = PINGROUP(6, WEST, qup2, NA, phase_flag6, dac_calib4, atest_usb1,
- NA, NA, NA, NA),
- [7] = PINGROUP(7, WEST, qup2, NA, NA, NA, NA, NA, NA, NA, NA),
+ NA, NA, NA, NA, 0x71000, 4),
+ [7] = PINGROUP(7, WEST, qup2, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
[8] = PINGROUP(8, EAST, qup3, pbs_out, PLL_BIST, NA, qdss_gpio, NA,
- tsense_pwm, NA, NA),
+ tsense_pwm, NA, NA, 0x71000, 0),
[9] = PINGROUP(9, EAST, qup3, pbs_out, PLL_BIST, NA, qdss_gpio, NA, NA,
- NA, NA),
+ NA, NA, 0, -1),
[10] = PINGROUP(10, EAST, qup3, AGERA_PLL, NA, pbs0, qdss_gpio0, NA,
- NA, NA, NA),
+ NA, NA, NA, 0, -1),
[11] = PINGROUP(11, EAST, qup3, AGERA_PLL, NA, pbs1, qdss_gpio1, NA,
- NA, NA, NA),
- [12] = PINGROUP(12, WEST, qup4, tgu_ch0, NA, NA, NA, NA, NA, NA, NA),
- [13] = PINGROUP(13, WEST, qup4, tgu_ch1, NA, NA, NA, NA, NA, NA, NA),
+ NA, NA, NA, 0x71000, 1),
+ [12] = PINGROUP(12, WEST, qup4, tgu_ch0, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
+ [13] = PINGROUP(13, WEST, qup4, tgu_ch1, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 5),
[14] = PINGROUP(14, WEST, qup5, tgu_ch2, NA, phase_flag7, qdss_gpio4,
- dac_calib5, NA, NA, NA),
+ dac_calib5, NA, NA, NA, 0x71000, 6),
[15] = PINGROUP(15, WEST, qup5, tgu_ch3, NA, phase_flag8, qdss_gpio5,
- dac_calib6, NA, NA, NA),
+ dac_calib6, NA, NA, NA, 0, -1),
[16] = PINGROUP(16, WEST, qup5, NA, phase_flag9, qdss_gpio6,
- dac_calib7, NA, NA, NA, NA),
+ dac_calib7, NA, NA, NA, NA, 0, -1),
[17] = PINGROUP(17, WEST, qup5, NA, phase_flag10, qdss_gpio7,
- dac_calib8, NA, NA, NA, NA),
+ dac_calib8, NA, NA, NA, NA, 0x71000, 7),
[18] = PINGROUP(18, EAST, SDC2_TB, CRI_TRNG, pbs2, qdss_gpio2, NA, NA,
- NA, NA, NA),
+ NA, NA, NA, 0x71000, 2),
[19] = PINGROUP(19, EAST, SDC1_TB, pbs3, qdss_gpio3, NA, NA, NA, NA,
- NA, NA),
+ NA, NA, 0x71000, 3),
[20] = PINGROUP(20, EAST, cam_mclk, pbs4, qdss_gpio4, NA, NA, NA, NA,
- NA, NA),
+ NA, NA, 0, -1),
[21] = PINGROUP(21, EAST, cam_mclk, adsp_ext, pbs5, qdss_gpio5, NA, NA,
- NA, NA, NA),
+ NA, NA, NA, 0, -1),
[22] = PINGROUP(22, EAST, cci_i2c, prng_rosc, NA, pbs6, phase_flag11,
- qdss_gpio6, dac_calib9, atest_usb20, NA),
+ qdss_gpio6, dac_calib9, atest_usb20, NA, 0, -1),
[23] = PINGROUP(23, EAST, cci_i2c, prng_rosc, NA, pbs7, phase_flag12,
- qdss_gpio7, dac_calib10, atest_usb21, NA),
+ qdss_gpio7, dac_calib10, atest_usb21, NA, 0, -1),
[24] = PINGROUP(24, EAST, CCI_TIMER1, GCC_GP1, NA, pbs8, phase_flag13,
- qdss_gpio8, dac_calib11, atest_usb22, NA),
+ qdss_gpio8, dac_calib11, atest_usb22, NA, 0x71000, 4),
[25] = PINGROUP(25, EAST, cci_async, CCI_TIMER0, NA, pbs9,
- phase_flag14, qdss_gpio9, dac_calib12, atest_usb23, NA),
+ phase_flag14, qdss_gpio9, dac_calib12, atest_usb23, NA,
+ 0x71000, 5),
[26] = PINGROUP(26, EAST, NA, pbs10, phase_flag15, qdss_gpio10,
- dac_calib13, atest_usb2, vsense_trigger, NA, NA),
+ dac_calib13, atest_usb2, vsense_trigger, NA, NA, 0, -1),
[27] = PINGROUP(27, EAST, cam_mclk, qdss_cti, NA, NA, NA, NA, NA, NA,
- NA),
+ NA, 0x71000, 6),
[28] = PINGROUP(28, EAST, cam_mclk, CCI_TIMER2, qdss_cti, NA, NA, NA,
- NA, NA, NA),
+ NA, NA, NA, 0x71000, 7),
[29] = PINGROUP(29, EAST, cci_i2c, NA, phase_flag16, dac_calib14,
- atest_char, NA, NA, NA, NA),
+ atest_char, NA, NA, NA, NA, 0, -1),
[30] = PINGROUP(30, EAST, cci_i2c, NA, phase_flag17, dac_calib15,
- atest_char0, NA, NA, NA, NA),
+ atest_char0, NA, NA, NA, NA, 0, -1),
[31] = PINGROUP(31, EAST, GP_PDM0, NA, phase_flag18, dac_calib16,
- atest_char1, NA, NA, NA, NA),
+ atest_char1, NA, NA, NA, NA, 0x71000, 8),
[32] = PINGROUP(32, EAST, CCI_TIMER3, GP_PDM1, NA, phase_flag19,
- dac_calib17, atest_char2, NA, NA, NA),
+ dac_calib17, atest_char2, NA, NA, NA, 0x71000, 9),
[33] = PINGROUP(33, EAST, GP_PDM2, NA, phase_flag20, dac_calib18,
- atest_char3, NA, NA, NA, NA),
- [34] = PINGROUP(34, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [35] = PINGROUP(35, EAST, NA, phase_flag21, NA, NA, NA, NA, NA, NA, NA),
- [36] = PINGROUP(36, EAST, NA, phase_flag22, NA, NA, NA, NA, NA, NA, NA),
- [37] = PINGROUP(37, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [38] = PINGROUP(38, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [39] = PINGROUP(39, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [40] = PINGROUP(40, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [41] = PINGROUP(41, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [42] = PINGROUP(42, EAST, NA, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA),
- [43] = PINGROUP(43, EAST, NA, NA, phase_flag23, NA, NA, NA, NA, NA, NA),
- [44] = PINGROUP(44, EAST, NA, NA, phase_flag24, NA, NA, NA, NA, NA, NA),
- [45] = PINGROUP(45, EAST, NA, NA, phase_flag25, NA, NA, NA, NA, NA, NA),
- [46] = PINGROUP(46, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ atest_char3, NA, NA, NA, NA, 0x71000, 10),
+ [34] = PINGROUP(34, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 11),
+ [35] = PINGROUP(35, EAST, NA, phase_flag21, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 12),
+ [36] = PINGROUP(36, EAST, NA, phase_flag22, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 13),
+ [37] = PINGROUP(37, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [38] = PINGROUP(38, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [39] = PINGROUP(39, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 14),
+ [40] = PINGROUP(40, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [41] = PINGROUP(41, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [42] = PINGROUP(42, EAST, NA, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
+ [43] = PINGROUP(43, EAST, NA, NA, phase_flag23, NA, NA, NA, NA, NA, NA,
+ 0, -1),
+ [44] = PINGROUP(44, EAST, NA, NA, phase_flag24, NA, NA, NA, NA, NA, NA,
+ 0, -1),
+ [45] = PINGROUP(45, EAST, NA, NA, phase_flag25, NA, NA, NA, NA, NA, NA,
+ 0, -1),
+ [46] = PINGROUP(46, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 15),
[47] = PINGROUP(47, EAST, NA, NAV_GPIO, pbs14, qdss_gpio14, NA, NA, NA,
- NA, NA),
+ NA, NA, 0, -1),
[48] = PINGROUP(48, EAST, NA, vfr_1, NA, pbs15, qdss_gpio15, NA, NA,
- NA, NA),
- [49] = PINGROUP(49, EAST, NA, PA_INDICATOR, NA, NA, NA, NA, NA, NA, NA),
- [50] = PINGROUP(50, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [51] = PINGROUP(51, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ NA, NA, 0, -1),
+ [49] = PINGROUP(49, EAST, NA, PA_INDICATOR, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
+ [50] = PINGROUP(50, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [51] = PINGROUP(51, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
[52] = PINGROUP(52, EAST, NA, NAV_GPIO, pbs_out, NA, NA, NA, NA, NA,
- NA),
- [53] = PINGROUP(53, EAST, NA, gsm1_tx, NA, NA, NA, NA, NA, NA, NA),
- [54] = PINGROUP(54, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [55] = PINGROUP(55, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [56] = PINGROUP(56, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [57] = PINGROUP(57, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [58] = PINGROUP(58, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [59] = PINGROUP(59, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA),
- [60] = PINGROUP(60, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA),
- [61] = PINGROUP(61, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [62] = PINGROUP(62, EAST, NA, pll_bypassnl, NA, NA, NA, NA, NA, NA, NA),
+ NA, 0, -1),
+ [53] = PINGROUP(53, EAST, NA, gsm1_tx, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
+ [54] = PINGROUP(54, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [55] = PINGROUP(55, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [56] = PINGROUP(56, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [57] = PINGROUP(57, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [58] = PINGROUP(58, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [59] = PINGROUP(59, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
+ [60] = PINGROUP(60, EAST, NA, SSBI_WTR1, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
+ [61] = PINGROUP(61, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [62] = PINGROUP(62, EAST, NA, pll_bypassnl, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 16),
[63] = PINGROUP(63, EAST, pll_reset, NA, phase_flag26, ddr_pxi0, NA,
- NA, NA, NA, NA),
+ NA, NA, NA, NA, 0x71000, 17),
[64] = PINGROUP(64, EAST, gsm0_tx, NA, phase_flag27, ddr_pxi0, NA, NA,
- NA, NA, NA),
- [65] = PINGROUP(65, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [66] = PINGROUP(66, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [67] = PINGROUP(67, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [68] = PINGROUP(68, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ NA, NA, NA, 0x71000, 18),
+ [65] = PINGROUP(65, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 8),
+ [66] = PINGROUP(66, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 9),
+ [67] = PINGROUP(67, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 10),
+ [68] = PINGROUP(68, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
[69] = PINGROUP(69, WEST, qup1, GCC_GP2, qdss_gpio12, ddr_pxi1, NA, NA,
- NA, NA, NA),
+ NA, NA, NA, 0x71000, 11),
[70] = PINGROUP(70, WEST, qup1, GCC_GP3, qdss_gpio13, ddr_pxi1, NA, NA,
- NA, NA, NA),
- [71] = PINGROUP(71, WEST, qup2, dbg_out, NA, NA, NA, NA, NA, NA, NA),
+ NA, NA, NA, 0x71000, 12),
+ [71] = PINGROUP(71, WEST, qup2, dbg_out, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
[72] = PINGROUP(72, SOUTH, uim2_data, qdss_cti, NA, NA, NA, NA, NA, NA,
- NA),
+ NA, 0x71000, 3),
[73] = PINGROUP(73, SOUTH, uim2_clk, NA, qdss_cti, NA, NA, NA, NA, NA,
- NA),
- [74] = PINGROUP(74, SOUTH, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ NA, 0, -1),
+ [74] = PINGROUP(74, SOUTH, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
[75] = PINGROUP(75, SOUTH, uim2_present, NA, NA, NA, NA, NA, NA, NA,
- NA),
- [76] = PINGROUP(76, SOUTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
- [77] = PINGROUP(77, SOUTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
- [78] = PINGROUP(78, SOUTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ NA, 0x71000, 4),
+ [76] = PINGROUP(76, SOUTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
+ [77] = PINGROUP(77, SOUTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
+ [78] = PINGROUP(78, SOUTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
[79] = PINGROUP(79, SOUTH, uim1_present, NA, NA, NA, NA, NA, NA, NA,
- NA),
+ NA, 0x71000, 5),
[80] = PINGROUP(80, WEST, qup2, dac_calib19, NA, NA, NA, NA, NA, NA,
- NA),
+ NA, 0x71000, 13),
[81] = PINGROUP(81, WEST, mdp_vsync, mdp_vsync, mdp_vsync, dac_calib20,
- NA, NA, NA, NA, NA),
+ NA, NA, NA, NA, NA, 0x71000, 14),
[82] = PINGROUP(82, WEST, qup0, dac_calib21, NA, NA, NA, NA, NA, NA,
- NA),
- [83] = PINGROUP(83, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [84] = PINGROUP(84, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [85] = PINGROUP(85, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ NA, 0, -1),
+ [83] = PINGROUP(83, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 15),
+ [84] = PINGROUP(84, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 16),
+ [85] = PINGROUP(85, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 17),
[86] = PINGROUP(86, WEST, qup0, GCC_GP1, atest_bbrx1, NA, NA, NA, NA,
- NA, NA),
+ NA, NA, 0x71000, 18),
[87] = PINGROUP(87, EAST, pbs11, qdss_gpio11, NA, NA, NA, NA, NA, NA,
- NA),
- [88] = PINGROUP(88, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ NA, 0x71000, 19),
+ [88] = PINGROUP(88, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 20),
[89] = PINGROUP(89, WEST, usb_phy, atest_bbrx0, NA, NA, NA, NA, NA, NA,
- NA),
+ NA, 0x71000, 19),
[90] = PINGROUP(90, EAST, mss_lte, pbs12, qdss_gpio12, NA, NA, NA, NA,
- NA, NA),
+ NA, NA, 0, -1),
[91] = PINGROUP(91, EAST, mss_lte, pbs13, qdss_gpio13, NA, NA, NA, NA,
- NA, NA),
- [92] = PINGROUP(92, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [93] = PINGROUP(93, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ NA, NA, 0x71000, 21),
+ [92] = PINGROUP(92, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [93] = PINGROUP(93, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 20),
[94] = PINGROUP(94, WEST, NA, qdss_gpio14, wlan1_adc0, NA, NA, NA, NA,
- NA, NA),
+ NA, NA, 0x71000, 21),
[95] = PINGROUP(95, WEST, NAV_GPIO, GP_PDM0, qdss_gpio15, wlan1_adc1,
- NA, NA, NA, NA, NA),
+ NA, NA, NA, NA, NA, 0x71000, 22),
[96] = PINGROUP(96, WEST, qup4, NAV_GPIO, mdp_vsync, GP_PDM1, sd_write,
- JITTER_BIST, qdss_cti, qdss_cti, NA),
+ JITTER_BIST, qdss_cti, qdss_cti, NA, 0x71000, 23),
[97] = PINGROUP(97, WEST, qup4, NAV_GPIO, mdp_vsync, GP_PDM2,
- JITTER_BIST, qdss_cti, qdss_cti, NA, NA),
- [98] = PINGROUP(98, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [99] = PINGROUP(99, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ JITTER_BIST, qdss_cti, qdss_cti, NA, NA, 0x71000, 24),
+ [98] = PINGROUP(98, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [99] = PINGROUP(99, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 6),
[100] = PINGROUP(100, SOUTH, atest_gpsadc_dtest0_native, NA, NA, NA,
- NA, NA, NA, NA, NA),
+ NA, NA, NA, NA, NA, 0, -1),
[101] = PINGROUP(101, SOUTH, atest_gpsadc_dtest1_native, NA, NA, NA,
- NA, NA, NA, NA, NA),
+ NA, NA, NA, NA, NA, 0, -1),
[102] = PINGROUP(102, SOUTH, NA, phase_flag28, dac_calib22, ddr_pxi2,
- NA, NA, NA, NA, NA),
+ NA, NA, NA, NA, NA, 0x71000, 7),
[103] = PINGROUP(103, SOUTH, NA, phase_flag29, dac_calib23, ddr_pxi2,
- NA, NA, NA, NA, NA),
+ NA, NA, NA, NA, NA, 0x71000, 8),
[104] = PINGROUP(104, SOUTH, NA, phase_flag30, qdss_gpio1, dac_calib24,
- ddr_pxi3, NA, NA, NA, NA),
+ ddr_pxi3, NA, NA, NA, NA, 0x71000, 9),
[105] = PINGROUP(105, SOUTH, NA, phase_flag31, qdss_gpio, dac_calib25,
- ddr_pxi3, NA, NA, NA, NA),
+ ddr_pxi3, NA, NA, NA, NA, 0x71000, 10),
[106] = PINGROUP(106, SOUTH, NAV_GPIO, GCC_GP3, qdss_gpio, NA, NA, NA,
- NA, NA, NA),
+ NA, NA, NA, 0x71000, 11),
[107] = PINGROUP(107, SOUTH, NAV_GPIO, GCC_GP2, qdss_gpio0, NA, NA, NA,
- NA, NA, NA),
- [108] = PINGROUP(108, SOUTH, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA, NA),
+ NA, NA, NA, 0x71000, 12),
+ [108] = PINGROUP(108, SOUTH, NAV_GPIO, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0, -1),
[109] = PINGROUP(109, SOUTH, NA, qdss_gpio2, NA, NA, NA, NA, NA, NA,
- NA),
+ NA, 0x71000, 13),
[110] = PINGROUP(110, SOUTH, NA, qdss_gpio3, NA, NA, NA, NA, NA, NA,
- NA),
- [111] = PINGROUP(111, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
- [112] = PINGROUP(112, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ NA, 0, -1),
+ [111] = PINGROUP(111, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1),
+ [112] = PINGROUP(112, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA,
+ 0x71000, 14),
[113] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x175000, 15, 0),
[114] = SDC_QDSD_PINGROUP(sdc1_clk, 0x175000, 13, 6),
[115] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x175000, 11, 3),
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index d578a72..cc6bf43 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -2423,6 +2423,11 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
atomic_inc(&gsi_ctx->num_chan);
+ if (props->prot == GSI_CHAN_PROT_GCI) {
+ gsi_ctx->coal_info.ch_id = props->ch_id;
+ gsi_ctx->coal_info.evchid = props->evt_ring_hdl;
+ }
+
return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_channel);
@@ -3155,6 +3160,10 @@ int gsi_dealloc_channel(unsigned long chan_hdl)
atomic_dec(&ctx->evtr->chan_ref_cnt);
atomic_dec(&gsi_ctx->num_chan);
+ if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
+ gsi_ctx->coal_info.ch_id = GSI_CHAN_MAX;
+ gsi_ctx->coal_info.evchid = GSI_EVT_RING_MAX;
+ }
return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_channel);
@@ -3707,7 +3716,7 @@ EXPORT_SYMBOL(gsi_poll_n_channel);
int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
{
- struct gsi_chan_ctx *ctx;
+ struct gsi_chan_ctx *ctx, *coal_ctx;
enum gsi_chan_mode curr;
unsigned long flags;
enum gsi_chan_mode chan_mode;
@@ -3753,8 +3762,14 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee));
atomic_set(&ctx->poll_mode, mode);
- if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan)
+ if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
atomic_set(&ctx->evtr->chan->poll_mode, mode);
+ } else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
+ coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
+ if (coal_ctx != NULL)
+ atomic_set(&coal_ctx->poll_mode, mode);
+ }
+
GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
ctx->evtr->id, mode);
ctx->stats.callback_to_poll++;
@@ -3763,8 +3778,13 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
if (curr == GSI_CHAN_MODE_POLL &&
mode == GSI_CHAN_MODE_CALLBACK) {
atomic_set(&ctx->poll_mode, mode);
- if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan)
+ if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
atomic_set(&ctx->evtr->chan->poll_mode, mode);
+ } else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
+ coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
+ if (coal_ctx != NULL)
+ atomic_set(&coal_ctx->poll_mode, mode);
+ }
__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
ctx->evtr->id, mode);
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 45dec44..3b6c648 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -200,6 +200,11 @@ struct gsi_generic_ee_cmd_debug_stats {
unsigned long halt_channel;
};
+struct gsi_coal_chan_info {
+ uint8_t ch_id;
+ uint8_t evchid;
+};
+
struct gsi_ctx {
void __iomem *base;
struct device *dev;
@@ -223,6 +228,7 @@ struct gsi_ctx {
struct completion gen_ee_cmd_compl;
void *ipc_logbuf;
void *ipc_logbuf_low;
+ struct gsi_coal_chan_info coal_info;
/*
* The following used only on emulation systems.
*/
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 927b040..79bc2f9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -34,6 +34,7 @@
#include <asm/cacheflush.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/of_irq.h>
+#include <linux/ctype.h>
#ifdef CONFIG_ARM64
@@ -5968,6 +5969,7 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
unsigned long missing;
char dbg_buff[32] = { 0 };
+ int i = 0;
if (count >= sizeof(dbg_buff))
return -EFAULT;
@@ -5988,6 +5990,17 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
if (ipa3_is_ready())
return count;
+ /* Ignore writes that contain only whitespace (empty ipa_config) */
+ for (i = 0; i < count; ++i) {
+ if (!isspace(dbg_buff[i]))
+ break;
+ }
+
+ if (i == count) {
+ IPADBG("Empty ipa_config file\n");
+ return count;
+ }
+
/* Check MHI configuration on MDM devices */
if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 0222b28..240513a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1418,6 +1418,8 @@ struct ipa3_uc_ctx {
u32 ering_rp_local;
u32 ering_wp;
u32 ering_rp;
+ struct ipa_wdi_bw_info info;
+ uint64_t bw_info_max;
};
/**
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index beb42b5..3796b98 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -385,6 +385,9 @@ static void ipa3_handle_mhi_vote_req(struct qmi_handle *qmi_handle,
IPA_QMI_ERR_NOT_SUPPORTED_V01;
}
resp = &resp2;
+ } else {
+ IPAWANERR("clk_rate_valid is false\n");
+ return;
}
} else {
resp = imp_handle_vote_req(vote_req->mhi_vote);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index a6ce947..8f0933b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -362,16 +362,21 @@ static void ipa3_event_ring_hdlr(void)
e_b->Protocol,
e_b->Value.bw_param.ThresholdIndex,
e_b->Value.bw_param.throughput);
-
- memset(&bw_info, 0, sizeof(struct ipa_inform_wlan_bw));
- bw_info.index =
- e_b->Value.bw_param.ThresholdIndex;
- mul = 1000 / IPA_UC_MON_INTERVAL;
- bw_info.throughput =
- e_b->Value.bw_param.throughput*mul;
- if (ipa3_inform_wlan_bw(&bw_info))
- IPAERR_RL("failed on index %d to wlan\n",
- bw_info.index);
+ /* only report readings below the configured maximum */
+ mul = 1000 * IPA_UC_MON_INTERVAL;
+ if (e_b->Value.bw_param.throughput <
+ ipa3_ctx->uc_ctx.bw_info_max * mul) {
+ memset(&bw_info, 0,
+ sizeof(struct ipa_inform_wlan_bw));
+ bw_info.index =
+ e_b->Value.bw_param.ThresholdIndex;
+ mul = 1000 / IPA_UC_MON_INTERVAL;
+ bw_info.throughput =
+ e_b->Value.bw_param.throughput*mul;
+ if (ipa3_inform_wlan_bw(&bw_info))
+ IPAERR_RL("failed index %d to wlan\n",
+ bw_info.index);
+ }
} else if (((struct eventElement_t *) rp_va)->Opcode
== QUOTA_NOTIFY) {
e_q = ((struct eventElement_t *) rp_va);
@@ -1353,10 +1358,21 @@ int ipa3_uc_bw_monitor(struct ipa_wdi_bw_info *info)
bw_info->params.WdiBw.Stop = info->stop;
IPADBG("stop bw-monitor? %d\n", bw_info->params.WdiBw.Stop);
+ /* cache the bw info */
+ ipa3_ctx->uc_ctx.info.num = info->num;
+ ipa3_ctx->uc_ctx.info.stop = info->stop;
+ ipa3_ctx->uc_ctx.bw_info_max = 0;
+
for (i = 0; i < info->num; i++) {
bw_info->params.WdiBw.BwThreshold[i] = info->threshold[i];
IPADBG("%d-st, %lu\n", i, bw_info->params.WdiBw.BwThreshold[i]);
+ ipa3_ctx->uc_ctx.info.threshold[i] = info->threshold[i];
+ if (info->threshold[i] > ipa3_ctx->uc_ctx.bw_info_max)
+ ipa3_ctx->uc_ctx.bw_info_max = info->threshold[i];
}
+ /* double the max so it covers UL and DL combined */
+ ipa3_ctx->uc_ctx.bw_info_max *= 2;
+ IPADBG("bw-monitor max %lu\n", ipa3_ctx->uc_ctx.bw_info_max);
bw_info->params.WdiBw.info.Num = 8;
ind = ipa3_ctx->fnr_info.hw_counter_offset +
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index d17ff30..fb8b055 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -2423,7 +2423,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
- [IPA_4_5][IPA_CLIENT_USB_DPL_CONS] = {
+ [IPA_4_5_MHI][IPA_CLIENT_USB_DPL_CONS] = {
true, IPA_v4_5_MHI_GROUP_DDR,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 11c520f..cfc7751 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -3104,7 +3104,8 @@ static int rmnet_ipa3_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
IPAWANERR("iface name %s, quota %lu\n",
data->interface_name, (unsigned long) data->quota_mbytes);
- if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+ ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
IPADBG("use ipa-uc for quota\n");
rc = ipa3_uc_quota_monitor(data->set_quota);
} else {
@@ -3835,6 +3836,7 @@ int rmnet_ipa3_query_tethering_stats_all(
} else if (upstream_type == IPA_UPSTEAM_WLAN) {
IPAWANDBG_LOW(" query wifi-backhaul stats\n");
if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5 ||
+ ipa3_ctx->ipa_hw_type == IPA_HW_v4_7 ||
!ipa3_ctx->hw_stats.enabled) {
IPAWANDBG("hw version %d,hw_stats.enabled %d\n",
ipa3_ctx->ipa_hw_type,
diff --git a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
index caae2ba..c4d53df 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
@@ -736,7 +736,8 @@ static int ipa_test_hw_stats_set_uc_event_ring(void *priv)
/* set uc event ring */
IPA_UT_INFO("========set uc event ring ========\n");
- if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5 &&
+ ipa3_ctx->ipa_hw_type != IPA_HW_v4_7) {
if (ipa3_ctx->uc_ctx.uc_loaded &&
!ipa3_ctx->uc_ctx.uc_event_ring_valid) {
if (ipa3_uc_setup_event_ring()) {
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
index 4bd4434..0dfb8184 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_framework.c
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
@@ -403,10 +403,10 @@ static ssize_t ipa_ut_dbgfs_meta_test_read(struct file *file,
meta_type = (long)(file->private_data);
IPA_UT_DBG("Meta test type %ld\n", meta_type);
- buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE, GFP_KERNEL);
+ buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE + 1, GFP_KERNEL);
if (!buf) {
IPA_UT_ERR("failed to allocate %d bytes\n",
- IPA_UT_DEBUG_READ_BUF_SIZE);
+ IPA_UT_DEBUG_READ_BUF_SIZE + 1);
cnt = 0;
goto unlock_mutex;
}
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 0b46a81..d98e9be 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -45,7 +45,7 @@ static const char * const power_supply_type_text[] = {
"USB_DCP", "USB_CDP", "USB_ACA", "USB_C",
"USB_PD", "USB_PD_DRP", "BrickID",
"USB_HVDCP", "USB_HVDCP_3", "Wireless", "USB_FLOAT",
- "BMS", "Parallel", "Main", "Wipower", "USB_C_UFP", "USB_C_DFP",
+ "BMS", "Parallel", "Main", "USB_C_UFP", "USB_C_DFP",
"Charge_Pump",
};
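Note that power_supply_type_text[] is indexed by enum power_supply_type, so dropping the "Wipower" string is only safe if POWER_SUPPLY_TYPE_WIPOWER is removed from the enum in the same series (that header change is not shown in this hunk); otherwise every later type would report a shifted name in sysfs. The qpnp-smb5.c change below, which switches the DC real type from POWER_SUPPLY_TYPE_WIPOWER to POWER_SUPPLY_TYPE_WIRELESS, is the consumer-side half of the same cleanup.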
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index b3766cb..f55c1b7 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -98,6 +98,7 @@ struct pl_data {
int fcc_step_delay_ms;
/* debugfs directory */
struct dentry *dfs_root;
+ u32 float_voltage_uv;
};
struct pl_data *the_chip;
@@ -982,6 +983,17 @@ static void fcc_stepper_work(struct work_struct *work)
vote(chip->pl_awake_votable, FCC_STEPPER_VOTER, false, 0);
}
+static bool is_batt_available(struct pl_data *chip)
+{
+ if (!chip->batt_psy)
+ chip->batt_psy = power_supply_get_by_name("battery");
+
+ if (!chip->batt_psy)
+ return false;
+
+ return true;
+}
+
#define PARALLEL_FLOAT_VOLTAGE_DELTA_UV 50000
static int pl_fv_vote_callback(struct votable *votable, void *data,
int fv_uv, const char *client)
@@ -1015,6 +1027,31 @@ static int pl_fv_vote_callback(struct votable *votable, void *data,
}
}
+ /*
+ * check for termination at reduced float voltage and re-trigger
+ * charging if new float voltage is above last FV.
+ */
+ if ((chip->float_voltage_uv < fv_uv) && is_batt_available(chip)) {
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_STATUS, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't get battery status rc=%d\n", rc);
+ } else {
+ if (pval.intval == POWER_SUPPLY_STATUS_FULL) {
+ pr_debug("re-triggering charging\n");
+ pval.intval = 1;
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_FORCE_RECHARGE,
+ &pval);
+ if (rc < 0)
+ pr_err("Couldn't set force recharge rc=%d\n",
+ rc);
+ }
+ }
+ }
+
+ chip->float_voltage_uv = fv_uv;
+
return 0;
}
@@ -1107,17 +1144,6 @@ static void pl_disable_forever_work(struct work_struct *work)
vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
}
-static bool is_batt_available(struct pl_data *chip)
-{
- if (!chip->batt_psy)
- chip->batt_psy = power_supply_get_by_name("battery");
-
- if (!chip->batt_psy)
- return false;
-
- return true;
-}
-
static int pl_disable_vote_callback(struct votable *votable,
void *data, int pl_disable, const char *client)
{
diff --git a/drivers/power/supply/qcom/hl6111r.c b/drivers/power/supply/qcom/hl6111r.c
index 6f0d297..1f2b132 100644
--- a/drivers/power/supply/qcom/hl6111r.c
+++ b/drivers/power/supply/qcom/hl6111r.c
@@ -297,6 +297,20 @@ static int hl6111r_get_vout_target(struct hl6111r *chip, int *val)
return rc;
}
+static int hl6111r_get_chip_version(struct hl6111r *chip, int *val)
+{
+ int rc;
+ u8 id;
+
+ rc = hl6111r_read(chip, ID_REG, &id);
+ if (rc < 0)
+ return rc;
+
+ *val = id;
+
+ return 0;
+}
+
/* Callbacks for settable properties */
#define HL6111R_MIN_VOLTAGE_UV 4940000
@@ -382,6 +396,7 @@ static enum power_supply_property hl6111r_psy_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+ POWER_SUPPLY_PROP_CHIP_VERSION,
};
static int hl6111r_get_prop(struct power_supply *psy,
@@ -427,6 +442,9 @@ static int hl6111r_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
rc = hl6111r_get_vout_target(chip, val);
break;
+ case POWER_SUPPLY_PROP_CHIP_VERSION:
+ rc = hl6111r_get_chip_version(chip, val);
+ break;
default:
rc = -EINVAL;
break;
diff --git a/drivers/power/supply/qcom/hl6111r.h b/drivers/power/supply/qcom/hl6111r.h
index cd4043e..c88cfedbb 100644
--- a/drivers/power/supply/qcom/hl6111r.h
+++ b/drivers/power/supply/qcom/hl6111r.h
@@ -19,6 +19,8 @@
#define VOUT_TARGET_REG 0x0E
+#define ID_REG 0xA
+
#define IOUT_LIM_SEL_REG 0x28
#define IOUT_LIM_SEL_MASK GENMASK(7, 3)
#define IOUT_LIM_SHIFT 3
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 9a00920..47197f0 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -1436,7 +1436,7 @@ static int smb5_dc_get_prop(struct power_supply *psy,
rc = smblib_get_prop_dc_voltage_max(chg, val);
break;
case POWER_SUPPLY_PROP_REAL_TYPE:
- val->intval = POWER_SUPPLY_TYPE_WIPOWER;
+ val->intval = POWER_SUPPLY_TYPE_WIRELESS;
break;
case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
rc = smblib_get_prop_voltage_wls_output(chg, val);
@@ -2785,6 +2785,7 @@ static int smb5_determine_initial_status(struct smb5 *chip)
smblib_suspend_on_debug_battery(chg);
usb_plugin_irq_handler(0, &irq_data);
+ dc_plugin_irq_handler(0, &irq_data);
typec_attach_detach_irq_handler(0, &irq_data);
typec_state_change_irq_handler(0, &irq_data);
usb_source_change_irq_handler(0, &irq_data);
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 2890df6..cadb594 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -832,10 +832,10 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
if (vbus_allowance != CONTINUOUS)
return 0;
+ aicl_threshold = min_allowed_uv / 1000 - CONT_AICL_HEADROOM_MV;
if (chg->adapter_cc_mode)
- aicl_threshold = AICL_THRESHOLD_MV_IN_CC;
- else
- aicl_threshold = min_allowed_uv / 1000 - CONT_AICL_HEADROOM_MV;
+ aicl_threshold = min(aicl_threshold, AICL_THRESHOLD_MV_IN_CC);
+
rc = smblib_set_charge_param(chg, &chg->param.aicl_cont_threshold,
aicl_threshold);
if (rc < 0) {
@@ -3584,6 +3584,7 @@ int smblib_get_prop_typec_power_role(struct smb_charger *chg,
return -EINVAL;
}
+ chg->power_role = val->intval;
return rc;
}
@@ -4172,15 +4173,86 @@ int smblib_set_prop_usb_voltage_max_limit(struct smb_charger *chg,
return 0;
}
+void smblib_typec_irq_config(struct smb_charger *chg, bool en)
+{
+ if (en == chg->typec_irq_en)
+ return;
+
+ if (en) {
+ enable_irq(
+ chg->irq_info[TYPEC_ATTACH_DETACH_IRQ].irq);
+ enable_irq(
+ chg->irq_info[TYPEC_CC_STATE_CHANGE_IRQ].irq);
+ enable_irq(
+ chg->irq_info[TYPEC_OR_RID_DETECTION_CHANGE_IRQ].irq);
+ } else {
+ disable_irq_nosync(
+ chg->irq_info[TYPEC_ATTACH_DETACH_IRQ].irq);
+ disable_irq_nosync(
+ chg->irq_info[TYPEC_CC_STATE_CHANGE_IRQ].irq);
+ disable_irq_nosync(
+ chg->irq_info[TYPEC_OR_RID_DETECTION_CHANGE_IRQ].irq);
+ }
+
+ chg->typec_irq_en = en;
+}
+
+#define PR_LOCK_TIMEOUT_MS 1000
int smblib_set_prop_typec_power_role(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc = 0;
u8 power_role;
+ enum power_supply_typec_mode typec_mode;
+ bool snk_attached = false, src_attached = false, is_pr_lock = false;
if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)
return 0;
+ smblib_dbg(chg, PR_MISC, "power role change: %d --> %d!",
+ chg->power_role, val->intval);
+
+ if (chg->power_role == val->intval) {
+ smblib_dbg(chg, PR_MISC, "power role already in %d, ignore!",
+ chg->power_role);
+ return 0;
+ }
+
+ typec_mode = smblib_get_prop_typec_mode(chg);
+ if (typec_mode >= POWER_SUPPLY_TYPEC_SINK &&
+ typec_mode <= POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER)
+ snk_attached = true;
+ else if (typec_mode >= POWER_SUPPLY_TYPEC_SOURCE_DEFAULT &&
+ typec_mode <= POWER_SUPPLY_TYPEC_SOURCE_HIGH)
+ src_attached = true;
+
+ /*
+ * If the current power role is DRP and type-c is already in the
+ * mode (source or sink) being requested, this is a power-role
+ * locking request from the USBPD driver. Disable the type-c
+ * interrupts while the role is locked to avoid redundant
+ * notifications.
+ */
+ if ((chg->power_role == POWER_SUPPLY_TYPEC_PR_DUAL) &&
+ ((src_attached && val->intval == POWER_SUPPLY_TYPEC_PR_SINK) ||
+ (snk_attached && val->intval == POWER_SUPPLY_TYPEC_PR_SOURCE)))
+ is_pr_lock = true;
+
+ smblib_dbg(chg, PR_MISC, "snk_attached = %d, src_attached = %d, is_pr_lock = %d\n",
+ snk_attached, src_attached, is_pr_lock);
+ cancel_delayed_work_sync(&chg->pr_lock_clear_work);
+ if (!chg->pr_lock_in_progress && is_pr_lock) {
+ smblib_dbg(chg, PR_MISC, "disable type-c interrupts for power role locking\n");
+ smblib_typec_irq_config(chg, false);
+ schedule_delayed_work(&chg->pr_lock_clear_work,
+ msecs_to_jiffies(PR_LOCK_TIMEOUT_MS));
+ } else if (chg->pr_lock_in_progress && !is_pr_lock) {
+ smblib_dbg(chg, PR_MISC, "restore type-c interrupts after exit power role locking\n");
+ smblib_typec_irq_config(chg, true);
+ }
+
+ chg->pr_lock_in_progress = is_pr_lock;
+
switch (val->intval) {
case POWER_SUPPLY_TYPEC_PR_NONE:
power_role = TYPEC_DISABLE_CMD_BIT;
@@ -4208,6 +4280,7 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg,
return rc;
}
+ chg->power_role = val->intval;
return rc;
}
@@ -5532,8 +5605,10 @@ static void typec_src_insertion(struct smb_charger *chg)
int rc = 0;
u8 stat;
- if (chg->pr_swap_in_progress)
+ if (chg->pr_swap_in_progress) {
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, false, 0);
return;
+ }
rc = smblib_read(chg, LEGACY_CABLE_STATUS_REG, &stat);
if (rc < 0) {
@@ -6190,7 +6265,8 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
chg->last_wls_vout = 0;
}
- power_supply_changed(chg->dc_psy);
+ if (chg->dc_psy)
+ power_supply_changed(chg->dc_psy);
smblib_dbg(chg, (PR_WLS | PR_INTERRUPT), "dcin_present= %d, usbin_present= %d, cp_reason = %d\n",
dcin_present, vbus_present, chg->cp_reason);
@@ -6467,13 +6543,19 @@ int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
if (rc < 0) {
smblib_err(chg, "Couldn't read TYPE_C_CCOUT_CONTROL_REG rc=%d\n",
rc);
+ return rc;
}
/* enable DRP */
rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG,
TYPEC_POWER_ROLE_CMD_MASK, 0);
- if (rc < 0)
+ if (rc < 0) {
smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc);
+ return rc;
+ }
+ chg->power_role = POWER_SUPPLY_TYPEC_PR_DUAL;
+ smblib_dbg(chg, PR_MISC, "restore power role: %d\n",
+ chg->power_role);
}
return 0;
@@ -6482,6 +6564,18 @@ int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
/***************
* Work Queues *
***************/
+static void smblib_pr_lock_clear_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ pr_lock_clear_work.work);
+
+ if (chg->pr_lock_in_progress) {
+ smblib_dbg(chg, PR_MISC, "restore type-c interrupts\n");
+ smblib_typec_irq_config(chg, true);
+ chg->pr_lock_in_progress = false;
+ }
+}
+
static void smblib_pr_swap_detach_work(struct work_struct *work)
{
struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -7258,6 +7352,8 @@ int smblib_init(struct smb_charger *chg)
INIT_DELAYED_WORK(&chg->usbov_dbc_work, smblib_usbov_dbc_work);
INIT_DELAYED_WORK(&chg->pr_swap_detach_work,
smblib_pr_swap_detach_work);
+ INIT_DELAYED_WORK(&chg->pr_lock_clear_work,
+ smblib_pr_lock_clear_work);
if (chg->wa_flags & CHG_TERMINATION_WA) {
INIT_WORK(&chg->chg_termination_work,
@@ -7301,6 +7397,7 @@ int smblib_init(struct smb_charger *chg)
chg->sec_chg_selected = POWER_SUPPLY_CHARGER_SEC_NONE;
chg->cp_reason = POWER_SUPPLY_CP_NONE;
chg->thermal_status = TEMP_BELOW_RANGE;
+ chg->typec_irq_en = true;
switch (chg->mode) {
case PARALLEL_MASTER:
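The power-role locking added above reduces to a small state machine: type-c interrupts are masked only on the transition into a lock, restored on the transition out, and the delayed work acts as a timeout in case the USBPD driver never clears the lock. A minimal userspace sketch of that decision logic follows; the struct and function names are invented for illustration and are not the driver's.

/*
 * Sketch of the lock/unlock decision made in smblib_set_prop_typec_power_role()
 * and the timeout behaviour of pr_lock_clear_work. Userspace, illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct chg_state {
    bool pr_lock_in_progress;
    bool typec_irq_en;
};

static void typec_irq_config(struct chg_state *c, bool en)
{
    if (en == c->typec_irq_en)
        return;                         /* mirrors the driver's early return */
    c->typec_irq_en = en;
    printf("type-c interrupts %s\n", en ? "enabled" : "disabled");
}

/* is_pr_lock: DRP role plus a request matching the already-attached partner */
static void handle_power_role_request(struct chg_state *c, bool is_pr_lock)
{
    if (!c->pr_lock_in_progress && is_pr_lock)
        typec_irq_config(c, false);     /* entering a lock: mask IRQs */
    else if (c->pr_lock_in_progress && !is_pr_lock)
        typec_irq_config(c, true);      /* leaving a lock: restore IRQs */
    c->pr_lock_in_progress = is_pr_lock;
}

/* timeout path, the counterpart of pr_lock_clear_work */
static void pr_lock_timeout(struct chg_state *c)
{
    if (c->pr_lock_in_progress) {
        typec_irq_config(c, true);
        c->pr_lock_in_progress = false;
    }
}

int main(void)
{
    struct chg_state c = { .typec_irq_en = true };

    handle_power_role_request(&c, true);    /* USBPD locks the role */
    pr_lock_timeout(&c);                    /* never cleared: timeout restores IRQs */
    return 0;
}

Running it, the timeout path re-enables exactly the interrupts the lock request had masked, which is the point of queueing pr_lock_clear_work with PR_LOCK_TIMEOUT_MS.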
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 32cd1a6..613304d 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -447,6 +447,7 @@ struct smb_charger {
struct delayed_work thermal_regulation_work;
struct delayed_work usbov_dbc_work;
struct delayed_work pr_swap_detach_work;
+ struct delayed_work pr_lock_clear_work;
struct alarm lpd_recheck_timer;
struct alarm moisture_protection_alarm;
@@ -464,10 +465,12 @@ struct smb_charger {
int voltage_max_uv;
int pd_active;
bool pd_hard_reset;
+ bool pr_lock_in_progress;
bool pr_swap_in_progress;
bool early_usb_attach;
bool ok_to_pd;
bool typec_legacy;
+ bool typec_irq_en;
/* cached status */
bool system_suspend_supported;
@@ -502,6 +505,7 @@ struct smb_charger {
int hw_max_icl_ua;
int auto_recharge_soc;
enum sink_src_mode sink_src_mode;
+ enum power_supply_typec_power_role power_role;
enum jeita_cfg_stat jeita_configured;
int charger_temp_max;
int smb_temp_max;
diff --git a/drivers/regulator/qpnp-amoled-regulator.c b/drivers/regulator/qpnp-amoled-regulator.c
index f5f89ee..78da900 100644
--- a/drivers/regulator/qpnp-amoled-regulator.c
+++ b/drivers/regulator/qpnp-amoled-regulator.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -74,7 +73,6 @@ struct ab_regulator {
/* DT params */
bool swire_control;
bool pd_control;
- u32 aod_entry_poll_time_ms;
};
struct ibb_regulator {
@@ -91,9 +89,6 @@ struct qpnp_amoled {
struct oledb_regulator oledb;
struct ab_regulator ab;
struct ibb_regulator ibb;
- struct mutex reg_lock;
- struct work_struct aod_work;
- struct workqueue_struct *wq;
/* DT params */
u32 oledb_base;
@@ -226,102 +221,6 @@ static int qpnp_ab_pd_control(struct qpnp_amoled *chip, bool en)
return qpnp_amoled_write(chip, AB_LDO_PD_CTL(chip), &val, 1);
}
-#define AB_VREG_OK_POLL_TIME_US 2000
-#define AB_VREG_OK_POLL_HIGH_TRIES 8
-#define AB_VREG_OK_POLL_HIGH_TIME_US 10000
-#define AB_VREG_OK_POLL_AGAIN_TRIES 10
-
-static int qpnp_ab_poll_vreg_ok(struct qpnp_amoled *chip, bool status,
- u32 poll_time_us)
-{
- u32 i, poll_us = AB_VREG_OK_POLL_TIME_US, wait_time_us = 0;
- bool swire_high = false, poll_again = false, monitor = false;
- int rc;
- u8 val;
-
- if (poll_time_us < AB_VREG_OK_POLL_TIME_US)
- return -EINVAL;
-
- i = poll_time_us / AB_VREG_OK_POLL_TIME_US;
-loop:
- while (i--) {
- /* Write a dummy value before reading AB_STATUS1 */
- rc = qpnp_amoled_write(chip, AB_STATUS1(chip), &val, 1);
- if (rc < 0)
- return rc;
-
- rc = qpnp_amoled_read(chip, AB_STATUS1(chip), &val, 1);
- if (rc < 0)
- return rc;
-
- wait_time_us += poll_us;
- if (((val & VREG_OK_BIT) >> VREG_OK_SHIFT) == status) {
- pr_debug("Waited for %d us\n", wait_time_us);
-
- /*
- * Return if we're polling for VREG_OK low. Else, poll
- * for VREG_OK high for at least 80 ms. IF VREG_OK stays
- * high, then consider it as a valid SWIRE pulse.
- */
-
- if (status) {
- swire_high = true;
- if (!poll_again && !monitor) {
- pr_debug("SWIRE is high, start monitoring\n");
- i = AB_VREG_OK_POLL_HIGH_TRIES;
- poll_us = AB_VREG_OK_POLL_HIGH_TIME_US;
- wait_time_us = 0;
- monitor = true;
- }
-
- if (poll_again)
- poll_again = false;
- } else {
- return 0;
- }
- } else {
- /*
- * If we're here when polling for VREG_OK high, then it
- * is possibly because of an intermittent SWIRE pulse.
- * Ignore it and poll for valid SWIRE pulse again.
- */
- if (status && swire_high && monitor) {
- pr_debug("SWIRE is low\n");
- poll_again = true;
- swire_high = false;
- break;
- }
-
- if (poll_again)
- poll_again = false;
- }
-
- usleep_range(poll_us, poll_us + 1);
- }
-
- /*
- * If poll_again is set, then VREG_OK should be polled for another
- * 100 ms for valid SWIRE signal.
- */
-
- if (poll_again) {
- pr_debug("polling again for SWIRE\n");
- i = AB_VREG_OK_POLL_AGAIN_TRIES;
- poll_us = AB_VREG_OK_POLL_HIGH_TIME_US;
- wait_time_us = 0;
- goto loop;
- }
-
- /* If swire_high is set, then it's a valid SWIRE signal, return 0. */
- if (swire_high) {
- pr_debug("SWIRE is high\n");
- return 0;
- }
-
- pr_err("AB_STATUS1: %x poll for VREG_OK %d timed out\n", val, status);
- return -ETIMEDOUT;
-}
-
static int qpnp_ibb_pd_control(struct qpnp_amoled *chip, bool en)
{
u8 val = en ? ENABLE_PD_BIT : 0;
@@ -330,70 +229,24 @@ static int qpnp_ibb_pd_control(struct qpnp_amoled *chip, bool en)
val);
}
-static int qpnp_ibb_aod_config(struct qpnp_amoled *chip, bool aod)
+static int qpnp_ab_ibb_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
{
+ struct qpnp_amoled *chip = rdev_get_drvdata(rdev);
int rc;
- u8 ps_ctl, smart_ps_ctl, nlimit_dac;
- pr_debug("aod: %d\n", aod);
- if (aod) {
- ps_ctl = 0x82;
- smart_ps_ctl = 0;
- nlimit_dac = 0;
- } else {
- ps_ctl = 0x02;
- smart_ps_ctl = 0x80;
- nlimit_dac = 0x3;
+ if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_STANDBY &&
+ mode != REGULATOR_MODE_IDLE) {
+ pr_err("Unsupported mode %u\n", mode);
+ return -EINVAL;
}
- rc = qpnp_amoled_write(chip, IBB_SMART_PS_CTL(chip), &smart_ps_ctl, 1);
- if (rc < 0)
- return rc;
-
- rc = qpnp_amoled_write(chip, IBB_NLIMIT_DAC(chip), &nlimit_dac, 1);
- if (rc < 0)
- return rc;
-
- rc = qpnp_amoled_write(chip, IBB_PS_CTL(chip), &ps_ctl, 1);
- return rc;
-}
-
-static void qpnp_amoled_aod_work(struct work_struct *work)
-{
- struct qpnp_amoled *chip = container_of(work, struct qpnp_amoled,
- aod_work);
- u8 val = 0;
- unsigned int mode;
- u32 poll_time_us = 100000;
- int rc;
-
- mutex_lock(&chip->reg_lock);
- mode = chip->ab.vreg.mode;
- mutex_unlock(&chip->reg_lock);
+ if (mode == chip->ab.vreg.mode || mode == chip->ibb.vreg.mode)
+ return 0;
pr_debug("mode: %d\n", mode);
- if (mode == REGULATOR_MODE_NORMAL) {
- rc = qpnp_ibb_aod_config(chip, true);
- if (rc < 0)
- goto error;
- /* poll for VREG_OK high */
- rc = qpnp_ab_poll_vreg_ok(chip, true, poll_time_us);
- if (rc < 0)
- goto error;
-
- /*
- * As per the hardware recommendation, Wait for ~10 ms after
- * polling for VREG_OK before changing the configuration when
- * exiting from AOD mode.
- */
-
- usleep_range(10000, 10001);
-
- rc = qpnp_ibb_aod_config(chip, false);
- if (rc < 0)
- goto error;
-
+ if (mode == REGULATOR_MODE_NORMAL || mode == REGULATOR_MODE_STANDBY) {
if (chip->ibb.pd_control) {
rc = qpnp_ibb_pd_control(chip, true);
if (rc < 0)
@@ -406,14 +259,6 @@ static void qpnp_amoled_aod_work(struct work_struct *work)
goto error;
}
} else if (mode == REGULATOR_MODE_IDLE) {
- if (chip->ab.aod_entry_poll_time_ms > 0)
- poll_time_us = chip->ab.aod_entry_poll_time_ms * 1000;
-
- /* poll for VREG_OK low */
- rc = qpnp_ab_poll_vreg_ok(chip, false, poll_time_us);
- if (rc < 0)
- goto error;
-
if (chip->ibb.pd_control) {
rc = qpnp_ibb_pd_control(chip, false);
if (rc < 0)
@@ -425,53 +270,12 @@ static void qpnp_amoled_aod_work(struct work_struct *work)
if (rc < 0)
goto error;
}
-
- val = 0xF1;
- } else if (mode == REGULATOR_MODE_STANDBY) {
- /* Restore the normal configuration without any delay */
- rc = qpnp_ibb_aod_config(chip, false);
- if (rc < 0)
- goto error;
-
- if (chip->ibb.pd_control) {
- rc = qpnp_ibb_pd_control(chip, true);
- if (rc < 0)
- goto error;
- }
-
- if (chip->ab.pd_control) {
- rc = qpnp_ab_pd_control(chip, true);
- if (rc < 0)
- goto error;
- }
}
- rc = qpnp_amoled_write(chip, AB_LDO_SW_DBG_CTL(chip), &val, 1);
+ chip->ab.vreg.mode = chip->ibb.vreg.mode = mode;
error:
if (rc < 0)
pr_err("Failed to configure for mode %d\n", mode);
-}
-
-static int qpnp_ab_ibb_regulator_set_mode(struct regulator_dev *rdev,
- unsigned int mode)
-{
- struct qpnp_amoled *chip = rdev_get_drvdata(rdev);
-
- if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_STANDBY &&
- mode != REGULATOR_MODE_IDLE) {
- pr_err("Unsupported mode %u\n", mode);
- return -EINVAL;
- }
-
- pr_debug("mode: %d\n", mode);
- if (mode == chip->ab.vreg.mode || mode == chip->ibb.vreg.mode)
- return 0;
-
- mutex_lock(&chip->reg_lock);
- chip->ab.vreg.mode = chip->ibb.vreg.mode = mode;
- mutex_unlock(&chip->reg_lock);
-
- queue_work(chip->wq, &chip->aod_work);
return 0;
}
@@ -720,9 +524,6 @@ static int qpnp_amoled_parse_dt(struct qpnp_amoled *chip)
"qcom,swire-control");
chip->ab.pd_control = of_property_read_bool(temp,
"qcom,aod-pd-control");
- of_property_read_u32(temp,
- "qcom,aod-entry-poll-time-ms",
- &chip->ab.aod_entry_poll_time_ms);
break;
case IBB_PERIPH_TYPE:
chip->ibb_base = base;
@@ -758,19 +559,6 @@ static int qpnp_amoled_regulator_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- /*
- * We need this workqueue to order the mode transitions that require
- * timing considerations. This way, we can ensure whenever the mode
- * transition is requested, it can be queued with high priority.
- */
- chip->wq = alloc_ordered_workqueue("qpnp_amoled_wq", WQ_HIGHPRI);
- if (!chip->wq) {
- dev_err(chip->dev, "Unable to alloc workqueue\n");
- return -ENOMEM;
- }
-
- mutex_init(&chip->reg_lock);
- INIT_WORK(&chip->aod_work, qpnp_amoled_aod_work);
chip->dev = &pdev->dev;
chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
@@ -789,23 +577,15 @@ static int qpnp_amoled_regulator_probe(struct platform_device *pdev)
}
rc = qpnp_amoled_hw_init(chip);
- if (rc < 0) {
+ if (rc < 0)
dev_err(chip->dev, "Failed to initialize HW rc=%d\n", rc);
- goto error;
- }
- return 0;
error:
- destroy_workqueue(chip->wq);
return rc;
}
static int qpnp_amoled_regulator_remove(struct platform_device *pdev)
{
- struct qpnp_amoled *chip = dev_get_drvdata(&pdev->dev);
-
- cancel_work_sync(&chip->aod_work);
- destroy_workqueue(chip->wq);
return 0;
}
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 4da79b9..2da1b15 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -535,6 +535,7 @@ struct ufs_vreg {
int max_uV;
bool low_voltage_sup;
bool low_voltage_active;
+ bool sys_suspend_pwr_off;
int min_uA;
int max_uA;
};
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 382e923..bd0415c 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -217,9 +217,24 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
} else if (!strcmp(name, "vccq")) {
vreg->min_uV = UFS_VREG_VCCQ_MIN_UV;
vreg->max_uV = UFS_VREG_VCCQ_MAX_UV;
+ /**
+ * Only if the SoC supports turning off VCCQ or VCCQ2 power
+ * supply source during power collapse, set a flag to turn off
+ * the specified power supply to reduce the system power
+ * consumption during system suspend events. The tradeoffs are:
+ * - System resume time will increase due
+ * to UFS device full re-initialization time.
+ * - UFS device life may be affected due to multiple
+ * UFS power on/off events.
+ * The benefits vs tradeoff should be considered carefully.
+ */
+ if (of_property_read_bool(np, "vccq-pwr-collapse-sup"))
+ vreg->sys_suspend_pwr_off = true;
} else if (!strcmp(name, "vccq2")) {
vreg->min_uV = UFS_VREG_VCCQ2_MIN_UV;
vreg->max_uV = UFS_VREG_VCCQ2_MAX_UV;
+ if (of_property_read_bool(np, "vccq2-pwr-collapse-sup"))
+ vreg->sys_suspend_pwr_off = true;
}
goto out;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 3bbff74..e4329a45 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -8864,6 +8864,22 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
goto out;
}
+ /**
+ * UFS3.0 and newer devices use Vcc and Vccq(1.2V)
+ * while UFS2.1 devices use Vcc and Vccq2(1.8V) power
+ * supplies. If the system allows turning off the regulators
+ * during power collapse event, turn off the regulators
+ * during system suspend events. This will cause the UFS
+ * device to re-initialize upon system resume events.
+ */
+ if ((hba->dev_info.w_spec_version >= 0x300 &&
+ hba->vreg_info.vccq->sys_suspend_pwr_off) ||
+ (hba->dev_info.w_spec_version < 0x300 &&
+ hba->vreg_info.vccq2->sys_suspend_pwr_off))
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_POWERDOWN_PWR_MODE,
+ UIC_LINK_OFF_STATE);
+
/* UFS device is also active now */
ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
@@ -10109,7 +10125,20 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
*/
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
!hba->dev_info.is_lu_power_on_wp) {
- ufshcd_setup_vreg(hba, false);
+ ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+ if (hba->dev_info.w_spec_version >= 0x300 &&
+ hba->vreg_info.vccq->sys_suspend_pwr_off)
+ ufshcd_toggle_vreg(hba->dev,
+ hba->vreg_info.vccq, false);
+ else
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+
+ if (hba->dev_info.w_spec_version < 0x300 &&
+ hba->vreg_info.vccq2->sys_suspend_pwr_off)
+ ufshcd_toggle_vreg(hba->dev,
+ hba->vreg_info.vccq2, false);
+ else
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
} else if (!ufshcd_is_ufs_dev_active(hba)) {
ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
if (!ufshcd_is_link_active(hba)) {
@@ -10124,23 +10153,40 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
int ret = 0;
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
- !hba->dev_info.is_lu_power_on_wp) {
- ret = ufshcd_setup_vreg(hba, true);
- } else if (!ufshcd_is_ufs_dev_active(hba)) {
- if (!ret && !ufshcd_is_link_active(hba)) {
+ !hba->dev_info.is_lu_power_on_wp) {
+ if (hba->dev_info.w_spec_version < 0x300 &&
+ hba->vreg_info.vccq2->sys_suspend_pwr_off)
+ ret = ufshcd_toggle_vreg(hba->dev,
+ hba->vreg_info.vccq2, true);
+ else
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+ if (ret)
+ goto vcc_disable;
+
+ if (hba->dev_info.w_spec_version >= 0x300 &&
+ hba->vreg_info.vccq->sys_suspend_pwr_off)
+ ret = ufshcd_toggle_vreg(hba->dev,
+ hba->vreg_info.vccq, true);
+ else
ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
- if (ret)
- goto vcc_disable;
+ if (ret)
+ goto vccq2_lpm;
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
+ } else if (!ufshcd_is_ufs_dev_active(hba)) {
+ if (!ufshcd_is_link_active(hba)) {
ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
if (ret)
- goto vccq_lpm;
+ goto vcc_disable;
+ ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+ if (ret)
+ goto vccq2_lpm;
}
ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
}
goto out;
-vccq_lpm:
- ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+vccq2_lpm:
+ ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
vcc_disable:
ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
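The UFS hunks above gate a full device power-down during system suspend on the new sys_suspend_pwr_off flag, choosing between VCCQ and VCCQ2 by device spec version. A compact userspace sketch of just that decision, assuming the same 0x300 boundary between UFS 2.x (VCC + VCCQ2) and UFS 3.x (VCC + VCCQ) devices that the patch uses; names are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct vreg {
    bool sys_suspend_pwr_off;   /* set from the *-pwr-collapse-sup DT flag */
};

static bool want_full_poweroff(unsigned int spec_version,
                               const struct vreg *vccq,
                               const struct vreg *vccq2)
{
    /* UFS 3.x parts run on VCC + VCCQ (1.2 V); older parts on VCC + VCCQ2 (1.8 V) */
    if (spec_version >= 0x300)
        return vccq->sys_suspend_pwr_off;
    return vccq2->sys_suspend_pwr_off;
}

int main(void)
{
    struct vreg vccq = { .sys_suspend_pwr_off = true };
    struct vreg vccq2 = { .sys_suspend_pwr_off = false };

    printf("UFS 3.1 part: %s\n",
           want_full_poweroff(0x310, &vccq, &vccq2) ?
           "power down + link off on suspend" : "keep rails in LPM");
    return 0;
}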
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 2866ef8..0338e7d 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1388,6 +1388,35 @@ static int icnss_msa0_ramdump(struct icnss_priv *priv)
return do_ramdump(priv->msa0_dump_dev, &segment, 1);
}
+static void icnss_update_state_send_modem_shutdown(struct icnss_priv *priv,
+ void *data)
+{
+ struct notif_data *notif = data;
+ int ret = 0;
+
+ if (!notif->crashed) {
+ if (atomic_read(&priv->is_shutdown)) {
+ atomic_set(&priv->is_shutdown, false);
+ if (!test_bit(ICNSS_PD_RESTART, &priv->state) &&
+ !test_bit(ICNSS_SHUTDOWN_DONE, &priv->state)) {
+ icnss_call_driver_remove(priv);
+ }
+ }
+
+ if (test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) {
+ if (!wait_for_completion_timeout(
+ &priv->unblock_shutdown,
+ msecs_to_jiffies(PROBE_TIMEOUT)))
+ icnss_pr_err("modem block shutdown timeout\n");
+ }
+
+ ret = wlfw_send_modem_shutdown_msg(priv);
+ if (ret < 0)
+ icnss_pr_err("Fail to send modem shutdown Indication %d\n",
+ ret);
+ }
+}
+
static int icnss_modem_notifier_nb(struct notifier_block *nb,
unsigned long code,
void *data)
@@ -1397,7 +1426,6 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
struct icnss_priv *priv = container_of(nb, struct icnss_priv,
modem_ssr_nb);
struct icnss_uevent_fw_down_data fw_down_data;
- int ret = 0;
icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
@@ -1413,28 +1441,7 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
priv->is_ssr = true;
- if (code == SUBSYS_BEFORE_SHUTDOWN && !notif->crashed &&
- atomic_read(&priv->is_shutdown)) {
- atomic_set(&priv->is_shutdown, false);
- if (!test_bit(ICNSS_PD_RESTART, &priv->state) &&
- !test_bit(ICNSS_SHUTDOWN_DONE, &priv->state)) {
- icnss_call_driver_remove(priv);
- }
- }
-
- if (code == SUBSYS_BEFORE_SHUTDOWN && !notif->crashed &&
- test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) {
- if (!wait_for_completion_timeout(&priv->unblock_shutdown,
- msecs_to_jiffies(PROBE_TIMEOUT)))
- icnss_pr_err("modem block shutdown timeout\n");
- }
-
- if (code == SUBSYS_BEFORE_SHUTDOWN && !notif->crashed) {
- ret = wlfw_send_modem_shutdown_msg(priv);
- if (ret < 0)
- icnss_pr_err("Fail to send modem shutdown Indication %d\n",
- ret);
- }
+ icnss_update_state_send_modem_shutdown(priv, data);
if (test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
set_bit(ICNSS_FW_DOWN, &priv->state);
@@ -2991,23 +2998,16 @@ static int icnss_regread_show(struct seq_file *s, void *data)
return 0;
}
-static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
- size_t count, loff_t *off)
+static ssize_t icnss_reg_parse(const char __user *user_buf, size_t count,
+ struct icnss_reg_info *reg_info_ptr)
{
- struct icnss_priv *priv =
- ((struct seq_file *)fp->private_data)->private;
- char buf[64];
- char *sptr, *token;
- unsigned int len = 0;
- uint32_t reg_offset, mem_type;
- uint32_t data_len = 0;
- uint8_t *reg_buf = NULL;
+ char buf[64] = {0};
+ char *sptr = NULL, *token = NULL;
const char *delim = " ";
- int ret = 0;
+ unsigned int len = 0;
- if (!test_bit(ICNSS_FW_READY, &priv->state) ||
- !test_bit(ICNSS_POWER_ON, &priv->state))
- return -EINVAL;
+ if (user_buf == NULL)
+ return -EFAULT;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
@@ -3023,7 +3023,7 @@ static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
if (!sptr)
return -EINVAL;
- if (kstrtou32(token, 0, &mem_type))
+ if (kstrtou32(token, 0, &reg_info_ptr->mem_type))
return -EINVAL;
token = strsep(&sptr, delim);
@@ -3033,32 +3033,53 @@ static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
if (!sptr)
return -EINVAL;
- if (kstrtou32(token, 0, &reg_offset))
+ if (kstrtou32(token, 0, &reg_info_ptr->reg_offset))
return -EINVAL;
token = strsep(&sptr, delim);
if (!token)
return -EINVAL;
- if (kstrtou32(token, 0, &data_len))
+ if (kstrtou32(token, 0, &reg_info_ptr->data_len))
return -EINVAL;
- if (data_len == 0 ||
- data_len > WLFW_MAX_DATA_SIZE)
+ if (reg_info_ptr->data_len == 0 ||
+ reg_info_ptr->data_len > WLFW_MAX_DATA_SIZE)
return -EINVAL;
+ return 0;
+}
+
+static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct icnss_priv *priv =
+ ((struct seq_file *)fp->private_data)->private;
+ uint8_t *reg_buf = NULL;
+ int ret = 0;
+ struct icnss_reg_info reg_info;
+
+ if (!test_bit(ICNSS_FW_READY, &priv->state) ||
+ !test_bit(ICNSS_POWER_ON, &priv->state))
+ return -EINVAL;
+
+ ret = icnss_reg_parse(user_buf, count, &reg_info);
+ if (ret)
+ return ret;
+
mutex_lock(&priv->dev_lock);
kfree(priv->diag_reg_read_buf);
priv->diag_reg_read_buf = NULL;
- reg_buf = kzalloc(data_len, GFP_KERNEL);
+ reg_buf = kzalloc(reg_info.data_len, GFP_KERNEL);
if (!reg_buf) {
mutex_unlock(&priv->dev_lock);
return -ENOMEM;
}
- ret = wlfw_athdiag_read_send_sync_msg(priv, reg_offset,
- mem_type, data_len,
+ ret = wlfw_athdiag_read_send_sync_msg(priv, reg_info.reg_offset,
+ reg_info.mem_type,
+ reg_info.data_len,
reg_buf);
if (ret) {
kfree(reg_buf);
@@ -3066,9 +3087,9 @@ static ssize_t icnss_regread_write(struct file *fp, const char __user *user_buf,
return ret;
}
- priv->diag_reg_read_addr = reg_offset;
- priv->diag_reg_read_mem_type = mem_type;
- priv->diag_reg_read_len = data_len;
+ priv->diag_reg_read_addr = reg_info.reg_offset;
+ priv->diag_reg_read_mem_type = reg_info.mem_type;
+ priv->diag_reg_read_len = reg_info.data_len;
priv->diag_reg_read_buf = reg_buf;
mutex_unlock(&priv->dev_lock);
@@ -3211,37 +3232,12 @@ static int icnss_get_vbatt_info(struct icnss_priv *priv)
return 0;
}
-static int icnss_probe(struct platform_device *pdev)
+static int icnss_resource_parse(struct icnss_priv *priv)
{
- int ret = 0;
- struct resource *res;
- int i;
+ int ret = 0, i = 0;
+ struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
- struct icnss_priv *priv;
- const __be32 *addrp;
- u64 prop_size = 0;
- struct device_node *np;
- u32 addr_win[2];
-
- if (penv) {
- icnss_pr_err("Driver is already initialized\n");
- return -EEXIST;
- }
-
- icnss_pr_dbg("Platform driver probe\n");
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->magic = ICNSS_MAGIC;
- dev_set_drvdata(dev, priv);
-
- priv->pdev = pdev;
-
- priv->vreg_info = icnss_vreg_info;
-
- icnss_allow_recursive_recovery(dev);
+ struct resource *res;
if (of_property_read_bool(pdev->dev.of_node, "qcom,icnss-adc_tm")) {
ret = icnss_get_vbatt_info(priv);
@@ -3294,6 +3290,21 @@ static int icnss_probe(struct platform_device *pdev)
}
}
+ return 0;
+
+out:
+ return ret;
+}
+
+static int icnss_msa_dt_parse(struct icnss_priv *priv)
+{
+ int ret = 0;
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = NULL;
+ u64 prop_size = 0;
+ const __be32 *addrp = NULL;
+
np = of_parse_phandle(dev->of_node,
"qcom,wlan-msa-fixed-region", 0);
if (np) {
@@ -3342,6 +3353,20 @@ static int icnss_probe(struct platform_device *pdev)
icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%pK MSA Memory Size: 0x%x\n",
&priv->msa_pa, (void *)priv->msa_va, priv->msa_mem_size);
+ return 0;
+
+out:
+ return ret;
+}
+
+static int icnss_smmu_dt_parse(struct icnss_priv *priv)
+{
+ int ret = 0;
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ u32 addr_win[2];
+
ret = of_property_read_u32_array(dev->of_node,
"qcom,iommu-dma-addr-pool",
addr_win,
@@ -3367,6 +3392,47 @@ static int icnss_probe(struct platform_device *pdev)
}
}
+ return 0;
+}
+
+static int icnss_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device *dev = &pdev->dev;
+ struct icnss_priv *priv;
+
+ if (penv) {
+ icnss_pr_err("Driver is already initialized\n");
+ return -EEXIST;
+ }
+
+ icnss_pr_dbg("Platform driver probe\n");
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->magic = ICNSS_MAGIC;
+ dev_set_drvdata(dev, priv);
+
+ priv->pdev = pdev;
+
+ priv->vreg_info = icnss_vreg_info;
+
+ icnss_allow_recursive_recovery(dev);
+
+ ret = icnss_resource_parse(priv);
+ if (ret)
+ goto out;
+
+ ret = icnss_msa_dt_parse(priv);
+ if (ret)
+ goto out;
+
+ ret = icnss_smmu_dt_parse(priv);
+ if (ret)
+ goto out;
+
spin_lock_init(&priv->event_lock);
spin_lock_init(&priv->on_off_lock);
mutex_init(&priv->dev_lock);
diff --git a/drivers/soc/qcom/icnss_private.h b/drivers/soc/qcom/icnss_private.h
index 597a530..1c425d1 100644
--- a/drivers/soc/qcom/icnss_private.h
+++ b/drivers/soc/qcom/icnss_private.h
@@ -352,6 +352,12 @@ struct icnss_priv {
};
+struct icnss_reg_info {
+ uint32_t mem_type;
+ uint32_t reg_offset;
+ uint32_t data_len;
+};
+
int icnss_call_driver_uevent(struct icnss_priv *priv,
enum icnss_uevent uevent, void *data);
int icnss_driver_event_post(enum icnss_driver_event_type type,
diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c
index 82f0717..38dd2f1 100644
--- a/drivers/soc/qcom/mem-offline.c
+++ b/drivers/soc/qcom/mem-offline.c
@@ -383,7 +383,7 @@ static int mem_online_remaining_blocks(void)
start_section_nr = pfn_to_section_nr(memblock_end_pfn);
end_section_nr = pfn_to_section_nr(ram_end_pfn);
- if (start_section_nr >= end_section_nr) {
+ if (memblock_end_of_DRAM() >= bootloader_memory_limit) {
pr_info("mem-offline: System booted with no zone movable memory blocks. Cannot perform memory offlining\n");
return -EINVAL;
}
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 9007fa3..724d597 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -379,7 +379,11 @@ static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
&ss_valid_seg_cnt,
desc->num_aux_minidump_ids);
- ret = do_minidump(ramdump_dev, ramdump_segs, ss_valid_seg_cnt);
+ if (desc->minidump_as_elf32)
+ ret = do_elf_ramdump(ramdump_dev, ramdump_segs,
+ ss_valid_seg_cnt);
+ else
+ ret = do_minidump(ramdump_dev, ramdump_segs, ss_valid_seg_cnt);
if (ret)
pil_err(desc, "%s: Minidump collection failed for subsys %s rc:%d\n",
__func__, desc->name, ret);
@@ -1557,6 +1561,9 @@ int pil_desc_init(struct pil_desc *desc)
if (!desc->unmap_fw_mem)
desc->unmap_fw_mem = unmap_fw_mem;
+ desc->minidump_as_elf32 = of_property_read_bool(
+ ofnode, "qcom,minidump-as-elf32");
+
return 0;
err_parse_dt:
ida_simple_remove(&pil_ida, priv->id);
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index c265657..62e5d4a 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __MSM_PERIPHERAL_LOADER_H
#define __MSM_PERIPHERAL_LOADER_H
@@ -62,6 +62,7 @@ struct pil_desc {
int minidump_id;
int *aux_minidump_ids;
int num_aux_minidump_ids;
+ bool minidump_as_elf32;
};
/**
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index 46732bf..5cc51bf 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -976,6 +976,9 @@ void qmi_rmnet_work_init(void *port)
rmnet_ps_wq = alloc_workqueue("rmnet_powersave_work",
WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ if (!rmnet_ps_wq)
+ return;
+
rmnet_work = kzalloc(sizeof(*rmnet_work), GFP_ATOMIC);
if (!rmnet_work) {
destroy_workqueue(rmnet_ps_wq);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 4443e277..be34be0 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/atomic.h>
@@ -559,6 +559,10 @@ int rpmh_flush(const struct device *dev)
return 0;
}
+ do {
+ ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+ } while (ret == -EAGAIN);
+
/* First flush the cached batch requests */
ret = flush_batch(ctrlr);
if (ret)
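The rpmh_flush() change retries the controller invalidate until it stops reporting -EAGAIN before flushing the cached requests. The pattern in isolation, as a trivial userspace sketch; rsc_invalidate() here is a stand-in for rpmh_rsc_invalidate() that simply reports busy twice before succeeding.

#include <errno.h>
#include <stdio.h>

static int busy_passes = 2;

static int rsc_invalidate(void)
{
    return busy_passes-- > 0 ? -EAGAIN : 0;
}

int main(void)
{
    int ret;

    do {
        ret = rsc_invalidate();
    } while (ret == -EAGAIN);

    printf("invalidate returned %d\n", ret);
    return ret;
}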
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 233346b..93a63d8 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -185,6 +185,12 @@ struct spcom_channel {
size_t actual_rx_size; /* actual data size received */
void *rpmsg_rx_buf;
+ /**
+ * to track if rx_buf is read in the same session
+ * in which it is updated
+ */
+ uint32_t rx_buf_txn_id;
+
/* shared buffer lock/unlock support */
int dmabuf_fd_table[SPCOM_MAX_ION_BUF_PER_CH];
struct dma_buf *dmabuf_handle_table[SPCOM_MAX_ION_BUF_PER_CH];
@@ -341,6 +347,7 @@ static int spcom_init_channel(struct spcom_channel *ch,
ch->actual_rx_size = 0;
ch->is_busy = false;
ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
+ ch->rx_buf_txn_id = ch->txn_id;
memset(ch->pid, 0, sizeof(ch->pid));
ch->rpmsg_abort = false;
ch->rpmsg_rx_buf = NULL;
@@ -394,6 +401,16 @@ static int spcom_rx(struct spcom_channel *ch,
mutex_lock(&ch->lock);
+ if (ch->rx_buf_txn_id != ch->txn_id) {
+ pr_debug("rpmsg_rx_buf is updated in a different session\n");
+ if (ch->rpmsg_rx_buf) {
+ memset(ch->rpmsg_rx_buf, 0, ch->actual_rx_size);
+ kfree((void *)ch->rpmsg_rx_buf);
+ ch->rpmsg_rx_buf = NULL;
+ ch->actual_rx_size = 0;
+ }
+ }
+
/* check for already pending data */
if (!ch->actual_rx_size) {
reinit_completion(&ch->rx_done);
@@ -1537,9 +1554,9 @@ static int spcom_device_release(struct inode *inode, struct file *filp)
if (ch->active_pid == current_pid()) {
pr_debug("active_pid [%x] is releasing ch [%s] sync lock\n",
ch->active_pid, name);
- mutex_unlock(&ch->shared_sync_lock);
/* No longer the current active user of the channel */
ch->active_pid = 0;
+ mutex_unlock(&ch->shared_sync_lock);
}
ch->num_clients--;
ch->is_busy = false;
@@ -1640,8 +1657,8 @@ static ssize_t spcom_device_write(struct file *filp,
pr_err("handle command error [%d]\n", ret);
kfree(buf);
if (ch && ch->active_pid == current_pid()) {
- mutex_unlock(&ch->shared_sync_lock);
ch->active_pid = 0;
+ mutex_unlock(&ch->shared_sync_lock);
}
return ret;
}
@@ -1726,16 +1743,16 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
pr_debug("ch [%s] ret [%d]\n", name, (int) actual_size);
if (ch->active_pid == cur_pid) {
- mutex_unlock(&ch->shared_sync_lock);
ch->active_pid = 0;
+ mutex_unlock(&ch->shared_sync_lock);
}
return actual_size;
exit_err:
kfree(buf);
if (ch->active_pid == cur_pid) {
- mutex_unlock(&ch->shared_sync_lock);
ch->active_pid = 0;
+ mutex_unlock(&ch->shared_sync_lock);
}
return ret;
}
@@ -2161,6 +2178,7 @@ static void spcom_signal_rx_done(struct work_struct *ignored)
}
ch->rpmsg_rx_buf = rx_item->rpmsg_rx_buf;
ch->actual_rx_size = rx_item->rx_buf_size;
+ ch->rx_buf_txn_id = ch->txn_id;
complete_all(&ch->rx_done);
mutex_unlock(&ch->lock);
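The rx_buf_txn_id bookkeeping above lets spcom_rx() detect a receive buffer that was filled under an earlier transaction and discard it before waiting for fresh data. A simplified userspace sketch of that staleness check; field and function names are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct channel {
    uint32_t txn_id;            /* current request/response session */
    uint32_t rx_buf_txn_id;     /* session in which rx_buf was filled */
    void *rx_buf;
    size_t rx_size;
};

static void drop_stale_rx(struct channel *ch)
{
    if (ch->rx_buf_txn_id == ch->txn_id)
        return;                 /* buffer belongs to the current session */

    if (ch->rx_buf) {
        memset(ch->rx_buf, 0, ch->rx_size);
        free(ch->rx_buf);
        ch->rx_buf = NULL;
        ch->rx_size = 0;
    }
}

int main(void)
{
    struct channel ch = {
        .txn_id = 2,
        .rx_buf_txn_id = 1,
        .rx_buf = calloc(1, 64),
        .rx_size = 64,
    };

    drop_stale_rx(&ch);
    printf("rx_buf after check: %p\n", ch.rx_buf);
    return 0;
}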
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 87eb07f7..8343023 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -92,7 +92,6 @@ struct msm_watchdog_data {
unsigned long long thread_start;
unsigned long long ping_start[NR_CPUS];
unsigned long long ping_end[NR_CPUS];
- unsigned int cpu_scandump_sizes[NR_CPUS];
};
/*
@@ -550,95 +549,6 @@ static irqreturn_t wdog_ppi_bark(int irq, void *dev_id)
return wdog_bark_handler(irq, wdog_dd);
}
-static void configure_bark_dump(struct msm_watchdog_data *wdog_dd)
-{
- int ret;
- struct msm_dump_entry dump_entry;
- struct msm_dump_data *cpu_data;
- int cpu;
- void *cpu_buf;
-
- cpu_data = kcalloc(num_present_cpus(), sizeof(struct msm_dump_data),
- GFP_KERNEL);
- if (!cpu_data)
- goto out0;
-
- cpu_buf = kcalloc(num_present_cpus(), MAX_CPU_CTX_SIZE, GFP_KERNEL);
- if (!cpu_buf)
- goto out1;
-
- for_each_cpu(cpu, cpu_present_mask) {
- cpu_data[cpu].addr = virt_to_phys(cpu_buf +
- cpu * MAX_CPU_CTX_SIZE);
- cpu_data[cpu].len = MAX_CPU_CTX_SIZE;
- snprintf(cpu_data[cpu].name, sizeof(cpu_data[cpu].name),
- "KCPU_CTX%d", cpu);
- dump_entry.id = MSM_DUMP_DATA_CPU_CTX + cpu;
- dump_entry.addr = virt_to_phys(&cpu_data[cpu]);
- ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
- &dump_entry);
- /*
- * Don't free the buffers in case of error since
- * registration may have succeeded for some cpus.
- */
- if (ret)
- pr_err("cpu %d reg dump setup failed\n", cpu);
- }
-
- return;
-out1:
- kfree(cpu_data);
-out0:
- return;
-}
-
-static void configure_scandump(struct msm_watchdog_data *wdog_dd)
-{
- int ret;
- struct msm_dump_entry dump_entry;
- struct msm_dump_data *cpu_data;
- int cpu;
- static dma_addr_t dump_addr;
- static void *dump_vaddr;
- unsigned int scandump_size;
-
- for_each_cpu(cpu, cpu_present_mask) {
- scandump_size = wdog_dd->cpu_scandump_sizes[cpu];
- cpu_data = devm_kzalloc(wdog_dd->dev,
- sizeof(struct msm_dump_data),
- GFP_KERNEL);
- if (!cpu_data)
- continue;
-
- dump_vaddr = (void *) dma_alloc_coherent(wdog_dd->dev,
- scandump_size,
- &dump_addr,
- GFP_KERNEL);
- if (!dump_vaddr) {
- dev_err(wdog_dd->dev, "Couldn't get memory for dump\n");
- continue;
- }
- memset(dump_vaddr, 0x0, scandump_size);
-
- cpu_data->addr = dump_addr;
- cpu_data->len = scandump_size;
- snprintf(cpu_data->name, sizeof(cpu_data->name),
- "KSCANDUMP%d", cpu);
- dump_entry.id = MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu;
- dump_entry.addr = virt_to_phys(cpu_data);
- ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
- &dump_entry);
- if (ret) {
- dev_err(wdog_dd->dev, "Dump setup failed, id = %d\n",
- MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu);
- dma_free_coherent(wdog_dd->dev, scandump_size,
- dump_vaddr,
- dump_addr);
- devm_kfree(wdog_dd->dev, cpu_data);
- }
- }
-}
-
static int init_watchdog_sysfs(struct msm_watchdog_data *wdog_dd)
{
int error = 0;
@@ -699,8 +609,6 @@ static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
delay_time = msecs_to_jiffies(wdog_dd->pet_time);
wdog_dd->min_slack_ticks = UINT_MAX;
wdog_dd->min_slack_ns = ULLONG_MAX;
- configure_scandump(wdog_dd);
- configure_bark_dump(wdog_dd);
timeout = (wdog_dd->bark_time * WDT_HZ)/1000;
__raw_writel(timeout, wdog_dd->base + WDT0_BARK_TIME);
__raw_writel(timeout + 3*WDT_HZ, wdog_dd->base + WDT0_BITE_TIME);
@@ -754,7 +662,7 @@ static int msm_wdog_dt_to_pdata(struct platform_device *pdev,
{
struct device_node *node = pdev->dev.of_node;
struct resource *res;
- int ret, cpu, num_scandump_sizes;
+ int ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wdt-base");
if (!res)
@@ -815,18 +723,6 @@ static int msm_wdog_dt_to_pdata(struct platform_device *pdev,
}
pdata->wakeup_irq_enable = of_property_read_bool(node,
"qcom,wakeup-enable");
-
- num_scandump_sizes = of_property_count_elems_of_size(node,
- "qcom,scandump-sizes",
- sizeof(u32));
- if (num_scandump_sizes < 0 || num_scandump_sizes != num_possible_cpus())
- dev_info(&pdev->dev, "%s scandump sizes property not correct\n",
- __func__);
- else
- for_each_cpu(cpu, cpu_present_mask)
- of_property_read_u32_index(node, "qcom,scandump-sizes",
- cpu,
- &pdata->cpu_scandump_sizes[cpu]);
pdata->irq_ppi = irq_is_percpu(pdata->bark_irq);
dump_pdata(pdata);
return 0;
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index a57cf1d..1c8e898 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -166,6 +166,7 @@ struct msm_geni_serial_port {
void *ipc_log_rx;
void *ipc_log_pwr;
void *ipc_log_misc;
+ void *console_log;
unsigned int cur_baud;
int ioctl_count;
int edge_count;
@@ -777,6 +778,7 @@ static void msm_geni_serial_console_write(struct console *co, const char *s,
bool locked = true;
unsigned long flags;
unsigned int geni_status;
+ int irq_en;
/* Max 1 port supported as of now */
WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
@@ -806,12 +808,22 @@ static void msm_geni_serial_console_write(struct console *co, const char *s,
}
writel_relaxed(M_CMD_CANCEL_EN, uport->membase +
SE_GENI_M_IRQ_CLEAR);
- } else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->cur_tx_remaining)
+ } else if ((geni_status & M_GENI_CMD_ACTIVE) &&
+ !port->cur_tx_remaining) {
/* It seems we can interrupt existing transfers unless all data
* has been sent, in which case we need to look for done first.
*/
msm_geni_serial_poll_cancel_tx(uport);
+ /* Enable WM interrupt for every new console write op */
+ if (uart_circ_chars_pending(&uport->state->xmit)) {
+ irq_en = geni_read_reg_nolog(uport->membase,
+ SE_GENI_M_IRQ_EN);
+ geni_write_reg_nolog(irq_en | M_TX_FIFO_WATERMARK_EN,
+ uport->membase, SE_GENI_M_IRQ_EN);
+ }
+ }
+
__msm_geni_serial_console_write(uport, s, count);
if (port->cur_tx_remaining)
@@ -1310,6 +1322,7 @@ static int msm_geni_serial_handle_tx(struct uart_port *uport, bool done,
unsigned int fifo_width_bytes =
(uart_console(uport) ? 1 : (msm_port->tx_fifo_width >> 3));
int temp_tail = 0;
+ int irq_en;
tx_fifo_status = geni_read_reg_nolog(uport->membase,
SE_GENI_TX_FIFO_STATUS);
@@ -1340,6 +1353,12 @@ static int msm_geni_serial_handle_tx(struct uart_port *uport, bool done,
if (!msm_port->cur_tx_remaining) {
msm_geni_serial_setup_tx(uport, pending);
msm_port->cur_tx_remaining = pending;
+
+ /* Re-enable WM interrupt when starting new transfer */
+ irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
+ if (!(irq_en & M_TX_FIFO_WATERMARK_EN))
+ geni_write_reg_nolog(irq_en | M_TX_FIFO_WATERMARK_EN,
+ uport->membase, SE_GENI_M_IRQ_EN);
}
bytes_remaining = xmit_size;
@@ -1367,7 +1386,24 @@ static int msm_geni_serial_handle_tx(struct uart_port *uport, bool done,
wmb();
}
xmit->tail = temp_tail;
+
+ /*
+ * The tx fifo watermark is level triggered and latched. Though we had
+ * cleared it in qcom_geni_serial_isr it will have already reasserted
+ * so we must clear it again here after our writes.
+ */
+ geni_write_reg_nolog(M_TX_FIFO_WATERMARK_EN, uport->membase,
+ SE_GENI_M_IRQ_CLEAR);
+
exit_handle_tx:
+ if (!msm_port->cur_tx_remaining) {
+ irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
+ /* Clear WM interrupt post each transfer completion */
+ if (irq_en & M_TX_FIFO_WATERMARK_EN)
+ geni_write_reg_nolog(irq_en & ~M_TX_FIFO_WATERMARK_EN,
+ uport->membase, SE_GENI_M_IRQ_EN);
+ }
+
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(uport);
return 0;
@@ -1469,8 +1505,11 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
bool drop_rx = false;
spin_lock_irqsave(&uport->lock, flags);
- if (uart_console(uport) && uport->suspended)
+ if (uart_console(uport) && uport->suspended) {
+ IPC_LOG_MSG(msm_port->console_log,
+ "%s. Console in suspend state\n", __func__);
goto exit_geni_serial_isr;
+ }
if (!uart_console(uport) && pm_runtime_status_suspended(uport->dev)) {
dev_err(uport->dev, "%s.Device is suspended.\n", __func__);
IPC_LOG_MSG(msm_port->ipc_log_misc,
@@ -1481,6 +1520,10 @@ static irqreturn_t msm_geni_serial_isr(int isr, void *dev)
SE_GENI_M_IRQ_STATUS);
s_irq_status = geni_read_reg_nolog(uport->membase,
SE_GENI_S_IRQ_STATUS);
+ if (uart_console(uport))
+ IPC_LOG_MSG(msm_port->console_log,
+ "%s. sirq 0x%x mirq:0x%x\n", __func__, s_irq_status,
+ m_irq_status);
m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
dma = geni_read_reg_nolog(uport->membase, SE_GENI_DMA_MODE_EN);
dma_tx_status = geni_read_reg_nolog(uport->membase, SE_DMA_TX_IRQ_STAT);
@@ -2318,14 +2361,13 @@ static void console_unregister(struct uart_driver *drv)
static void msm_geni_serial_debug_init(struct uart_port *uport, bool console)
{
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+ char name[30];
msm_port->dbg = debugfs_create_dir(dev_name(uport->dev), NULL);
if (IS_ERR_OR_NULL(msm_port->dbg))
dev_err(uport->dev, "Failed to create dbg dir\n");
if (!console) {
- char name[30];
-
memset(name, 0, sizeof(name));
if (!msm_port->ipc_log_rx) {
scnprintf(name, sizeof(name), "%s%s",
@@ -2362,6 +2404,16 @@ static void msm_geni_serial_debug_init(struct uart_port *uport, bool console)
if (!msm_port->ipc_log_misc)
dev_info(uport->dev, "Err in Misc IPC Log\n");
}
+ } else {
+ memset(name, 0, sizeof(name));
+ if (!msm_port->console_log) {
+ scnprintf(name, sizeof(name), "%s%s",
+ dev_name(uport->dev), "_console");
+ msm_port->console_log = ipc_log_context_create(
+ IPC_LOG_MISC_PAGES, name, 0);
+ if (!msm_port->console_log)
+ dev_info(uport->dev, "Err in Misc IPC Log\n");
+ }
}
}
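The serial hunks above all revolve around one property called out in the new comment: the TX FIFO watermark is level triggered and latched, so it must be armed when a transfer starts, cleared again after the FIFO writes, and disabled once nothing remains to send. A userspace model of that sequence follows; the registers are plain variables and the bit position is arbitrary, chosen only for illustration.

#include <stdint.h>
#include <stdio.h>

#define M_TX_FIFO_WATERMARK_EN (1u << 30)   /* placeholder bit, not the real layout */

static uint32_t m_irq_en;       /* stand-in for SE_GENI_M_IRQ_EN */
static uint32_t m_irq_clear;    /* stand-in for SE_GENI_M_IRQ_CLEAR */

static void handle_tx(unsigned int pending, unsigned int *remaining)
{
    if (!*remaining) {
        /* starting a new transfer: make sure the watermark IRQ is armed */
        if (!(m_irq_en & M_TX_FIFO_WATERMARK_EN))
            m_irq_en |= M_TX_FIFO_WATERMARK_EN;
        *remaining = pending;
    }

    /* ... FIFO writes happen here; assume the whole transfer fits ... */
    *remaining = 0;

    /* level triggered and latched: it reasserts after the writes, clear it again */
    m_irq_clear = M_TX_FIFO_WATERMARK_EN;

    if (!*remaining)
        /* transfer complete: stop watermark interrupts until the next one */
        m_irq_en &= ~M_TX_FIFO_WATERMARK_EN;
}

int main(void)
{
    unsigned int remaining = 0;

    handle_tx(16, &remaining);
    printf("irq_en=0x%08x irq_clear=0x%08x\n", m_irq_en, m_irq_clear);
    return 0;
}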
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 1b0f981..8a0f116 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -355,8 +355,10 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
evt = dwc->ev_buf;
- if (evt)
+ if (evt) {
dwc3_free_one_event_buffer(dwc, evt);
+ dwc->ev_buf = NULL;
+ }
/* free GSI related event buffers */
dwc3_notify_event(dwc, DWC3_GSI_EVT_BUF_FREE, 0);
@@ -1404,6 +1406,7 @@ static int dwc3_probe(struct platform_device *pdev)
void __iomem *regs;
int irq;
+ char dma_ipc_log_ctx_name[40];
if (count >= DWC_CTRL_COUNT) {
dev_err(dev, "Err dwc instance %d >= %d available\n",
@@ -1543,6 +1546,13 @@ static int dwc3_probe(struct platform_device *pdev)
if (!dwc->dwc_ipc_log_ctxt)
dev_err(dwc->dev, "Error getting ipc_log_ctxt\n");
+ snprintf(dma_ipc_log_ctx_name, sizeof(dma_ipc_log_ctx_name),
+ "%s.ep_events", dev_name(dwc->dev));
+ dwc->dwc_dma_ipc_log_ctxt = ipc_log_context_create(NUM_LOG_PAGES,
+ dma_ipc_log_ctx_name, 0);
+ if (!dwc->dwc_dma_ipc_log_ctxt)
+ dev_err(dwc->dev, "Error getting ipc_log_ctxt for ep_events\n");
+
dwc3_instance[count] = dwc;
dwc->index = count;
count++;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 6f24144..4b039b3 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1318,6 +1318,7 @@ struct dwc3 {
unsigned int index;
void *dwc_ipc_log_ctxt;
+ void *dwc_dma_ipc_log_ctxt;
struct dwc3_gadget_events dbg_gadget_events;
int tx_fifo_size;
int last_fifo_depth;
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index a03602d..f1659b7 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -36,6 +36,18 @@
#define dbg_setup(ep_num, req) \
dwc3_dbg_setup(dwc, ep_num, req)
+#define dbg_ep_queue(ep_num, req) \
+ dwc3_dbg_dma_queue(dwc, ep_num, req)
+
+#define dbg_ep_dequeue(ep_num, req) \
+ dwc3_dbg_dma_dequeue(dwc, ep_num, req)
+
+#define dbg_ep_unmap(ep_num, req) \
+ dwc3_dbg_dma_unmap(dwc, ep_num, req)
+
+#define dbg_ep_map(ep_num, req) \
+ dwc3_dbg_dma_map(dwc, ep_num, req)
+
#define dbg_log_string(fmt, ...) \
ipc_log_string(dwc->dwc_ipc_log_ctxt,\
"%s: " fmt, __func__, ##__VA_ARGS__)
@@ -660,6 +672,14 @@ void dwc3_dbg_setup(struct dwc3 *dwc, u8 ep_num,
const struct usb_ctrlrequest *req);
void dwc3_dbg_print_reg(struct dwc3 *dwc,
const char *name, int reg);
+void dwc3_dbg_dma_queue(struct dwc3 *dwc, u8 ep_num,
+ struct dwc3_request *req);
+void dwc3_dbg_dma_dequeue(struct dwc3 *dwc, u8 ep_num,
+ struct dwc3_request *req);
+void dwc3_dbg_dma_map(struct dwc3 *dwc, u8 ep_num,
+ struct dwc3_request *req);
+void dwc3_dbg_dma_unmap(struct dwc3 *dwc, u8 ep_num,
+ struct dwc3_request *req);
#ifdef CONFIG_DEBUG_FS
extern void dwc3_debugfs_init(struct dwc3 *);
diff --git a/drivers/usb/dwc3/debug_ipc.c b/drivers/usb/dwc3/debug_ipc.c
index b694246..98f27a5 100644
--- a/drivers/usb/dwc3/debug_ipc.c
+++ b/drivers/usb/dwc3/debug_ipc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include "debug.h"
@@ -136,3 +136,47 @@ void dwc3_dbg_print_reg(struct dwc3 *dwc, const char *name, int reg)
ipc_log_string(dwc->dwc_ipc_log_ctxt, "%s = 0x%08x", name, reg);
}
+
+void dwc3_dbg_dma_unmap(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+ if (ep_num < 2)
+ return;
+
+ ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+ "%02X-%-3.3s %-25.25s 0x%pK 0x%lx %u 0x%lx %d", ep_num >> 1,
+ ep_num & 1 ? "IN":"OUT", "UNMAP", &req->request,
+ req->request.dma, req->request.length, req->trb_dma,
+ req->trb->ctrl & DWC3_TRB_CTRL_HWO);
+}
+
+void dwc3_dbg_dma_map(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+ if (ep_num < 2)
+ return;
+
+ ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+ "%02X-%-3.3s %-25.25s 0x%pK 0x%lx %u 0x%lx", ep_num >> 1,
+ ep_num & 1 ? "IN":"OUT", "MAP", &req->request, req->request.dma,
+ req->request.length, req->trb_dma);
+}
+
+void dwc3_dbg_dma_dequeue(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+ if (ep_num < 2)
+ return;
+
+ ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+ "%02X-%-3.3s %-25.25s 0x%pK 0x%lx 0x%lx", ep_num >> 1,
+ ep_num & 1 ? "IN":"OUT", "DEQUEUE", &req->request,
+ req->request.dma, req->trb_dma);
+}
+
+void dwc3_dbg_dma_queue(struct dwc3 *dwc, u8 ep_num, struct dwc3_request *req)
+{
+ if (ep_num < 2)
+ return;
+
+ ipc_log_string(dwc->dwc_dma_ipc_log_ctxt,
+ "%02X-%-3.3s %-25.25s 0x%pK", ep_num >> 1,
+ ep_num & 1 ? "IN":"OUT", "QUEUE", &req->request);
+}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 64b1922..6b4dcd9 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -289,9 +289,11 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- if (req->trb)
+ if (req->trb) {
+ dbg_ep_unmap(dep->number, req);
usb_gadget_unmap_request_by_dev(dwc->sysdev,
&req->request, req->direction);
+ }
req->trb = NULL;
trace_dwc3_gadget_giveback(req);
@@ -1401,6 +1403,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
else
dwc3_prepare_one_trb_linear(dep, req);
+ dbg_ep_map(dep->number, req);
if (!dwc3_calc_trbs_left(dep))
return;
}
@@ -1545,6 +1548,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
+ dbg_ep_queue(dep->number, req);
/*
* NOTICE: Isochronous endpoints should NEVER be prestarted. We must
* wait for a XferNotReady event so we will know what's the current
@@ -1704,7 +1708,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
}
out1:
- dbg_event(dep->number, "DEQUEUE", 0);
+ dbg_ep_dequeue(dep->number, req);
/* giveback the request */
dwc3_gadget_giveback(dep, req, -ECONNRESET);
@@ -3771,12 +3775,16 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
- struct dwc3 *dwc = evt->dwc;
+ struct dwc3 *dwc;
u32 amount;
u32 count;
u32 reg;
ktime_t start_time;
+ if (!evt)
+ return IRQ_NONE;
+
+ dwc = evt->dwc;
start_time = ktime_get();
dwc->irq_cnt++;
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 93fc8fd..ce1afb0 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -99,6 +99,7 @@ struct qusb_phy {
void __iomem *ref_clk_base;
void __iomem *tcsr_clamp_dig_n;
void __iomem *tcsr_conn_box_spare;
+ void __iomem *eud_enable_reg;
struct clk *ref_clk_src;
struct clk *ref_clk;
@@ -404,6 +405,11 @@ static int qusb_phy_init(struct usb_phy *phy)
dev_dbg(phy->dev, "%s\n", __func__);
+ if (qphy->eud_enable_reg && readl_relaxed(qphy->eud_enable_reg)) {
+ dev_err(qphy->phy.dev, "eud is enabled\n");
+ return 0;
+ }
+
/*
* ref clock is enabled by default after power on reset. Linux clock
* driver will disable this clock as part of late init if peripheral
@@ -717,6 +723,11 @@ static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
__func__, qphy->dpdm_enable);
+ if (qphy->eud_enable_reg && readl_relaxed(qphy->eud_enable_reg)) {
+ dev_err(qphy->phy.dev, "eud is enabled\n");
+ return 0;
+ }
+
mutex_lock(&qphy->phy_lock);
if (!qphy->dpdm_enable) {
ret = qusb_phy_enable_power(qphy, true);
@@ -904,6 +915,16 @@ static int qusb_phy_probe(struct platform_device *pdev)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "eud_enable_reg");
+ if (res) {
+ qphy->eud_enable_reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qphy->eud_enable_reg)) {
+ dev_err(dev, "err getting eud_enable_reg address\n");
+ return PTR_ERR(qphy->eud_enable_reg);
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"ref_clk_addr");
if (res) {
qphy->ref_clk_base = devm_ioremap_nocache(dev,
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 3b7988cb..a1877e7 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1745,6 +1745,8 @@ static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int ret;
if (!inode_owner_or_capable(inode))
@@ -1785,6 +1787,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
goto out;
}
+ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+ if (list_empty(&fi->inmem_ilist))
+ list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+
+ /* add inode in inmem_list first and set atomic_file */
set_inode_flag(inode, FI_ATOMIC_FILE);
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -1826,11 +1834,8 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
goto err_out;
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
- if (!ret) {
- clear_inode_flag(inode, FI_ATOMIC_FILE);
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
- stat_dec_atomic_write(inode);
- }
+ if (!ret)
+ f2fs_drop_inmem_pages(inode);
} else {
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
}
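The new comment in f2fs_ioc_start_atomic_write ("add inode in inmem_list first and set atomic_file") encodes an ordering rule: the inode is published on the atomic-file tracking list before FI_ATOMIC_FILE becomes visible, so anything acting on the flag can rely on finding the inode listed. A toy userspace sketch of that publish order, with locking reduced to a plain mutex and all names invented.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct tracked_inode {
    struct tracked_inode *next;
    bool atomic_file;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tracked_inode *atomic_list;

static void start_atomic_write(struct tracked_inode *ino)
{
    /* 1. publish on the tracking list first, under the list lock */
    pthread_mutex_lock(&list_lock);
    ino->next = atomic_list;
    atomic_list = ino;
    pthread_mutex_unlock(&list_lock);

    /* 2. only then make the "atomic file" flag visible */
    ino->atomic_file = true;
}

int main(void)
{
    struct tracked_inode ino = { 0 };

    start_atomic_write(&ino);
    printf("listed=%d atomic=%d\n", atomic_list == &ino, ino.atomic_file);
    return 0;
}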
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index ac824f6..d71a143 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -185,8 +185,6 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
void f2fs_register_inmem_page(struct inode *inode, struct page *page)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct inmem_pages *new;
f2fs_trace_pid(page);
@@ -200,15 +198,11 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
INIT_LIST_HEAD(&new->list);
/* increase reference count with clean state */
- mutex_lock(&fi->inmem_lock);
get_page(page);
- list_add_tail(&new->list, &fi->inmem_pages);
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (list_empty(&fi->inmem_ilist))
- list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
+ mutex_lock(&F2FS_I(inode)->inmem_lock);
+ list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
- mutex_unlock(&fi->inmem_lock);
+ mutex_unlock(&F2FS_I(inode)->inmem_lock);
trace_f2fs_register_inmem_page(page, INMEM);
}
@@ -330,19 +324,17 @@ void f2fs_drop_inmem_pages(struct inode *inode)
mutex_lock(&fi->inmem_lock);
__revoke_inmem_pages(inode, &fi->inmem_pages,
true, false, true);
-
- if (list_empty(&fi->inmem_pages)) {
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (!list_empty(&fi->inmem_ilist))
- list_del_init(&fi->inmem_ilist);
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
- }
mutex_unlock(&fi->inmem_lock);
}
clear_inode_flag(inode, FI_ATOMIC_FILE);
fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
stat_dec_atomic_write(inode);
+
+ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
+ if (!list_empty(&fi->inmem_ilist))
+ list_del_init(&fi->inmem_ilist);
+ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
}
void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
@@ -471,11 +463,6 @@ int f2fs_commit_inmem_pages(struct inode *inode)
mutex_lock(&fi->inmem_lock);
err = __f2fs_commit_inmem_pages(inode);
-
- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
- if (!list_empty(&fi->inmem_ilist))
- list_del_init(&fi->inmem_ilist);
- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
mutex_unlock(&fi->inmem_lock);
clear_inode_flag(inode, FI_ATOMIC_COMMIT);
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index d0a8920..57cf4b6 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -540,7 +540,7 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
struct ovl_cattr *attr, bool origin)
{
int err;
- const struct cred *old_cred;
+ const struct cred *old_cred, *hold_cred = NULL;
struct cred *override_cred;
struct dentry *parent = dentry->d_parent;
@@ -575,7 +575,7 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
goto out_revert_creds;
}
}
- put_cred(override_creds(override_cred));
+ hold_cred = override_creds(override_cred);
put_cred(override_cred);
if (!ovl_dentry_is_whiteout(dentry))
@@ -584,7 +584,9 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
err = ovl_create_over_whiteout(dentry, inode, attr);
}
out_revert_creds:
- ovl_revert_creds(old_cred);
+ ovl_revert_creds(old_cred ?: hold_cred);
+ if (old_cred && hold_cred)
+ put_cred(hold_cred);
return err;
}
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 3b908e5..2cf6fa6 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -142,7 +142,7 @@
* a new RANGE of SSIDs to the msg_mask_tbl.
*/
#define MSG_MASK_TBL_CNT 26
-#define APPS_EVENT_LAST_ID 0xCAA
+#define APPS_EVENT_LAST_ID 0xCB4
#define MSG_SSID_0 0
#define MSG_SSID_0_LAST 130
@@ -916,7 +916,7 @@ static const uint32_t msg_bld_masks_25[] = {
/* LOG CODES */
static const uint32_t log_code_last_tbl[] = {
0x0, /* EQUIP ID 0 */
- 0x1C9A, /* EQUIP ID 1 */
+ 0x1CB2, /* EQUIP ID 1 */
0x0, /* EQUIP ID 2 */
0x0, /* EQUIP ID 3 */
0x4910, /* EQUIP ID 4 */
diff --git a/include/linux/msm_adreno_devfreq.h b/include/linux/msm_adreno_devfreq.h
index 2b6ea00..9091e52 100644
--- a/include/linux/msm_adreno_devfreq.h
+++ b/include/linux/msm_adreno_devfreq.h
@@ -56,6 +56,7 @@ struct devfreq_msm_adreno_tz_data {
s32 *p_down;
unsigned int *index;
uint64_t *ib;
+ bool floating;
} bus;
unsigned int device_id;
bool is_64;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index d75eb6d..e683aa3 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -389,7 +389,6 @@ enum power_supply_type {
POWER_SUPPLY_TYPE_BMS, /* Battery Monitor System */
POWER_SUPPLY_TYPE_PARALLEL, /* Parallel Path */
POWER_SUPPLY_TYPE_MAIN, /* Main Path */
- POWER_SUPPLY_TYPE_WIPOWER, /* Wipower */
POWER_SUPPLY_TYPE_UFP, /* Type-C UFP */
POWER_SUPPLY_TYPE_DFP, /* Type-C DFP */
POWER_SUPPLY_TYPE_CHARGE_PUMP, /* Charge Pump */
diff --git a/include/uapi/media/msm_cvp_private.h b/include/uapi/media/msm_cvp_private.h
index 2f58502..645ae6a 100644
--- a/include/uapi/media/msm_cvp_private.h
+++ b/include/uapi/media/msm_cvp_private.h
@@ -75,6 +75,8 @@
#define CVP_KMD_HFI_FD_FRAME_CMD (CVP_KMD_CMD_START + 16)
+#define CVP_KMD_UPDATE_POWER (CVP_KMD_CMD_START + 17)
+
#define CVP_KMD_SEND_CMD_PKT (CVP_KMD_CMD_START + 64)
#define CVP_KMD_RECEIVE_MSG_PKT (CVP_KMD_CMD_START + 65)
@@ -225,6 +227,24 @@ struct cvp_kmd_hfi_packet {
#define CVP_KMD_PROP_SESSION_PRIORITY 4
#define CVP_KMD_PROP_SESSION_SECURITY 5
#define CVP_KMD_PROP_SESSION_DSPMASK 6
+
+#define CVP_KMD_PROP_PWR_FDU 0x10
+#define CVP_KMD_PROP_PWR_ICA 0x11
+#define CVP_KMD_PROP_PWR_OD 0x12
+#define CVP_KMD_PROP_PWR_MPU 0x13
+#define CVP_KMD_PROP_PWR_FW 0x14
+#define CVP_KMD_PROP_PWR_DDR 0x15
+#define CVP_KMD_PROP_PWR_SYSCACHE 0x16
+#define CVP_KMD_PROP_PWR_FDU_OP 0x17
+#define CVP_KMD_PROP_PWR_ICA_OP 0x18
+#define CVP_KMD_PROP_PWR_OD_OP 0x19
+#define CVP_KMD_PROP_PWR_MPU_OP 0x1A
+#define CVP_KMD_PROP_PWR_FW_OP 0x1B
+#define CVP_KMD_PROP_PWR_DDR_OP 0x1C
+#define CVP_KMD_PROP_PWR_SYSCACHE_OP 0x1D
+
+#define MAX_KMD_PROP_NUM (CVP_KMD_PROP_PWR_SYSCACHE_OP + 1)
+
struct cvp_kmd_sys_property {
unsigned int prop_type;
unsigned int data;
diff --git a/kernel/module.c b/kernel/module.c
index 8644c18..984fdb8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2187,6 +2187,10 @@ static void free_module(struct module *mod)
/* Finally, free the core (containing the module structure) */
disable_ro_nx(&mod->core_layout);
+#ifdef CONFIG_DEBUG_MODULE_LOAD_INFO
+ pr_info("Unloaded %s: module core layout, start: 0x%pK size: 0x%x\n",
+ mod->name, mod->core_layout.base, mod->core_layout.size);
+#endif
module_memfree(mod->core_layout.base);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cca87ab..81ccd30 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9421,7 +9421,8 @@ group_similar_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
long diff = sg->sgc->min_capacity - ref->sgc->min_capacity;
long max = max(sg->sgc->min_capacity, ref->sgc->min_capacity);
- return abs(diff) < max >> 3;
+ return ((abs(diff) < max >> 3) ||
+ asym_cap_siblings(group_first_cpu(sg), group_first_cpu(ref)));
}
static inline enum
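
For reference, the unmodified test treats two groups as having similar
capacity when their min_capacity values differ by less than one eighth
(12.5%) of the larger one; the patch additionally accepts asymmetric-capacity
siblings regardless of that margin. A standalone illustration of the numeric
check, with hypothetical capacity values:

	/* Illustration only, not kernel code. */
	#include <stdbool.h>
	#include <stdlib.h>

	static bool similar_capacity(long a, long b)
	{
		long max = a > b ? a : b;

		/* "similar" means the gap is under 1/8th of the larger value */
		return labs(a - b) < (max >> 3);
	}

	/* similar_capacity(1024, 930) -> true  (94 < 128)
	 * similar_capacity(1024, 512) -> false (512 >= 128) */
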
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6c164cd..f3d53fc 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -155,6 +155,18 @@
actual pointer values, ignoring the kptr_restrict setting.
Not to be enabled on production builds.
+config DEBUG_MODULE_LOAD_INFO
+ bool "Use prints for module info under a debug flag"
+ help
+ If you say Y here, the resulting kernel image will include the
+ debug prints guarded by DEBUG_MODULE_LOAD_INFO. These help
+ developers debug loadable modules in the kernel.
+ Say Y here only if you plan to debug the kernel.
+ Not to be enabled on production builds.
+
+ If unsure, say N.
+
endmenu # "printk and dmesg options"
menu "Compile-time checks and compiler options"
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 4272af2..23c82c3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -1256,12 +1256,10 @@ void add_to_oom_reaper(struct task_struct *p)
task_unlock(p);
- if (strcmp(current->comm, ULMK_MAGIC) && __ratelimit(&reaper_rs)
+ if (!strcmp(current->comm, ULMK_MAGIC) && __ratelimit(&reaper_rs)
&& p->signal->oom_score_adj == 0) {
show_mem(SHOW_MEM_FILTER_NODES, NULL);
show_mem_call_notifiers();
- if (sysctl_oom_dump_tasks)
- dump_tasks(NULL, NULL);
}
put_task_struct(p);
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 97ac8c7..6390991 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -10,6 +10,8 @@
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include "internal.h"
@@ -24,6 +26,8 @@ struct page_owner {
short last_migrate_reason;
gfp_t gfp_mask;
depot_stack_handle_t handle;
+ int pid;
+ u64 ts_nsec;
};
static bool page_owner_disabled =
@@ -183,6 +187,8 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext,
page_owner->order = order;
page_owner->gfp_mask = gfp_mask;
page_owner->last_migrate_reason = -1;
+ page_owner->pid = current->pid;
+ page_owner->ts_nsec = local_clock();
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
@@ -243,6 +249,8 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
new_page_owner->last_migrate_reason =
old_page_owner->last_migrate_reason;
new_page_owner->handle = old_page_owner->handle;
+ new_page_owner->pid = old_page_owner->pid;
+ new_page_owner->ts_nsec = old_page_owner->ts_nsec;
/*
* We don't clear the bit on the oldpage as it's going to be freed
@@ -360,9 +368,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
return -ENOMEM;
ret = snprintf(kbuf, count,
- "Page allocated via order %u, mask %#x(%pGg)\n",
+ "Page allocated via order %u, mask %#x(%pGg), pid %d, ts %llu ns\n",
page_owner->order, page_owner->gfp_mask,
- &page_owner->gfp_mask);
+ &page_owner->gfp_mask, page_owner->pid,
+ page_owner->ts_nsec);
if (ret >= count)
goto err;
@@ -445,8 +454,9 @@ void __dump_page_owner(struct page *page)
}
depot_fetch_stack(handle, &trace);
- pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
- page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
+ pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, ts %llu ns\n",
+ page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
+ page_owner->pid, page_owner->ts_nsec);
print_stack_trace(&trace, 0);
if (page_owner->last_migrate_reason != -1)
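
With the allocating pid and timestamp recorded, a page_owner report entry
would now begin roughly like this (mask, pid and timestamp values are
hypothetical):

	Page allocated via order 0, mask 0x100cca(GFP_HIGHUSER_MOVABLE), pid 1234, ts 15233412345 ns
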
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 2cc2ec7..dc2287c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1162,7 +1162,7 @@ const char * const vmstat_text[] = {
"nr_vmscan_immediate_reclaim",
"nr_dirtied",
"nr_written",
- "", /* nr_indirectly_reclaimable */
+ "nr_indirectly_reclaimable",
"nr_unreclaimable_pages",
/* enum writeback_stat_item counters */