Merge branch 'android-msm-pixel-4.19-rvc' into android-msm-pixel-4.19-rvc-qpr1
Bug: 169785565
Change-Id: Ice16d647127f3c8a4014d120154b4a5f9960d602
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 99d027d..e3a1457 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2842,7 +2842,7 @@
return 0;
unmap:
- if (param->type == KGSL_USER_MEM_TYPE_DMABUF) {
+ if (kgsl_memdesc_usermem_type(&entry->memdesc) == KGSL_MEM_ENTRY_ION) {
kgsl_destroy_ion(dev_priv->device, entry->priv_data);
entry->memdesc.sgt = NULL;
}
@@ -3156,7 +3156,7 @@
return result;
error_attach:
- switch (memtype) {
+ switch (kgsl_memdesc_usermem_type(&entry->memdesc)) {
case KGSL_MEM_ENTRY_ION:
kgsl_destroy_ion(dev_priv->device, entry->priv_data);
entry->memdesc.sgt = NULL;
@@ -4503,6 +4503,8 @@
if (vma->vm_flags & VM_WRITE)
return -EPERM;
+ vma->vm_flags &= ~VM_MAYWRITE;
+
if (memdesc->size != vma_size) {
dev_err(device->dev,
"memstore bad size: %d should be %llu\n",
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index e411f7e..670b1b2 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -2409,6 +2409,22 @@
return addr;
}
+static bool iommu_addr_in_svm_ranges(struct kgsl_iommu_pt *pt,
+ u64 gpuaddr, u64 size)
+{
+ if ((gpuaddr >= pt->compat_va_start && gpuaddr < pt->compat_va_end) &&
+ ((gpuaddr + size) > pt->compat_va_start &&
+ (gpuaddr + size) <= pt->compat_va_end))
+ return true;
+
+ if ((gpuaddr >= pt->svm_start && gpuaddr < pt->svm_end) &&
+ ((gpuaddr + size) > pt->svm_start &&
+ (gpuaddr + size) <= pt->svm_end))
+ return true;
+
+ return false;
+}
+
static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
uint64_t gpuaddr, uint64_t size)
{
@@ -2416,9 +2432,8 @@
struct kgsl_iommu_pt *pt = pagetable->priv;
struct rb_node *node;
- /* Make sure the requested address doesn't fall in the global range */
- if (ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr) ||
- ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr + size))
+ /* Make sure the requested address doesn't fall out of SVM range */
+ if (!iommu_addr_in_svm_ranges(pt, gpuaddr, size))
return -ENOMEM;
spin_lock(&pagetable->lock);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2c85d07..0512216 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1426,6 +1426,17 @@
}
/*
+ * Compute the size of a report.
+ */
+static size_t hid_compute_report_size(struct hid_report *report)
+{
+ if (report->size)
+ return ((report->size - 1) >> 3) + 1;
+
+ return 0;
+}
+
+/*
* Create a report. 'data' has to be allocated using
* hid_alloc_report_buf() so that it has proper size.
*/
@@ -1437,7 +1448,7 @@
if (report->id > 0)
*data++ = report->id;
- memset(data, 0, ((report->size - 1) >> 3) + 1);
+ memset(data, 0, hid_compute_report_size(report));
for (n = 0; n < report->maxfield; n++)
hid_output_field(report->device, report->field[n], data);
}
@@ -1564,7 +1575,7 @@
csize--;
}
- rsize = ((report->size - 1) >> 3) + 1;
+ rsize = hid_compute_report_size(report);
if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE - 1;
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index dbb0cbe6..0062b37 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1125,6 +1125,10 @@
}
mapped:
+ /* Mapping failed, bail out */
+ if (!bit)
+ return;
+
if (device->driver->input_mapped &&
device->driver->input_mapped(device, hidinput, field, usage,
&bit, &max) < 0) {
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 19dfd8a..515963a 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -841,6 +841,8 @@
code = BTN_0 + ((usage->hid - 1) & HID_USAGE);
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
+ if (!*bit)
+ return -1;
input_set_capability(hi->input, EV_KEY, code);
return 1;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 3eefce3..12ba31a 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -474,6 +474,10 @@
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
+static int __qseecom_alloc_coherent_buf(
+ uint32_t size, u8 **vaddr, phys_addr_t *paddr);
+static void __qseecom_free_coherent_buf(uint32_t size,
+ u8 *vaddr, phys_addr_t paddr);
#define QSEECOM_SCM_EBUSY_WAIT_MS 30
#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
@@ -3664,7 +3668,8 @@
}
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
- struct qseecom_send_cmd_req *req)
+ struct qseecom_send_cmd_req *req,
+ bool is_phys_adr)
{
int ret = 0;
u32 reqd_len_sb_in = 0;
@@ -3706,11 +3711,20 @@
if (qseecom.qsee_version < QSEE_VERSION_40) {
send_data_req.app_id = data->client.app_id;
- send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
- data, (uintptr_t)req->cmd_req_buf));
+
+ if (!is_phys_adr) {
+ send_data_req.req_ptr =
+ (uint32_t)(__qseecom_uvirt_to_kphys
+ (data, (uintptr_t)req->cmd_req_buf));
+ send_data_req.rsp_ptr =
+ (uint32_t)(__qseecom_uvirt_to_kphys(
+ data, (uintptr_t)req->resp_buf));
+ } else {
+ send_data_req.req_ptr = (uint32_t)req->cmd_req_buf;
+ send_data_req.rsp_ptr = (uint32_t)req->resp_buf;
+ }
+
send_data_req.req_len = req->cmd_req_len;
- send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
- data, (uintptr_t)req->resp_buf));
send_data_req.rsp_len = req->resp_len;
send_data_req.sglistinfo_ptr =
(uint32_t)data->sglistinfo_shm.paddr;
@@ -3721,11 +3735,21 @@
cmd_len = sizeof(struct qseecom_client_send_data_ireq);
} else {
send_data_req_64bit.app_id = data->client.app_id;
- send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
- (uintptr_t)req->cmd_req_buf);
+
+ if (!is_phys_adr) {
+ send_data_req_64bit.req_ptr =
+ __qseecom_uvirt_to_kphys(data,
+ (uintptr_t)req->cmd_req_buf);
+ send_data_req_64bit.rsp_ptr =
+ __qseecom_uvirt_to_kphys(data,
+ (uintptr_t)req->resp_buf);
+ } else {
+ send_data_req_64bit.req_ptr =
+ (uintptr_t)req->cmd_req_buf;
+ send_data_req_64bit.rsp_ptr =
+ (uintptr_t)req->resp_buf;
+ }
send_data_req_64bit.req_len = req->cmd_req_len;
- send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
- (uintptr_t)req->resp_buf);
send_data_req_64bit.rsp_len = req->resp_len;
/* check if 32bit app's phys_addr region is under 4GB.*/
if ((data->client.app_arch == ELFCLASS32) &&
@@ -3823,7 +3847,7 @@
if (__validate_send_cmd_inputs(data, &req))
return -EINVAL;
- ret = __qseecom_send_cmd(data, &req);
+ ret = __qseecom_send_cmd(data, &req, false);
return ret;
}
@@ -4342,6 +4366,9 @@
int i;
struct qseecom_send_modfd_cmd_req req;
struct qseecom_send_cmd_req send_cmd_req;
+ void *origin_req_buf_kvirt, *origin_rsp_buf_kvirt;
+ phys_addr_t pa;
+ u8 *va = NULL;
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
@@ -4365,33 +4392,57 @@
return -EINVAL;
}
}
- req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
- (uintptr_t)req.cmd_req_buf);
- req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
- (uintptr_t)req.resp_buf);
+
+	/* Back up the original (kernel-virtual) request/response buffer addresses */
+ origin_req_buf_kvirt = (void *)__qseecom_uvirt_to_kvirt(data,
+ (uintptr_t)req.cmd_req_buf);
+ origin_rsp_buf_kvirt = (void *)__qseecom_uvirt_to_kvirt(data,
+ (uintptr_t)req.resp_buf);
+
+	/* Allocate kernel buffers for request/response. NOTE(review): ret from the alloc is not checked before the memcpy below — a failed alloc leaves va NULL; verify */
+ ret = __qseecom_alloc_coherent_buf(req.cmd_req_len + req.resp_len,
+ &va, &pa);
+ req.cmd_req_buf = va;
+ send_cmd_req.cmd_req_buf = (void *)pa;
+
+ req.resp_buf = va + req.cmd_req_len;
+ send_cmd_req.resp_buf = (void *)pa + req.cmd_req_len;
+
+	/* Copy the data into the kernel request and response buffers */
+ memcpy(req.cmd_req_buf, origin_req_buf_kvirt, req.cmd_req_len);
+ memcpy(req.resp_buf, origin_rsp_buf_kvirt, req.resp_len);
if (!is_64bit_addr) {
ret = __qseecom_update_cmd_buf(&req, false, data);
if (ret)
- return ret;
- ret = __qseecom_send_cmd(data, &send_cmd_req);
+ goto out;
+ ret = __qseecom_send_cmd(data, &send_cmd_req, true);
if (ret)
- return ret;
+ goto out;
ret = __qseecom_update_cmd_buf(&req, true, data);
if (ret)
- return ret;
+ goto out;
} else {
ret = __qseecom_update_cmd_buf_64(&req, false, data);
if (ret)
- return ret;
- ret = __qseecom_send_cmd(data, &send_cmd_req);
+ goto out;
+ ret = __qseecom_send_cmd(data, &send_cmd_req, true);
if (ret)
- return ret;
+ goto out;
ret = __qseecom_update_cmd_buf_64(&req, true, data);
if (ret)
- return ret;
+ goto out;
}
+	/* Copy the response back to the userspace buffer */
+ memcpy(origin_rsp_buf_kvirt, req.resp_buf, req.resp_len);
+ memcpy(origin_req_buf_kvirt, req.cmd_req_buf, req.cmd_req_len);
+
+out:
+ if (req.cmd_req_buf)
+ __qseecom_free_coherent_buf(req.cmd_req_len + req.resp_len,
+ req.cmd_req_buf, (phys_addr_t)send_cmd_req.cmd_req_buf);
+
return ret;
}
@@ -5258,7 +5309,7 @@
dmac_flush_range(req.cmd_req_buf, req.cmd_req_buf + req.cmd_req_len);
- ret = __qseecom_send_cmd(data, &req);
+ ret = __qseecom_send_cmd(data, &req, false);
dmac_flush_range(req.resp_buf, req.resp_buf + req.resp_len);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 6a76e99..0a46ce1 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -588,12 +588,13 @@
int i, retval;
spin_lock_irqsave(&io->lock, flags);
- if (io->status) {
+ if (io->status || io->count == 0) {
spin_unlock_irqrestore(&io->lock, flags);
return;
}
/* shut everything down */
io->status = -ECONNRESET;
+ io->count++; /* Keep the request alive until we're done */
spin_unlock_irqrestore(&io->lock, flags);
for (i = io->entries - 1; i >= 0; --i) {
@@ -607,6 +608,12 @@
dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
__func__, retval);
}
+
+ spin_lock_irqsave(&io->lock, flags);
+ io->count--;
+ if (!io->count)
+ complete(&io->complete);
+ spin_unlock_irqrestore(&io->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_sg_cancel);
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 8506637..a46b683 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -956,34 +956,49 @@
* @max: maximal valid usage->code to consider later (out parameter)
* @type: input event type (EV_KEY, EV_REL, ...)
* @c: code which corresponds to this usage and type
+ *
+ * The value pointed to by @bit will be set to NULL if either @type is
+ * an unhandled event type, or if @c is out of range for @type. This
+ * can be used as an error condition.
*/
static inline void hid_map_usage(struct hid_input *hidinput,
struct hid_usage *usage, unsigned long **bit, int *max,
- __u8 type, __u16 c)
+ __u8 type, unsigned int c)
{
struct input_dev *input = hidinput->input;
-
- usage->type = type;
- usage->code = c;
+ unsigned long *bmap = NULL;
+ unsigned int limit = 0;
switch (type) {
case EV_ABS:
- *bit = input->absbit;
- *max = ABS_MAX;
+ bmap = input->absbit;
+ limit = ABS_MAX;
break;
case EV_REL:
- *bit = input->relbit;
- *max = REL_MAX;
+ bmap = input->relbit;
+ limit = REL_MAX;
break;
case EV_KEY:
- *bit = input->keybit;
- *max = KEY_MAX;
+ bmap = input->keybit;
+ limit = KEY_MAX;
break;
case EV_LED:
- *bit = input->ledbit;
- *max = LED_MAX;
+ bmap = input->ledbit;
+ limit = LED_MAX;
break;
}
+
+ if (unlikely(c > limit || !bmap)) {
+ pr_warn_ratelimited("%s: Invalid code %d type %d\n",
+ input->name, c, type);
+ *bit = NULL;
+ return;
+ }
+
+ usage->type = type;
+ usage->code = c;
+ *max = limit;
+ *bit = bmap;
}
/**
@@ -997,7 +1012,8 @@
__u8 type, __u16 c)
{
hid_map_usage(hidinput, usage, bit, max, type, c);
- clear_bit(c, *bit);
+ if (*bit)
+ clear_bit(usage->code, *bit);
}
/**
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2efc0f3..ceb4621 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -5596,40 +5596,60 @@
static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
{
- int err = 0;
- u32 perm;
+ int rc = 0;
+ unsigned int msg_len;
+ unsigned int data_len = skb->len;
+ unsigned char *data = skb->data;
struct nlmsghdr *nlh;
struct sk_security_struct *sksec = sk->sk_security;
+ u16 sclass = sksec->sclass;
+ u32 perm;
- if (skb->len < NLMSG_HDRLEN) {
- err = -EINVAL;
- goto out;
- }
- nlh = nlmsg_hdr(skb);
+ while (data_len >= nlmsg_total_size(0)) {
+ nlh = (struct nlmsghdr *)data;
- err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
- if (err) {
- if (err == -EINVAL) {
+ /* NOTE: the nlmsg_len field isn't reliably set by some netlink
+ * users which means we can't reject skb's with bogus
+ * length fields; our solution is to follow what
+ * netlink_rcv_skb() does and simply skip processing at
+ * messages with length fields that are clearly junk
+ */
+ if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len)
+ return 0;
+
+ rc = selinux_nlmsg_lookup(sclass, nlh->nlmsg_type, &perm);
+ if (rc == 0) {
+ rc = sock_has_perm(sk, perm);
+ if (rc)
+ return rc;
+ } else if (rc == -EINVAL) {
+ /* -EINVAL is a missing msg/perm mapping */
pr_warn_ratelimited("SELinux: unrecognized netlink"
- " message: protocol=%hu nlmsg_type=%hu sclass=%s"
- " pig=%d comm=%s\n",
- sk->sk_protocol, nlh->nlmsg_type,
- secclass_map[sksec->sclass - 1].name,
- task_pid_nr(current), current->comm);
- if (!enforcing_enabled(&selinux_state) ||
- security_get_allow_unknown(&selinux_state))
- err = 0;
+ " message: protocol=%hu nlmsg_type=%hu sclass=%s"
+ " pid=%d comm=%s\n",
+ sk->sk_protocol, nlh->nlmsg_type,
+ secclass_map[sclass - 1].name,
+ task_pid_nr(current), current->comm);
+ if (enforcing_enabled(&selinux_state) &&
+ !security_get_allow_unknown(&selinux_state))
+ return rc;
+ rc = 0;
+ } else if (rc == -ENOENT) {
+ /* -ENOENT is a missing socket/class mapping, ignore */
+ rc = 0;
+ } else {
+ return rc;
}
- /* Ignore */
- if (err == -ENOENT)
- err = 0;
- goto out;
+ /* move to the next message after applying netlink padding */
+ msg_len = NLMSG_ALIGN(nlh->nlmsg_len);
+ if (msg_len >= data_len)
+ return 0;
+ data_len -= msg_len;
+ data += msg_len;
}
- err = sock_has_perm(sk, perm);
-out:
- return err;
+ return rc;
}
#ifdef CONFIG_NETFILTER