Merge branch 'LA.UM.9.1.R1.10.00.00.604.035' via branch 'qcom-msm-4.14' into android-msm-floral-4.14

Bug: 152368391
Change-Id: Ife9545fb8abb9ab82b2d44370e0b8dbee0103269
Signed-off-by: Wilson Sung <wilsonsung@google.com>
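
Note on the new rmnet_shs_netdev interface (illustration only, not part of the patch): the rmnet_shs changes below export per-netdev coalescing and throughput stats through a new "rmnet_shs_netdev" proc entry backed by a single mmap'd page, laid out as a u16 count of active netdevs followed by packed rmnet_shs_wq_netdev_usr_s records. A rough, hypothetical userspace reader could look like the sketch below; the proc directory name and the real consumer are not visible in this diff, so the path is taken from the command line and the struct is re-declared locally under that assumption.

/*
 * Hypothetical sketch: read the rmnet_shs_netdev shared page.
 * Struct layout mirrors rmnet_shs_wq_netdev_usr_s in rmnet_shs_wq_mem.h.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define SHS_IFNAMSIZ 16	/* RMNET_SHS_IFNAMSIZ */
#define SHS_MAX_NETDEVS 40	/* RMNET_SHS_MAX_NETDEVS */

struct __attribute__((__packed__)) shs_netdev_usr {
	char name[SHS_IFNAMSIZ];
	uint64_t coal_ip_miss;
	uint64_t hw_evict;
	uint64_t coal_rx_pkts;
	uint64_t coal_tcp;
	uint64_t coal_tcp_bytes;
	uint64_t coal_udp;
	uint64_t coal_udp_bytes;
	uint64_t udp_rx_bps;
	uint64_t tcp_rx_bps;
	uint8_t  mux_id;
};

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <path to rmnet_shs_netdev proc file>\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The kernel side backs the proc file with one zeroed page. */
	void *page = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (page == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* First two bytes hold the netdev count, then the packed array. */
	uint16_t num_netdevs;
	memcpy(&num_netdevs, page, sizeof(num_netdevs));

	const struct shs_netdev_usr *nd =
		(const void *)((const char *)page + sizeof(uint16_t));

	for (uint16_t i = 0; i < num_netdevs && i < SHS_MAX_NETDEVS; i++)
		printf("%.16s mux_id=%u coal_rx_pkts=%llu tcp_rx_bps=%llu udp_rx_bps=%llu\n",
		       nd[i].name, (unsigned)nd[i].mux_id,
		       (unsigned long long)nd[i].coal_rx_pkts,
		       (unsigned long long)nd[i].tcp_rx_bps,
		       (unsigned long long)nd[i].udp_rx_bps);

	munmap(page, 4096);
	close(fd);
	return 0;
}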
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
index 7ce0a31..b262b23 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
@@ -3126,7 +3126,7 @@
 
 static INT write_phy_regs(INT phy_id, INT phy_reg, INT phy_reg_data)
 {
-	ULONG RETRYCOUNT = 1000;
+	ULONG RETRYCOUNT = 5000;
 	ULONG vy_count;
 	volatile ULONG VARMAC_GMIIAR;
 
@@ -3139,7 +3139,7 @@
 			return -Y_FAILURE;
 
 		vy_count++;
-		mdelay(1);
+		udelay(200);
 
 		MAC_GMIIAR_RGRD(VARMAC_GMIIAR);
 		if (GET_VALUE(
@@ -3173,7 +3173,7 @@
 			return -Y_FAILURE;
 
 		vy_count++;
-		mdelay(1);
+		udelay(200);
 
 		MAC_GMIIAR_RGRD(VARMAC_GMIIAR);
 		if (GET_VALUE(
@@ -3197,7 +3197,7 @@
 
 static INT read_phy_regs(INT phy_id, INT phy_reg, INT *phy_reg_data)
 {
-	ULONG RETRYCOUNT = 1000;
+	ULONG RETRYCOUNT = 5000;
 	ULONG vy_count;
 	volatile ULONG VARMAC_GMIIAR;
 	ULONG VARMAC_GMIIDR;
@@ -3211,8 +3211,7 @@
 			return -Y_FAILURE;
 
 		vy_count++;
-		mdelay(1);
-
+		udelay(200);
 		MAC_GMIIAR_RGRD(VARMAC_GMIIAR);
 		if (GET_VALUE(
 				VARMAC_GMIIAR, MAC_GMIIAR_GB_LPOS,
@@ -3243,7 +3242,7 @@
 			return -Y_FAILURE;
 
 		vy_count++;
-		mdelay(1);
+		udelay(200);
 
 		MAC_GMIIAR_RGRD(VARMAC_GMIIAR);
 		if (GET_VALUE(
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
index faac49e..9ee36cd 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -445,6 +445,43 @@
 	pr_alert("\n****************************************************\n");
 }
 
+static void DWC_ETH_QOS_request_phy_wol(struct DWC_ETH_QOS_prv_data *pdata)
+{
+	pdata->phy_wol_supported = 0;
+	pdata->phy_wol_wolopts = 0;
+
+	/* Check if phydev is valid*/
+	/* Check and enable Wake-on-LAN functionality in PHY*/
+	if (pdata->phydev) {
+		struct ethtool_wolinfo wol = {.cmd = ETHTOOL_GWOL};
+		wol.supported = 0;
+		wol.wolopts= 0;
+
+		phy_ethtool_get_wol(pdata->phydev, &wol);
+		pdata->phy_wol_supported = wol.supported;
+
+		/* Try to enable supported Wake-on-LAN features in PHY*/
+		if (wol.supported) {
+
+			device_set_wakeup_capable(&pdata->pdev->dev, 1);
+
+			wol.cmd = ETHTOOL_SWOL;
+			wol.wolopts = wol.supported;
+
+			if (!phy_ethtool_set_wol(pdata->phydev, &wol)){
+				pdata->phy_wol_wolopts = wol.wolopts;
+
+				enable_irq_wake(pdata->phy_irq);
+
+				device_set_wakeup_enable(&pdata->pdev->dev, 1);
+				EMACDBG("Enabled WoL[0x%x] in %s\n", wol.wolopts,
+						 pdata->phydev->drv->name);
+				pdata->wol_enabled = 1;
+			}
+		}
+	}
+}
+
 /*!
  * \brief API to enable or disable PHY hibernation mode
  *
@@ -1007,6 +1044,9 @@
 		}
 #endif
 
+		if (pdata->phy_intr_en && !pdata->wol_enabled)
+			DWC_ETH_QOS_request_phy_wol(pdata);
+
 		if (pdata->ipa_enabled && netif_running(dev)) {
 			if (phydev->link == 1)
 				 DWC_ETH_QOS_ipa_offload_event_handler(pdata, EV_PHY_LINK_UP);
@@ -1030,42 +1070,6 @@
 	DBGPR_MDIO("<--DWC_ETH_QOS_adjust_link\n");
 }
 
-static void DWC_ETH_QOS_request_phy_wol(struct DWC_ETH_QOS_prv_data *pdata)
-{
-	pdata->phy_wol_supported = 0;
-	pdata->phy_wol_wolopts = 0;
-
-	/* Check if phydev is valid*/
-	/* Check and enable Wake-on-LAN functionality in PHY*/
-	if (pdata->phydev) {
-		struct ethtool_wolinfo wol = {.cmd = ETHTOOL_GWOL};
-		wol.supported = 0;
-		wol.wolopts= 0;
-
-		phy_ethtool_get_wol(pdata->phydev, &wol);
-		pdata->phy_wol_supported = wol.supported;
-
-		/* Try to enable supported Wake-on-LAN features in PHY*/
-		if (wol.supported) {
-
-			device_set_wakeup_capable(&pdata->pdev->dev, 1);
-
-			wol.cmd = ETHTOOL_SWOL;
-			wol.wolopts = wol.supported;
-
-			if (!phy_ethtool_set_wol(pdata->phydev, &wol)){
-				pdata->phy_wol_wolopts = wol.wolopts;
-
-				enable_irq_wake(pdata->phy_irq);
-
-				device_set_wakeup_enable(&pdata->pdev->dev, 1);
-				EMACDBG("Enabled WoL[0x%x] in %s\n", wol.wolopts,
-						 pdata->phydev->drv->name);
-			}
-		}
-	}
-}
-
 bool DWC_ETH_QOS_is_phy_link_up(struct DWC_ETH_QOS_prv_data *pdata)
 {
 	/* PHY driver initializes phydev->link=1.
@@ -1093,8 +1097,6 @@
 {
 	struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
-	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-	char bus_id[MII_BUS_ID_SIZE];
 	u32 phydata = 0;
 	int ret = 0;
 
@@ -1104,22 +1106,24 @@
 	pdata->speed = 0;
 	pdata->oldduplex = -1;
 
-	snprintf(bus_id, MII_BUS_ID_SIZE, "dwc_phy-%x", pdata->bus_id);
-
-	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
-		 pdata->phyaddr);
-
-	DBGPR_MDIO("trying to attach to %s\n", phy_id_fmt);
-
-	phydev = phy_connect(dev, phy_id_fmt, &DWC_ETH_QOS_adjust_link,
-			     pdata->interface);
-
+	phydev = mdiobus_get_phy(pdata->mii, pdata->phyaddr);
 	if (IS_ERR(phydev)) {
 		pr_alert("%s: Could not attach to PHY\n", dev->name);
 		return PTR_ERR(phydev);
 	}
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+	phydev->skip_sw_reset = true;
+#endif
+	ret = phy_connect_direct(dev, phydev, &DWC_ETH_QOS_adjust_link,
+							pdata->interface);
+	if (ret) {
+		EMACERR("phy_connect_direct failed\n");
+		return ret;
+	}
+
 	if (phydev->phy_id == 0) {
+		pr_alert("%s: Invalid phy id\n", dev->name);
 		phy_disconnect(phydev);
 		return -ENODEV;
 	}
@@ -1194,10 +1198,8 @@
 		phydev->irq = PHY_IGNORE_INTERRUPT;
 		phydev->interrupts =  PHY_INTERRUPT_ENABLED;
 
-		if (phydev->drv->config_intr &&
-			!phydev->drv->config_intr(phydev)){
-			DWC_ETH_QOS_request_phy_wol(pdata);
-		} else {
+		if (!(phydev->drv->config_intr &&
+			!phydev->drv->config_intr(phydev))){
 			EMACERR("Failed to configure PHY interrupts");
 			BUG();
 		}
@@ -1250,6 +1252,21 @@
 
 	DBGPR_MDIO("-->DWC_ETH_QOS_mdio_register\n");
 
+	if (pdata->res_data->phy_addr != -1) {
+		phy_reg_read_status =
+		   DWC_ETH_QOS_mdio_read_direct(pdata, pdata->res_data->phy_addr, MII_BMSR,
+										&mii_status);
+		if (phy_reg_read_status == 0) {
+			if (mii_status != 0x0000 && mii_status != 0xffff) {
+				phy_detected = 1;
+				phyaddr = pdata->res_data->phy_addr;
+				EMACINFO("skip_phy_detection (phyaddr)%d\n", phyaddr);
+				goto skip_phy_detection;
+			} else
+				EMACERR("Invalid phy address specified in device tree\n");
+		}
+	}
+
 	/* find the phy ID or phy address which is connected to our MAC */
 	for (phyaddr = 0; phyaddr < 32; phyaddr++) {
 
@@ -1276,6 +1293,8 @@
 		return -ENOLINK;
 	}
 
+skip_phy_detection:
+
 	pdata->phyaddr = phyaddr;
 	pdata->bus_id = 0x1;
 	pdata->phy_intr_en = false;
@@ -1307,7 +1326,7 @@
 	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name,
 		 pdata->bus_id);
 	new_bus->priv = dev;
-	new_bus->phy_mask = 0;
+	new_bus->phy_mask = ~(1 << phyaddr);
 	new_bus->parent = &pdata->pdev->dev;
 	ret = mdiobus_register(new_bus);
 	if (ret != 0) {
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
index 50d1e55..32b6a57 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
@@ -920,6 +920,16 @@
 		dwc_eth_qos_res_data.is_pinctrl_names = true;
 		EMACDBG("qcom,pinctrl-names present\n");
 	}
+	dwc_eth_qos_res_data.phy_addr = -1;
+	if (of_property_read_bool(pdev->dev.of_node, "emac-phy-addr")) {
+		ret = of_property_read_u32(pdev->dev.of_node, "emac-phy-addr",
+			&dwc_eth_qos_res_data.phy_addr);
+		if (ret) {
+			EMACINFO("phy_addr not specified, using dynamic phy detection\n");
+			dwc_eth_qos_res_data.phy_addr = -1;
+		}
+		EMACINFO("phy_addr = %d\n", dwc_eth_qos_res_data.phy_addr);
+	}
 
 	return ret;
 
@@ -1491,6 +1501,8 @@
 
 		gpio_set_value(dwc_eth_qos_res_data.gpio_phy_reset, PHY_RESET_GPIO_HIGH);
 		EMACDBG("PHY is out of reset successfully\n");
+		/* Add a 50ms delay so the PHY gets sufficient time to come out of reset */
+		mdelay(50);
 	}
 
 	return ret;
@@ -2111,6 +2123,11 @@
 			goto err_out_dev_failed;
 	}
 	EMACDBG("<-- DWC_ETH_QOS_probe\n");
+
+#if defined DWC_ETH_QOS_BUILTIN && defined CONFIG_MSM_BOOT_TIME_MARKER
+	place_marker("M - Ethernet probe end");
+#endif
+
 	return ret;
 
  err_out_dev_failed:
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
index ee29121..b0a1210 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
@@ -1584,6 +1584,7 @@
 	unsigned int emac_hw_version_type;
 	bool early_eth_en;
 	bool pps_lpass_conn_en;
+	int phy_addr;
 };
 
 struct DWC_ETH_QOS_prv_ipa_data {
@@ -1878,6 +1879,7 @@
 	struct class* avb_class_b_class;
 	struct delayed_work ipv6_addr_assign_wq;
 	bool print_kpi;
+	bool wol_enabled;
 };
 
 struct ip_params {
diff --git a/drivers/rmnet/shs/rmnet_shs.h b/drivers/rmnet/shs/rmnet_shs.h
index b7bf773..99ca7e4 100644
--- a/drivers/rmnet/shs/rmnet_shs.h
+++ b/drivers/rmnet/shs/rmnet_shs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -156,6 +156,8 @@
 	RMNET_SHS_SWITCH_WQ_RATE,
 	RMNET_SHS_OOO_PACKET_SWITCH,
 	RMNET_SHS_OOO_PACKET_TOTAL,
+	RMNET_SHS_SWITCH_PACKET_BURST,
+	RMNET_SHS_SWITCH_CORE_BACKLOG,
 	RMNET_SHS_SWITCH_MAX_REASON
 };
 
@@ -195,6 +197,7 @@
 	u32 qtail;
 	u32 qdiff;
 	u32 parkedlen;
+	u32 seg;
 	u8 prio;
 	u8 wqprio;
 };
diff --git a/drivers/rmnet/shs/rmnet_shs_config.h b/drivers/rmnet/shs/rmnet_shs_config.h
index dc385e4..e55f5f8 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.h
+++ b/drivers/rmnet/shs/rmnet_shs_config.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
 	RMNET_SHS_WQ_INVALID_PTR_ERR,
 	RMNET_SHS_WQ_NODE_MALLOC_ERR,
 	RMNET_SHS_WQ_NL_SOCKET_ERR,
+	RMNET_SHS_CPU_FLOWS_BNDS_ERR,
 	RMNET_SHS_CRIT_ERR_MAX
 };
 
diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c
index 2accd29..bb2f175 100755
--- a/drivers/rmnet/shs/rmnet_shs_main.c
+++ b/drivers/rmnet/shs/rmnet_shs_main.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,6 +30,8 @@
 #define NS_IN_MS 1000000
 #define LPWR_CLUSTER 0
 #define PERF_CLUSTER 4
+#define DEF_CORE_WAIT 10
+
 #define PERF_CORES 4
 
 #define INVALID_CPU -1
@@ -42,6 +44,8 @@
 #define GET_CTIMER(CPU) rmnet_shs_cfg.core_flush[CPU].core_timer
 
 #define SKB_FLUSH 0
+#define INCREMENT 1
+#define DECREMENT 0
 /* Local Definitions and Declarations */
 DEFINE_SPINLOCK(rmnet_shs_ht_splock);
 DEFINE_HASHTABLE(RMNET_SHS_HT, RMNET_SHS_HT_SIZE);
@@ -65,15 +69,15 @@
 module_param_array(rmnet_shs_flush_reason, ulong, 0, 0444);
 MODULE_PARM_DESC(rmnet_shs_flush_reason, "rmnet shs skb flush trigger type");
 
-unsigned int rmnet_shs_byte_store_limit __read_mostly = 271800 * 8;
+unsigned int rmnet_shs_byte_store_limit __read_mostly = 271800 * 80;
 module_param(rmnet_shs_byte_store_limit, uint, 0644);
 MODULE_PARM_DESC(rmnet_shs_byte_store_limit, "Maximum byte module will park");
 
-unsigned int rmnet_shs_pkts_store_limit __read_mostly = 2100;
+unsigned int rmnet_shs_pkts_store_limit __read_mostly = 2100 * 8;
 module_param(rmnet_shs_pkts_store_limit, uint, 0644);
 MODULE_PARM_DESC(rmnet_shs_pkts_store_limit, "Maximum pkts module will park");
 
-unsigned int rmnet_shs_max_core_wait __read_mostly = 10;
+unsigned int rmnet_shs_max_core_wait __read_mostly = 45;
 module_param(rmnet_shs_max_core_wait, uint, 0644);
 MODULE_PARM_DESC(rmnet_shs_max_core_wait,
 		 "Max wait module will wait during move to perf core in ms");
@@ -93,6 +97,11 @@
 MODULE_PARM_DESC(rmnet_shs_fall_back_timer,
 		 "Option to enable fall back limit for parking");
 
+unsigned int rmnet_shs_backlog_max_pkts __read_mostly = 1200;
+module_param(rmnet_shs_backlog_max_pkts, uint, 0644);
+MODULE_PARM_DESC(rmnet_shs_backlog_max_pkts,
+		 "Max pkts in backlog before prioritizing a core");
+
 unsigned int rmnet_shs_inst_rate_max_pkts __read_mostly = 2500;
 module_param(rmnet_shs_inst_rate_max_pkts, uint, 0644);
 MODULE_PARM_DESC(rmnet_shs_inst_rate_max_pkts,
@@ -110,17 +119,29 @@
 module_param_array(rmnet_shs_cpu_max_qdiff, uint, 0, 0644);
 MODULE_PARM_DESC(rmnet_shs_cpu_max_qdiff, "Max queue length seen of each core");
 
+unsigned int rmnet_shs_cpu_ooo_count[MAX_CPUS];
+module_param_array(rmnet_shs_cpu_ooo_count, uint, 0, 0644);
+MODULE_PARM_DESC(rmnet_shs_cpu_ooo_count, "OOO count for each cpu");
+
 unsigned int rmnet_shs_cpu_max_coresum[MAX_CPUS];
 module_param_array(rmnet_shs_cpu_max_coresum, uint, 0, 0644);
 MODULE_PARM_DESC(rmnet_shs_cpu_max_coresum, "Max coresum seen of each core");
 
+static void rmnet_shs_change_cpu_num_flows(u16 map_cpu, bool inc)
+{
+	if (map_cpu < MAX_CPUS)
+		(inc) ? cpu_num_flows[map_cpu]++ : cpu_num_flows[map_cpu]--;
+	else
+		rmnet_shs_crit_err[RMNET_SHS_CPU_FLOWS_BNDS_ERR]++;
+}
+
 void rmnet_shs_cpu_node_remove(struct rmnet_shs_skbn_s *node)
 {
 	SHS_TRACE_LOW(RMNET_SHS_CPU_NODE, RMNET_SHS_CPU_NODE_FUNC_REMOVE,
 			    0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
 	list_del_init(&node->node_id);
-	cpu_num_flows[node->map_cpu]--;
+	rmnet_shs_change_cpu_num_flows(node->map_cpu, DECREMENT);
 
 }
 
@@ -131,7 +152,7 @@
 			    0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
 	list_add(&node->node_id, hd);
-	cpu_num_flows[node->map_cpu]++;
+	rmnet_shs_change_cpu_num_flows(node->map_cpu, INCREMENT);
 }
 
 void rmnet_shs_cpu_node_move(struct rmnet_shs_skbn_s *node,
@@ -141,10 +162,17 @@
 			    0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
 	list_move(&node->node_id, hd);
-	cpu_num_flows[node->map_cpu]++;
-	cpu_num_flows[oldcpu]--;
+	rmnet_shs_change_cpu_num_flows(node->map_cpu, INCREMENT);
+	rmnet_shs_change_cpu_num_flows((u16) oldcpu, DECREMENT);
 }
 
+static void rmnet_shs_cpu_ooo(u8 cpu, int count)
+{
+	if (cpu < MAX_CPUS)
+	{
+		rmnet_shs_cpu_ooo_count[cpu] += count;
+	}
+}
 /* Evaluates the incoming transport protocol of the incoming skb. Determines
  * if the skb transport protocol will be supported by SHS module
  */
@@ -282,14 +310,6 @@
 
 }
 
-static int rmnet_shs_is_core_loaded(int cpu)
-{
-
-	return rmnet_shs_cfg.core_flush[cpu].coresum >=
-		rmnet_shs_inst_rate_max_pkts;
-
-}
-
 /* We deliver packets to GRO module only for TCP traffic*/
 static int rmnet_shs_check_skb_can_gro(struct sk_buff *skb)
 {
@@ -423,18 +443,9 @@
 	return ret;
 }
 
-int rmnet_shs_is_lpwr_cpu(u16 cpu)
+inline int rmnet_shs_is_lpwr_cpu(u16 cpu)
 {
-	int ret = 1;
-	u32 big_cluster_mask = (1 << PERF_CLUSTER) - 1;
-
-	if ((1 << cpu) >= big_cluster_mask)
-		ret = 0;
-
-	SHS_TRACE_LOW(RMNET_SHS_CORE_CFG,
-			    RMNET_SHS_CORE_CFG_CHK_LO_CPU,
-			    ret, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
-	return ret;
+	return !((1 << cpu) & PERF_MASK);
 }
 
 /* Forms a new hash from the incoming hash based on the number of cores
@@ -665,6 +676,22 @@
 
 	return ret;
 }
+
+static int rmnet_shs_is_core_loaded(int cpu, int backlog_check, int parked_pkts)
+{
+	int ret = 0;
+
+	if (rmnet_shs_cfg.core_flush[cpu].coresum >=
+            rmnet_shs_inst_rate_max_pkts) {
+		ret = RMNET_SHS_SWITCH_PACKET_BURST;
+	}
+	if (backlog_check && ((rmnet_shs_get_cpu_qdiff(cpu) + parked_pkts) >=
+	    rmnet_shs_backlog_max_pkts))
+		ret = RMNET_SHS_SWITCH_CORE_BACKLOG;
+
+	return ret;
+}
+
 /* Takes a snapshot of absolute value of the CPU Qhead and Qtail counts for
  * a given core.
  *
@@ -784,6 +811,7 @@
 					rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_TOTAL] +=
 							(node_qhead -
 							cur_cpu_qhead);
+					rmnet_shs_cpu_ooo(cpu_num, node_qhead - cur_cpu_qhead);
 				}
 				/* Mark gold core as prio to prevent
 				 * flows from moving in wq
@@ -866,6 +894,8 @@
 	rmnet_shs_cfg.num_bytes_parked -= total_bytes_flush;
 	rmnet_shs_cfg.num_pkts_parked -= total_pkts_flush;
 	rmnet_shs_cpu_node_tbl[cpu_num].prio = 0;
+	/* Reset coresum in case of instant rate switch */
+	rmnet_shs_cfg.core_flush[cpu_num].coresum = 0;
 	rmnet_shs_cpu_node_tbl[cpu_num].parkedlen = 0;
 	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 	local_bh_enable();
@@ -1043,6 +1073,35 @@
 			     node, NULL);
 	return ret_val;
 }
+
+/* Check if cpu_num should be marked as a priority core and take care of
+ * marking it as priority and configuring all the changes needed for a core
+ * switch.
+ */
+static void rmnet_shs_core_prio_check(u8 cpu_num, u8 segmented, u32 parked_pkts)
+{
+	u32 wait = (!rmnet_shs_max_core_wait) ? 1 : rmnet_shs_max_core_wait;
+	int load_reason;
+
+	if ((load_reason = rmnet_shs_is_core_loaded(cpu_num, segmented, parked_pkts)) &&
+	    rmnet_shs_is_lpwr_cpu(cpu_num) &&
+	    !rmnet_shs_cpu_node_tbl[cpu_num].prio) {
+
+
+		wait = (!segmented) ? DEF_CORE_WAIT : wait;
+		rmnet_shs_cpu_node_tbl[cpu_num].prio = 1;
+		rmnet_shs_boost_cpus();
+		if (hrtimer_active(&GET_CTIMER(cpu_num)))
+			hrtimer_cancel(&GET_CTIMER(cpu_num));
+
+		hrtimer_start(&GET_CTIMER(cpu_num),
+				ns_to_ktime(wait * NS_IN_MS),
+				HRTIMER_MODE_REL);
+
+		rmnet_shs_switch_reason[load_reason]++;
+	}
+}
+
 /* Flushes all the packets that have been parked so far across all the flows
  * The order of flushing depends on the CPU<=>flow association
  * The flows associated with low power cores are flushed before flushing
@@ -1063,13 +1122,13 @@
 	u32 cpu_tail;
 	u32 num_pkts_flush = 0;
 	u32 num_bytes_flush = 0;
+	u32 skb_seg_pending = 0;
 	u32 total_pkts_flush = 0;
 	u32 total_bytes_flush = 0;
 	u32 total_cpu_gro_flushed = 0;
 	u32 total_node_gro_flushed = 0;
-
 	u8 is_flushed = 0;
-	u32 wait = (!rmnet_shs_max_core_wait) ? 1 : rmnet_shs_max_core_wait;
+	u8 cpu_segment = 0;
 
 	/* Record a qtail + pkts flushed or move if reqd
 	 * currently only use qtail for non TCP flows
@@ -1083,10 +1142,22 @@
 	for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
 
 		cpu_tail = rmnet_shs_get_cpu_qtail(cpu_num);
-
+		cpu_segment = 0;
 		total_cpu_gro_flushed = 0;
+		skb_seg_pending = 0;
 		list_for_each_safe(ptr, next,
-			   &rmnet_shs_cpu_node_tbl[cpu_num].node_list_id) {
+				   &rmnet_shs_cpu_node_tbl[cpu_num].node_list_id) {
+			n = list_entry(ptr, struct rmnet_shs_skbn_s, node_id);
+			skb_seg_pending += n->skb_list.skb_load;
+		}
+		if (rmnet_shs_inst_rate_switch) {
+			cpu_segment = rmnet_shs_cpu_node_tbl[cpu_num].seg;
+			rmnet_shs_core_prio_check(cpu_num, cpu_segment,
+						  skb_seg_pending);
+		}
+
+		list_for_each_safe(ptr, next,
+				   &rmnet_shs_cpu_node_tbl[cpu_num].node_list_id) {
 			n = list_entry(ptr, struct rmnet_shs_skbn_s, node_id);
 
 			if (n != NULL && n->skb_list.num_parked_skbs) {
@@ -1111,31 +1182,21 @@
 					}
 				}
 			}
+
 		}
 
 		/* If core is loaded set core flows as priority and
 		 * start a 10ms hard flush timer
 		 */
 		if (rmnet_shs_inst_rate_switch) {
+			/* Update cpu load with prev flush for check */
 			if (rmnet_shs_is_lpwr_cpu(cpu_num) &&
 			    !rmnet_shs_cpu_node_tbl[cpu_num].prio)
 				rmnet_shs_update_core_load(cpu_num,
 				total_cpu_gro_flushed);
 
-			if (rmnet_shs_is_core_loaded(cpu_num) &&
-			    rmnet_shs_is_lpwr_cpu(cpu_num) &&
-			    !rmnet_shs_cpu_node_tbl[cpu_num].prio) {
+			rmnet_shs_core_prio_check(cpu_num, cpu_segment, 0);
 
-				rmnet_shs_cpu_node_tbl[cpu_num].prio = 1;
-				rmnet_shs_boost_cpus();
-				if (hrtimer_active(&GET_CTIMER(cpu_num)))
-					hrtimer_cancel(&GET_CTIMER(cpu_num));
-
-				hrtimer_start(&GET_CTIMER(cpu_num),
-					      ns_to_ktime(wait * NS_IN_MS),
-					      HRTIMER_MODE_REL);
-
-			}
 		}
 
 		if (rmnet_shs_cpu_node_tbl[cpu_num].parkedlen < 0)
@@ -1178,6 +1239,21 @@
 	spin_lock_irqsave(&rmnet_shs_ht_splock, ht_flags);
 
 	rmnet_shs_flush_lock_table(flsh, ctxt);
+	if (ctxt == RMNET_WQ_CTXT) {
+		/* If packets remain, restart the timer in case there are no
+		 * more NET_RX flushes coming so pkts are not lost
+		 */
+		if (rmnet_shs_fall_back_timer &&
+		    rmnet_shs_cfg.num_bytes_parked &&
+		    rmnet_shs_cfg.num_pkts_parked) {
+			if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
+				hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
+			hrtimer_start(&rmnet_shs_cfg.hrtimer_shs,
+				ns_to_ktime(rmnet_shs_timeout * NS_IN_MS),
+				HRTIMER_MODE_REL);
+		}
+		rmnet_shs_flush_reason[RMNET_SHS_FLUSH_WQ_FB_FLUSH]++;
+	}
 
 	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 
@@ -1262,21 +1338,6 @@
 		local_bh_disable();
 		rmnet_shs_flush_table(is_force_flush,
 				      RMNET_WQ_CTXT);
-
-		/* If packets remain restart the timer in case there are no
-		 * more NET_RX flushes coming so pkts are no lost
-		 */
-		if (rmnet_shs_fall_back_timer &&
-		    rmnet_shs_cfg.num_bytes_parked &&
-		    rmnet_shs_cfg.num_pkts_parked){
-			if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
-				hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
-
-			hrtimer_start(&rmnet_shs_cfg.hrtimer_shs,
-				      ns_to_ktime(rmnet_shs_timeout * NS_IN_MS),
-				      HRTIMER_MODE_REL);
-		}
-		rmnet_shs_flush_reason[RMNET_SHS_FLUSH_WQ_FB_FLUSH]++;
 		local_bh_enable();
 	}
 	SHS_TRACE_HIGH(RMNET_SHS_FLUSH,
@@ -1649,9 +1710,9 @@
 		break;
 
 	} while (0);
-	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 
 	if (!is_shs_reqd) {
+		spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 		rmnet_shs_crit_err[RMNET_SHS_MAIN_SHS_NOT_REQD]++;
 		rmnet_shs_deliver_skb(skb);
 		SHS_TRACE_ERR(RMNET_SHS_ASSIGN,
@@ -1683,6 +1744,7 @@
 				    RMNET_SHS_FORCE_FLUSH_TIME_NSEC,
 				    0xDEF, 0xDEF, 0xDEF, skb, NULL);
 	}
+	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 
 	if (rmnet_shs_cfg.num_pkts_parked >
 						rmnet_shs_pkts_store_limit) {
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 4c69b57..53f5826 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,25 +28,23 @@
 #define RMNET_SHS_NSEC_TO_SEC(x) ((x)/1000000000)
 #define RMNET_SHS_BYTE_TO_BIT(x) ((x)*8)
 #define RMNET_SHS_MIN_HSTAT_NODES_REQD 16
+#define RMNET_SHS_FILTER_PKT_LIMIT 200
+#define RMNET_SHS_FILTER_FLOW_RATE 100
 
 #define PERIODIC_CLEAN 0
-/* FORCE_CLEAN should only used during module de-ini.*/
+/* FORCE_CLEAN should only be used during module de-init. */
 #define FORCE_CLEAN 1
-/* Time to wait (in time ticks) before re-triggering the workqueue
- *	1   tick  = 10 ms (Maximum possible resolution)
- *	100 ticks = 1 second
- */
 
 /* Local Definitions and Declarations */
 unsigned int rmnet_shs_cpu_prio_dur __read_mostly = 3;
 module_param(rmnet_shs_cpu_prio_dur, uint, 0644);
-MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration(ticks)");
+MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration (wq intervals)");
 
 #define PRIO_BACKOFF ((!rmnet_shs_cpu_prio_dur) ? 2 : rmnet_shs_cpu_prio_dur)
 
-unsigned int rmnet_shs_wq_frequency __read_mostly = RMNET_SHS_WQ_DELAY_TICKS;
-module_param(rmnet_shs_wq_frequency, uint, 0644);
-MODULE_PARM_DESC(rmnet_shs_wq_frequency, "Priodicity of Wq trigger(in ticks)");
+unsigned int rmnet_shs_wq_interval_ms __read_mostly = RMNET_SHS_WQ_INTERVAL_MS;
+module_param(rmnet_shs_wq_interval_ms, uint, 0644);
+MODULE_PARM_DESC(rmnet_shs_wq_interval_ms, "Interval between wq runs (ms)");
 
 unsigned long rmnet_shs_max_flow_inactivity_sec __read_mostly =
 						RMNET_SHS_MAX_SKB_INACTIVE_TSEC;
@@ -88,6 +86,10 @@
 module_param_array(rmnet_shs_cpu_rx_flows, uint, 0, 0444);
 MODULE_PARM_DESC(rmnet_shs_cpu_rx_flows, "Num flows processed per core");
 
+unsigned int rmnet_shs_cpu_rx_filter_flows[MAX_CPUS];
+module_param_array(rmnet_shs_cpu_rx_filter_flows, uint, 0, 0444);
+MODULE_PARM_DESC(rmnet_shs_cpu_rx_filter_flows, "Num filtered flows per core");
+
 unsigned long long rmnet_shs_cpu_rx_bytes[MAX_CPUS];
 module_param_array(rmnet_shs_cpu_rx_bytes, ullong, 0, 0444);
 MODULE_PARM_DESC(rmnet_shs_cpu_rx_bytes, "SHS stamp bytes per CPU");
@@ -177,8 +179,7 @@
 static struct list_head rmnet_shs_wq_hstat_tbl =
 				LIST_HEAD_INIT(rmnet_shs_wq_hstat_tbl);
 static int rmnet_shs_flow_dbg_stats_idx_cnt;
-static struct list_head rmnet_shs_wq_ep_tbl =
-				LIST_HEAD_INIT(rmnet_shs_wq_ep_tbl);
+struct list_head rmnet_shs_wq_ep_tbl = LIST_HEAD_INIT(rmnet_shs_wq_ep_tbl);
 
 /* Helper functions to add and remove entries to the table
  * that maintains a list of all endpoints (vnd's) available on this device.
@@ -538,6 +539,17 @@
 			hstat_p->rps_config_msk = ep->rps_config_msk;
 			hstat_p->def_core_msk = ep->default_core_msk;
 			hstat_p->pri_core_msk = ep->pri_core_msk;
+
+			/* Update ep tput stats while we're here */
+			if (hstat_p->skb_tport_proto == IPPROTO_TCP) {
+				rm_err("SHS_UDP: adding TCP bps %lu to ep_total %lu ep name %s",
+				       hstat_p->rx_bps, ep->tcp_rx_bps, node_p->dev->name);
+				ep->tcp_rx_bps += hstat_p->rx_bps;
+			} else if (hstat_p->skb_tport_proto == IPPROTO_UDP) {
+				rm_err("SHS_UDP: adding UDP rx_bps %lu to ep_total %lu ep name %s",
+				       hstat_p->rx_bps, ep->udp_rx_bps, node_p->dev->name);
+				ep->udp_rx_bps += hstat_p->rx_bps;
+			}
 			break;
 		}
 	}
@@ -1234,6 +1246,7 @@
 int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
 				  u32 sugg_type)
 {
+	unsigned long flags;
 	struct rmnet_shs_wq_ep_s *ep;
 
 	if (cur_cpu >= MAX_CPUS || dest_cpu >= MAX_CPUS) {
@@ -1245,6 +1258,7 @@
 	 * on it if is online, rps mask, isolation, etc. then make
 	 * suggestion to change the cpu for the flow by passing its hash
 	 */
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
 	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 		if (!ep)
 			continue;
@@ -1266,9 +1280,13 @@
 			rm_err("SHS_FDESC: >> flow 0x%x was suggested to"
 			       " move from cpu[%d] to cpu[%d] sugg_type [%d]",
 			       hash_to_move, cur_cpu, dest_cpu, sugg_type);
+
+			spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 			return 1;
 		}
 	}
+
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 	return 0;
 }
 
@@ -1277,8 +1295,10 @@
 {
 	struct rmnet_shs_skbn_s *node_p;
 	struct rmnet_shs_wq_hstat_s *hstat_p;
+	unsigned long ht_flags;
 	u16 bkt;
 
+	spin_lock_irqsave(&rmnet_shs_ht_splock, ht_flags);
 	hash_for_each(RMNET_SHS_HT, bkt, node_p, list) {
 		if (!node_p)
 			continue;
@@ -1300,8 +1320,10 @@
 				0xDEF, 0xDEF, hstat_p, NULL);
 
 		node_p->hstats->segment_enable = seg_enable;
+		spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 		return 1;
 	}
+	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 
 	rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u not set - hash not found",
 	       hash_to_set, seg_enable);
@@ -1446,6 +1468,7 @@
 	rmnet_shs_wq_mem_update_cached_cpu_caps(cpu_caps);
 	rmnet_shs_wq_mem_update_cached_sorted_gold_flows(gold_flows);
 	rmnet_shs_wq_mem_update_cached_sorted_ss_flows(ss_flows);
+	rmnet_shs_wq_mem_update_cached_netdevs();
 
 	rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_RESP_INT);
 
@@ -1608,12 +1631,14 @@
 	int cpu_assigned = -1;
 	u8 is_match_found = 0;
 	struct rmnet_shs_wq_ep_s *ep = NULL;
+	unsigned long flags;
 
 	if (!dev) {
 		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
 		return cpu_assigned;
 	}
 
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
 	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 		if (!ep)
 			continue;
@@ -1629,6 +1654,7 @@
 
 	if (!is_match_found) {
 		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+		spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 		return cpu_assigned;
 	}
 
@@ -1646,6 +1672,7 @@
 	/* Increment CPU assignment idx to be ready for next flow assignment*/
 	if ((cpu_assigned >= 0) || ((ep->new_lo_idx + 1) >= ep->new_lo_max))
 		ep->new_lo_idx = ((ep->new_lo_idx + 1) % ep->new_lo_max);
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 
 	return cpu_assigned;
 }
@@ -1657,12 +1684,14 @@
 	u8 hi_idx;
 	u8 hi_max;
 	u8 is_match_found = 0;
+	unsigned long flags;
 
 	if (!dev) {
 		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
 		return cpu_assigned;
 	}
 
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
 	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 		if (!ep)
 			continue;
@@ -1678,6 +1707,7 @@
 
 	if (!is_match_found) {
 		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+		spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 		return cpu_assigned;
 	}
 
@@ -1694,6 +1724,7 @@
 	/* Increment CPU assignment idx to be ready for next flow assignment*/
 	if (cpu_assigned >= 0)
 		ep->new_hi_idx = ((hi_idx + 1) % hi_max);
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 
 	return cpu_assigned;
 }
@@ -1868,6 +1899,11 @@
 		if (!ep->is_ep_active)
 			continue;
 		rmnet_shs_wq_update_ep_rps_msk(ep);
+
+		/* These tput totals get re-added as we go through each flow */
+		ep->udp_rx_bps = 0;
+		ep->tcp_rx_bps = 0;
+
 	}
 }
 
@@ -1875,19 +1911,65 @@
 {
 	/* Start with most avaible mask all eps could share*/
 	u8 mask = UPDATE_MASK;
+	u8 rps_enabled = 0;
 	struct rmnet_shs_wq_ep_s *ep;
 
 	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 
 		if (!ep->is_ep_active)
 			continue;
-		/* Bitwise and to get common mask  VNDs with different mask
-		 * will have UNDEFINED behavior
+		/* Bitwise and to get common mask from non-null masks.
+	 * VNDs with different masks will have UNDEFINED behavior
 		 */
-		mask &= ep->rps_config_msk;
+		if (ep->rps_config_msk) {
+			mask &= ep->rps_config_msk;
+			rps_enabled = 1;
+		}
 	}
-	rmnet_shs_cfg.map_mask = mask;
-	rmnet_shs_cfg.map_len = rmnet_shs_get_mask_len(mask);
+
+	if (!rps_enabled) {
+		rmnet_shs_cfg.map_mask = 0;
+		rmnet_shs_cfg.map_len = 0;
+		return;
+	} else if (rmnet_shs_cfg.map_mask != mask) {
+		rmnet_shs_cfg.map_mask = mask;
+		rmnet_shs_cfg.map_len = rmnet_shs_get_mask_len(mask);
+	}
+}
+
+void rmnet_shs_wq_filter(void)
+{
+	int cpu, cur_cpu;
+	int temp;
+	struct rmnet_shs_wq_hstat_s *hnode = NULL;
+
+	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
+		rmnet_shs_cpu_rx_filter_flows[cpu] = 0;
+		rmnet_shs_cpu_node_tbl[cpu].seg = 0;
+	}
+
+	/* Filter out flows with low pkt count and
+	 * mark CPUS with slowstart flows
+	 */
+	list_for_each_entry(hnode, &rmnet_shs_wq_hstat_tbl, hstat_node_id) {
+
+		if (hnode->in_use == 0)
+			continue;
+		if (hnode->avg_pps > RMNET_SHS_FILTER_FLOW_RATE &&
+		    hnode->rx_skb > RMNET_SHS_FILTER_PKT_LIMIT)
+			if (hnode->current_cpu < MAX_CPUS) {
+				temp = hnode->current_cpu;
+				rmnet_shs_cpu_rx_filter_flows[temp]++;
+			}
+		cur_cpu = hnode->current_cpu;
+		if (cur_cpu >= MAX_CPUS) {
+			continue;
+		}
+
+		if (hnode->node->hstats->segment_enable) {
+			rmnet_shs_cpu_node_tbl[cur_cpu].seg++;
+		}
+	}
 }
 
 void rmnet_shs_wq_update_stats(void)
@@ -1941,14 +2023,13 @@
 	}
 
 	rmnet_shs_wq_refresh_new_flow_list();
-	/*Invoke after both the locks are released*/
-	rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
-	rmnet_shs_wq_debug_print_flows();
+	rmnet_shs_wq_filter();
 }
 
 void rmnet_shs_wq_process_wq(struct work_struct *work)
 {
 	unsigned long flags;
+	unsigned long delay_jiffies;
 
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
 				RMNET_SHS_WQ_PROCESS_WQ_START,
@@ -1958,8 +2039,14 @@
 	rmnet_shs_wq_update_stats();
 	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 
+	/* Invoke after both the locks are released */
+	rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
+	rmnet_shs_wq_debug_print_flows();
+
+	delay_jiffies = msecs_to_jiffies(rmnet_shs_wq_interval_ms);
+
 	queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq,
-					rmnet_shs_wq_frequency);
+			   delay_jiffies);
 
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
 				RMNET_SHS_WQ_PROCESS_WQ_END,
@@ -1993,6 +2080,7 @@
 		return;
 
 	rmnet_shs_wq_mem_deinit();
+	rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_WQ_EXIT);
 
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_EXIT, RMNET_SHS_WQ_EXIT_START,
 				   0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
@@ -2031,8 +2119,13 @@
 
 void rmnet_shs_wq_pause(void)
 {
+	int cpu;
+
 	if (rmnet_shs_wq && rmnet_shs_delayed_wq)
 		cancel_delayed_work_sync(&rmnet_shs_delayed_wq->wq);
+
+	for (cpu = 0; cpu < MAX_CPUS; cpu++)
+		rmnet_shs_cpu_rx_filter_flows[cpu] = 0;
 }
 
 void rmnet_shs_wq_restart(void)
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.h b/drivers/rmnet/shs/rmnet_shs_wq.h
index 0d86200..aa0265c 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,14 +31,18 @@
 #define RMNET_SHS_NSEC_TO_SEC(x) ((x)/1000000000)
 #define RMNET_SHS_BYTE_TO_BIT(x) ((x)*8)
 #define RMNET_SHS_MIN_HSTAT_NODES_REQD 16
-#define RMNET_SHS_WQ_DELAY_TICKS  10
+#define RMNET_SHS_WQ_INTERVAL_MS  100
 
 extern unsigned long long rmnet_shs_cpu_rx_max_pps_thresh[MAX_CPUS]__read_mostly;
 extern unsigned long long rmnet_shs_cpu_rx_min_pps_thresh[MAX_CPUS]__read_mostly;
 
+extern struct list_head rmnet_shs_wq_ep_tbl;
+
 /* stores wq and end point details */
 
 struct rmnet_shs_wq_ep_s {
+	u64 tcp_rx_bps;
+	u64 udp_rx_bps;
 	struct list_head ep_list_id;
 	struct net_device *ep;
 	int  new_lo_core[MAX_CPUS];
@@ -161,6 +165,7 @@
 	struct list_head cpu_cap_list;
 	u64 pps_capacity;
 	u64 avg_pps_capacity;
+	u64 bps;
 	u16 cpu_num;
 };
 
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.c b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
index b28f0c2..2dff48a 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_genl.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -349,6 +349,8 @@
 {
 	int ret;
 
+	rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_WQ_EXIT);
+
 	ret = genl_unregister_family(&rmnet_shs_genl_family);
 	if(ret != 0){
 		rm_err("SHS_GNL: unregister family failed: %i\n",ret);
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.h b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
index 333de48..9901d38 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_genl.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
 #define RMNET_SHS_GENL_VERSION 1
 #define RMNET_SHS_GENL_FAMILY_NAME "RMNET_SHS"
 #define RMNET_SHS_SYNC_RESP_INT 828 /* Any number, sent after mem update */
+#define RMNET_SHS_SYNC_WQ_EXIT  42
 
 extern int rmnet_shs_userspace_connected;
 
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.c b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
index 1675517..062edb7 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_mem.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
 
 #include "rmnet_shs_wq_mem.h"
 #include <linux/proc_fs.h>
+#include <linux/refcount.h>
 
 MODULE_LICENSE("GPL v2");
 
@@ -24,6 +25,7 @@
 struct rmnet_shs_wq_cpu_cap_usr_s rmnet_shs_wq_cap_list_usr[MAX_CPUS];
 struct rmnet_shs_wq_gflows_usr_s rmnet_shs_wq_gflows_usr[RMNET_SHS_MAX_USRFLOWS];
 struct rmnet_shs_wq_ssflows_usr_s rmnet_shs_wq_ssflows_usr[RMNET_SHS_MAX_USRFLOWS];
+struct rmnet_shs_wq_netdev_usr_s rmnet_shs_wq_netdev_usr[RMNET_SHS_MAX_NETDEVS];
 
 struct list_head gflows   = LIST_HEAD_INIT(gflows);   /* gold flows */
 struct list_head ssflows  = LIST_HEAD_INIT(ssflows);  /* slow start flows */
@@ -32,6 +34,7 @@
 struct rmnet_shs_mmap_info *cap_shared;
 struct rmnet_shs_mmap_info *gflow_shared;
 struct rmnet_shs_mmap_info *ssflow_shared;
+struct rmnet_shs_mmap_info *netdev_shared;
 
 /* Static Functions and Definitions */
 static void rmnet_shs_vm_open(struct vm_area_struct *vma)
@@ -44,32 +47,163 @@
 	return;
 }
 
-static int rmnet_shs_vm_fault(struct vm_fault *vmf)
+static int rmnet_shs_vm_fault_caps(struct vm_fault *vmf)
 {
 	struct page *page = NULL;
 	struct rmnet_shs_mmap_info *info;
 
 	rmnet_shs_wq_ep_lock_bh();
-	info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
-	if (info->data) {
-		page = virt_to_page(info->data);
-		get_page(page);
-		vmf->page = page;
+	if (cap_shared) {
+		info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+		if (info->data) {
+			page = virt_to_page(info->data);
+			get_page(page);
+			vmf->page = page;
+		} else {
+			rmnet_shs_wq_ep_unlock_bh();
+			return VM_FAULT_SIGSEGV;
+		}
+	} else {
+		rmnet_shs_wq_ep_unlock_bh();
+		return VM_FAULT_SIGSEGV;
 	}
 	rmnet_shs_wq_ep_unlock_bh();
 
 	return 0;
 }
 
-static const struct vm_operations_struct rmnet_shs_vm_ops = {
+
+static int rmnet_shs_vm_fault_g_flows(struct vm_fault *vmf)
+{
+	struct page *page = NULL;
+	struct rmnet_shs_mmap_info *info;
+
+	rmnet_shs_wq_ep_lock_bh();
+	if (gflow_shared) {
+		info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+		if (info->data) {
+			page = virt_to_page(info->data);
+			get_page(page);
+			vmf->page = page;
+		} else {
+			rmnet_shs_wq_ep_unlock_bh();
+			return VM_FAULT_SIGSEGV;
+		}
+	} else {
+		rmnet_shs_wq_ep_unlock_bh();
+		return VM_FAULT_SIGSEGV;
+
+	}
+	rmnet_shs_wq_ep_unlock_bh();
+
+	return 0;
+}
+
+static int rmnet_shs_vm_fault_ss_flows(struct vm_fault *vmf)
+{
+	struct page *page = NULL;
+	struct rmnet_shs_mmap_info *info;
+
+	rmnet_shs_wq_ep_lock_bh();
+	if (ssflow_shared) {
+		info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+		if (info->data) {
+			page = virt_to_page(info->data);
+			get_page(page);
+			vmf->page = page;
+		} else {
+			rmnet_shs_wq_ep_unlock_bh();
+			return VM_FAULT_SIGSEGV;
+		}
+	} else {
+		rmnet_shs_wq_ep_unlock_bh();
+		return VM_FAULT_SIGSEGV;
+	}
+	rmnet_shs_wq_ep_unlock_bh();
+
+	return 0;
+}
+
+static int rmnet_shs_vm_fault_netdev(struct vm_fault *vmf)
+{
+	struct page *page = NULL;
+	struct rmnet_shs_mmap_info *info;
+
+	rmnet_shs_wq_ep_lock_bh();
+	if (netdev_shared) {
+		info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+		if (info->data) {
+			page = virt_to_page(info->data);
+			get_page(page);
+			vmf->page = page;
+		} else {
+			rmnet_shs_wq_ep_unlock_bh();
+			return VM_FAULT_SIGSEGV;
+		}
+	} else {
+		rmnet_shs_wq_ep_unlock_bh();
+		return VM_FAULT_SIGSEGV;
+	}
+	rmnet_shs_wq_ep_unlock_bh();
+
+	return 0;
+}
+
+
+static const struct vm_operations_struct rmnet_shs_vm_ops_caps = {
 	.close = rmnet_shs_vm_close,
 	.open = rmnet_shs_vm_open,
-	.fault = rmnet_shs_vm_fault,
+	.fault = rmnet_shs_vm_fault_caps,
 };
 
-static int rmnet_shs_mmap(struct file *filp, struct vm_area_struct *vma)
+static const struct vm_operations_struct rmnet_shs_vm_ops_g_flows = {
+	.close = rmnet_shs_vm_close,
+	.open = rmnet_shs_vm_open,
+	.fault = rmnet_shs_vm_fault_g_flows,
+};
+
+static const struct vm_operations_struct rmnet_shs_vm_ops_ss_flows = {
+	.close = rmnet_shs_vm_close,
+	.open = rmnet_shs_vm_open,
+	.fault = rmnet_shs_vm_fault_ss_flows,
+};
+
+static const struct vm_operations_struct rmnet_shs_vm_ops_netdev = {
+	.close = rmnet_shs_vm_close,
+	.open = rmnet_shs_vm_open,
+	.fault = rmnet_shs_vm_fault_netdev,
+};
+
+static int rmnet_shs_mmap_caps(struct file *filp, struct vm_area_struct *vma)
 {
-	vma->vm_ops = &rmnet_shs_vm_ops;
+	vma->vm_ops = &rmnet_shs_vm_ops_caps;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = filp->private_data;
+
+	return 0;
+}
+
+static int rmnet_shs_mmap_g_flows(struct file *filp, struct vm_area_struct *vma)
+{
+	vma->vm_ops = &rmnet_shs_vm_ops_g_flows;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = filp->private_data;
+
+	return 0;
+}
+
+static int rmnet_shs_mmap_ss_flows(struct file *filp, struct vm_area_struct *vma)
+{
+	vma->vm_ops = &rmnet_shs_vm_ops_ss_flows;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = filp->private_data;
+
+	return 0;
+}
+
+static int rmnet_shs_mmap_netdev(struct file *filp, struct vm_area_struct *vma)
+{
+	vma->vm_ops = &rmnet_shs_vm_ops_netdev;
 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_private_data = filp->private_data;
 
@@ -95,9 +229,12 @@
 		}
 
 		cap_shared = info;
+		refcount_set(&cap_shared->refcnt, 1);
 		rm_err("SHS_MEM: virt_to_phys = 0x%llx cap_shared = 0x%llx\n",
 		       (unsigned long long)virt_to_phys((void *)info),
 		       (unsigned long long)virt_to_phys((void *)cap_shared));
+	} else {
+		refcount_inc(&cap_shared->refcnt);
 	}
 
 	filp->private_data = cap_shared;
@@ -132,10 +269,14 @@
 		}
 
 		gflow_shared = info;
+		refcount_set(&gflow_shared->refcnt, 1);
 		rm_err("SHS_MEM: virt_to_phys = 0x%llx gflow_shared = 0x%llx\n",
 		       (unsigned long long)virt_to_phys((void *)info),
 		       (unsigned long long)virt_to_phys((void *)gflow_shared));
+	} else {
+		refcount_inc(&gflow_shared->refcnt);
 	}
+
 	filp->private_data = gflow_shared;
 	rmnet_shs_wq_ep_unlock_bh();
 
@@ -166,10 +307,14 @@
 		}
 
 		ssflow_shared = info;
+		refcount_set(&ssflow_shared->refcnt, 1);
 		rm_err("SHS_MEM: virt_to_phys = 0x%llx ssflow_shared = 0x%llx\n",
 		       (unsigned long long)virt_to_phys((void *)info),
 		       (unsigned long long)virt_to_phys((void *)ssflow_shared));
+	} else {
+		refcount_inc(&ssflow_shared->refcnt);
 	}
+
 	filp->private_data = ssflow_shared;
 	rmnet_shs_wq_ep_unlock_bh();
 
@@ -181,40 +326,59 @@
 	return -ENOMEM;
 }
 
-static ssize_t rmnet_shs_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
+static int rmnet_shs_open_netdev(struct inode *inode, struct file *filp)
 {
 	struct rmnet_shs_mmap_info *info;
-	int ret = 0;
 
-	rm_err("%s", "SHS_MEM: rmnet_shs_read - entry\n");
+	rm_err("%s", "SHS_MEM: rmnet_shs_open netdev - entry\n");
 
 	rmnet_shs_wq_ep_lock_bh();
-	info = filp->private_data;
-	ret = min_t(size_t, len, RMNET_SHS_BUFFER_SIZE);
-	if (copy_to_user(buf, info->data, ret))
-		ret = -EFAULT;
+	if (!netdev_shared) {
+		info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_ATOMIC);
+		if (!info)
+			goto fail;
+
+		info->data = (char *)get_zeroed_page(GFP_ATOMIC);
+		if (!info->data) {
+			kfree(info);
+			goto fail;
+		}
+
+		netdev_shared = info;
+		refcount_set(&netdev_shared->refcnt, 1);
+		rm_err("SHS_MEM: virt_to_phys = 0x%llx netdev_shared = 0x%llx\n",
+		       (unsigned long long)virt_to_phys((void *)info),
+		       (unsigned long long)virt_to_phys((void *)netdev_shared));
+	} else {
+		refcount_inc(&netdev_shared->refcnt);
+	}
+
+	filp->private_data = netdev_shared;
 	rmnet_shs_wq_ep_unlock_bh();
 
-	return ret;
+	return 0;
+
+fail:
+	rmnet_shs_wq_ep_unlock_bh();
+	return -ENOMEM;
+}
+
+static ssize_t rmnet_shs_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
+{
+	/*
+	 * Decline to expose the mapped data and simply return a benign value
+	 */
+	return RMNET_SHS_READ_VAL;
 }
 
 static ssize_t rmnet_shs_write(struct file *filp, const char __user *buf, size_t len, loff_t *off)
 {
-	struct rmnet_shs_mmap_info *info;
-	int ret;
-
-	rm_err("%s", "SHS_MEM: rmnet_shs_write - entry\n");
-
-	rmnet_shs_wq_ep_lock_bh();
-	info = filp->private_data;
-	ret = min_t(size_t, len, RMNET_SHS_BUFFER_SIZE);
-	if (copy_from_user(info->data, buf, ret))
-		ret = -EFAULT;
-	else
-		ret = len;
-	rmnet_shs_wq_ep_unlock_bh();
-
-	return ret;
+	/*
+	 * Returning zero here would cause echo commands to hang.
+	 * Instead, return len and simply decline to let the echoed
+	 * values take effect.
+	 */
+	return len;
 }
 
 static int rmnet_shs_release_caps(struct inode *inode, struct file *filp)
@@ -226,10 +390,14 @@
 	rmnet_shs_wq_ep_lock_bh();
 	if (cap_shared) {
 		info = filp->private_data;
-		cap_shared = NULL;
-		free_page((unsigned long)info->data);
-		kfree(info);
-		filp->private_data = NULL;
+		if (refcount_read(&info->refcnt) <= 1) {
+			free_page((unsigned long)info->data);
+			kfree(info);
+			cap_shared = NULL;
+			filp->private_data = NULL;
+		} else {
+			refcount_dec(&info->refcnt);
+		}
 	}
 	rmnet_shs_wq_ep_unlock_bh();
 
@@ -245,10 +413,14 @@
 	rmnet_shs_wq_ep_lock_bh();
 	if (gflow_shared) {
 		info = filp->private_data;
-		gflow_shared = NULL;
-		free_page((unsigned long)info->data);
-		kfree(info);
-		filp->private_data = NULL;
+		if (refcount_read(&info->refcnt) <= 1) {
+			free_page((unsigned long)info->data);
+			kfree(info);
+			gflow_shared = NULL;
+			filp->private_data = NULL;
+		} else {
+			refcount_dec(&info->refcnt);
+		}
 	}
 	rmnet_shs_wq_ep_unlock_bh();
 
@@ -264,10 +436,37 @@
 	rmnet_shs_wq_ep_lock_bh();
 	if (ssflow_shared) {
 		info = filp->private_data;
-		ssflow_shared = NULL;
-		free_page((unsigned long)info->data);
-		kfree(info);
-		filp->private_data = NULL;
+		if (refcount_read(&info->refcnt) <= 1) {
+			free_page((unsigned long)info->data);
+			kfree(info);
+			ssflow_shared = NULL;
+			filp->private_data = NULL;
+		} else {
+			refcount_dec(&info->refcnt);
+		}
+	}
+	rmnet_shs_wq_ep_unlock_bh();
+
+	return 0;
+}
+
+static int rmnet_shs_release_netdev(struct inode *inode, struct file *filp)
+{
+	struct rmnet_shs_mmap_info *info;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_release netdev - entry\n");
+
+	rmnet_shs_wq_ep_lock_bh();
+	if (netdev_shared) {
+		info = filp->private_data;
+		if (refcount_read(&info->refcnt) <= 1) {
+			free_page((unsigned long)info->data);
+			kfree(info);
+			netdev_shared = NULL;
+			filp->private_data = NULL;
+		} else {
+			refcount_dec(&info->refcnt);
+		}
 	}
 	rmnet_shs_wq_ep_unlock_bh();
 
@@ -276,7 +475,7 @@
 
 static const struct file_operations rmnet_shs_caps_fops = {
 	.owner   = THIS_MODULE,
-	.mmap    = rmnet_shs_mmap,
+	.mmap    = rmnet_shs_mmap_caps,
 	.open    = rmnet_shs_open_caps,
 	.release = rmnet_shs_release_caps,
 	.read    = rmnet_shs_read,
@@ -285,7 +484,7 @@
 
 static const struct file_operations rmnet_shs_g_flows_fops = {
 	.owner   = THIS_MODULE,
-	.mmap    = rmnet_shs_mmap,
+	.mmap    = rmnet_shs_mmap_g_flows,
 	.open    = rmnet_shs_open_g_flows,
 	.release = rmnet_shs_release_g_flows,
 	.read    = rmnet_shs_read,
@@ -294,13 +493,21 @@
 
 static const struct file_operations rmnet_shs_ss_flows_fops = {
 	.owner   = THIS_MODULE,
-	.mmap    = rmnet_shs_mmap,
+	.mmap    = rmnet_shs_mmap_ss_flows,
 	.open    = rmnet_shs_open_ss_flows,
 	.release = rmnet_shs_release_ss_flows,
 	.read    = rmnet_shs_read,
 	.write   = rmnet_shs_write,
 };
 
+static const struct file_operations rmnet_shs_netdev_fops = {
+	.owner   = THIS_MODULE,
+	.mmap    = rmnet_shs_mmap_netdev,
+	.open    = rmnet_shs_open_netdev,
+	.release = rmnet_shs_release_netdev,
+	.read    = rmnet_shs_read,
+	.write   = rmnet_shs_write,
+};
 
 /* Global Functions */
 /* Add a flow to the slow start flow list */
@@ -432,6 +639,7 @@
 	if (flows <= 0) {
 		cap_node->pps_capacity = pps_uthresh;
 		cap_node->avg_pps_capacity = pps_uthresh;
+		cap_node->bps = 0;
 		list_add(&cap_node->cpu_cap_list, cpu_caps);
 		return;
 	}
@@ -452,6 +660,8 @@
 		cap_node->avg_pps_capacity = 0;
 	}
 
+	cap_node->bps = cpu_node->rx_bps;
+
 	list_add(&cap_node->cpu_cap_list, cpu_caps);
 }
 
@@ -503,12 +713,13 @@
 			break;
 
 		rm_err("SHS_SCAPS: > cpu[%d] with pps capacity = %llu | "
-		       "avg pps cap = %llu",
+		       "avg pps cap = %llu bps = %llu",
 		       cap_node->cpu_num, cap_node->pps_capacity,
-		       cap_node->avg_pps_capacity);
+		       cap_node->avg_pps_capacity, cap_node->bps);
 
 		rmnet_shs_wq_cap_list_usr[idx].avg_pps_capacity = cap_node->avg_pps_capacity;
 		rmnet_shs_wq_cap_list_usr[idx].pps_capacity = cap_node->pps_capacity;
+		rmnet_shs_wq_cap_list_usr[idx].bps = cap_node->bps;
 		rmnet_shs_wq_cap_list_usr[idx].cpu_num = cap_node->cpu_num;
 		idx += 1;
 	}
@@ -650,13 +861,97 @@
 	rm_err("SHS_SLOW: num ss flows = %u\n", idx);
 
 	/* Copy num ss flows into first 2 bytes,
-	   then copy in the cached gold flow array */
+	   then copy in the cached ss flow array */
 	memcpy(((char *)ssflow_shared->data), &idx, sizeof(idx));
 	memcpy(((char *)ssflow_shared->data + sizeof(uint16_t)),
 	       (void *) &rmnet_shs_wq_ssflows_usr[0],
 	       sizeof(rmnet_shs_wq_ssflows_usr));
 }
 
+
+/* Extract info required from the rmnet_port array then memcpy to shared mem.
+ * > Add number of active netdevices/endpoints at the start.
+ * > After memcpy is complete, send userspace a message indicating that memcpy
+ *   has just completed.
+ * > The netdev is formatted like this:
+ *    | num_netdevs | data_format | {rmnet_data0,ip_miss,rx_pkts} | ... |
+ *    |  16 bits    |   32 bits   |                                     |
+ */
+void rmnet_shs_wq_mem_update_cached_netdevs(void)
+{
+	struct rmnet_priv *priv;
+	struct rmnet_shs_wq_ep_s *ep = NULL;
+	u16 idx = 0;
+	u16 count = 0;
+
+	rm_err("SHS_NETDEV: function enter %u\n", idx);
+	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
+		count += 1;
+		rm_err("SHS_NETDEV: function enter ep %u\n", count);
+		if (!ep)
+			continue;
+
+		if (!ep->is_ep_active) {
+			rm_err("SHS_NETDEV: ep %u is NOT active\n", count);
+			continue;
+		}
+
+		rm_err("SHS_NETDEV: ep %u is active and not null\n", count);
+		if (idx >= RMNET_SHS_MAX_NETDEVS) {
+			break;
+		}
+
+		priv = netdev_priv(ep->ep);
+		if (!priv) {
+			rm_err("SHS_NETDEV: priv for ep %u is null\n", count);
+			continue;
+		}
+
+		rm_err("SHS_NETDEV: ep %u has name = %s \n", count,
+		       ep->ep->name);
+		rm_err("SHS_NETDEV: ep %u has mux_id = %u \n", count,
+		       priv->mux_id);
+		rm_err("SHS_NETDEV: ep %u has ip_miss = %lu \n", count,
+		       priv->stats.coal.close.ip_miss);
+		rm_err("SHS_NETDEV: ep %u has coal_rx_pkts = %lu \n", count,
+		       priv->stats.coal.coal_pkts);
+		rm_err("SHS_NETDEV: ep %u has udp_rx_bps = %lu \n", count,
+		       ep->udp_rx_bps);
+		rm_err("SHS_NETDEV: ep %u has tcp_rx_bps = %lu \n", count,
+		       ep->tcp_rx_bps);
+
+		/* Set netdev name and ip mismatch count */
+		rmnet_shs_wq_netdev_usr[idx].coal_ip_miss = priv->stats.coal.close.ip_miss;
+		rmnet_shs_wq_netdev_usr[idx].hw_evict = priv->stats.coal.close.hw_evict;
+		rmnet_shs_wq_netdev_usr[idx].coal_tcp = priv->stats.coal.coal_tcp;
+		rmnet_shs_wq_netdev_usr[idx].coal_tcp_bytes = priv->stats.coal.coal_tcp_bytes;
+		rmnet_shs_wq_netdev_usr[idx].coal_udp = priv->stats.coal.coal_udp;
+		rmnet_shs_wq_netdev_usr[idx].coal_udp_bytes = priv->stats.coal.coal_udp_bytes;
+		rmnet_shs_wq_netdev_usr[idx].mux_id = priv->mux_id;
+		strlcpy(rmnet_shs_wq_netdev_usr[idx].name,
+			ep->ep->name,
+			sizeof(rmnet_shs_wq_netdev_usr[idx].name));
+
+		/* Set rx pkt from netdev stats */
+		rmnet_shs_wq_netdev_usr[idx].coal_rx_pkts = priv->stats.coal.coal_pkts;
+		rmnet_shs_wq_netdev_usr[idx].tcp_rx_bps = ep->tcp_rx_bps;
+		rmnet_shs_wq_netdev_usr[idx].udp_rx_bps = ep->udp_rx_bps;
+		idx += 1;
+	}
+
+	rm_err("SHS_MEM: netdev_shared = 0x%llx addr = 0x%pK\n",
+	       (unsigned long long)virt_to_phys((void *)netdev_shared), netdev_shared);
+	if (!netdev_shared) {
+		rm_err("%s", "SHS_WRITE: netdev_shared is NULL");
+		return;
+	}
+
+	memcpy(((char *)netdev_shared->data), &idx, sizeof(idx));
+	memcpy(((char *)netdev_shared->data + sizeof(uint16_t)),
+	       (void *) &rmnet_shs_wq_netdev_usr[0],
+	       sizeof(rmnet_shs_wq_netdev_usr));
+}
+
 /* Creates the proc folder and files for shs shared memory */
 void rmnet_shs_wq_mem_init(void)
 {
@@ -665,11 +960,13 @@
 	proc_create(RMNET_SHS_PROC_CAPS, 0644, shs_proc_dir, &rmnet_shs_caps_fops);
 	proc_create(RMNET_SHS_PROC_G_FLOWS, 0644, shs_proc_dir, &rmnet_shs_g_flows_fops);
 	proc_create(RMNET_SHS_PROC_SS_FLOWS, 0644, shs_proc_dir, &rmnet_shs_ss_flows_fops);
+	proc_create(RMNET_SHS_PROC_NETDEV, 0644, shs_proc_dir, &rmnet_shs_netdev_fops);
 
 	rmnet_shs_wq_ep_lock_bh();
 	cap_shared = NULL;
 	gflow_shared = NULL;
 	ssflow_shared = NULL;
+	netdev_shared = NULL;
 	rmnet_shs_wq_ep_unlock_bh();
 }
 
@@ -679,11 +976,13 @@
 	remove_proc_entry(RMNET_SHS_PROC_CAPS, shs_proc_dir);
 	remove_proc_entry(RMNET_SHS_PROC_G_FLOWS, shs_proc_dir);
 	remove_proc_entry(RMNET_SHS_PROC_SS_FLOWS, shs_proc_dir);
+	remove_proc_entry(RMNET_SHS_PROC_NETDEV, shs_proc_dir);
 	remove_proc_entry(RMNET_SHS_PROC_DIR, NULL);
 
 	rmnet_shs_wq_ep_lock_bh();
 	cap_shared = NULL;
 	gflow_shared = NULL;
 	ssflow_shared = NULL;
+	netdev_shared = NULL;
 	rmnet_shs_wq_ep_unlock_bh();
 }
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.h b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
index 2e5e889..e955606 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_mem.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,16 +23,23 @@
 #define RMNET_SHS_PROC_CAPS     "rmnet_shs_caps"
 #define RMNET_SHS_PROC_G_FLOWS  "rmnet_shs_flows"
 #define RMNET_SHS_PROC_SS_FLOWS "rmnet_shs_ss_flows"
+#define RMNET_SHS_PROC_NETDEV   "rmnet_shs_netdev"
 
 #define RMNET_SHS_MAX_USRFLOWS (128)
+#define RMNET_SHS_MAX_NETDEVS (40)
+#define RMNET_SHS_IFNAMSIZ (16)
+#define RMNET_SHS_READ_VAL (0)
 
+/* NOTE: Make sure these structs fit in one page */
+/* 26 bytes * 8 max cpus = 208 bytes < 4096 */
 struct __attribute__((__packed__)) rmnet_shs_wq_cpu_cap_usr_s {
 	u64 pps_capacity;
 	u64 avg_pps_capacity;
-	u64 bps_capacity;
+	u64 bps;
 	u16 cpu_num;
 };
 
+/* 30 bytes * 128 max = 3840 bytes < 4096 */
 struct __attribute__((__packed__)) rmnet_shs_wq_gflows_usr_s {
 	u64 rx_pps;
 	u64 avg_pps;
@@ -41,6 +48,7 @@
 	u16 cpu_num;
 };
 
+/* 30 bytes * 128 max = 3840 bytes < 4096 */
 struct __attribute__((__packed__)) rmnet_shs_wq_ssflows_usr_s {
 	u64 rx_pps;
 	u64 avg_pps;
@@ -49,6 +57,21 @@
 	u16 cpu_num;
 };
 
+/* 16 + 8*9 + 1 = 89 bytes, 89*40 netdev = 3560 bytes < 4096 */
+struct __attribute__((__packed__)) rmnet_shs_wq_netdev_usr_s {
+	char name[RMNET_SHS_IFNAMSIZ];
+	u64  coal_ip_miss;
+	u64  hw_evict;
+	u64  coal_rx_pkts;
+	u64  coal_tcp;
+	u64  coal_tcp_bytes;
+	u64  coal_udp;
+	u64  coal_udp_bytes;
+	u64  udp_rx_bps;
+	u64  tcp_rx_bps;
+	u8   mux_id;
+};
+
 extern struct list_head gflows;
 extern struct list_head ssflows;
 extern struct list_head cpu_caps;
@@ -58,6 +81,7 @@
 
 struct rmnet_shs_mmap_info {
 	char *data;
+	refcount_t refcnt;
 };
 
 /* Function Definitions */
@@ -81,6 +105,7 @@
 
 void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flows);
 void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows);
+void rmnet_shs_wq_mem_update_cached_netdevs(void);
 
 void rmnet_shs_wq_mem_init(void);