Merge branch 'LA.UM.8.1.C9.09.00.00.518.343' via branch 'qcom-msm-4.14' into android-msm-floral-4.14

Bug: 146759211
Change-Id: I594bc7e2ab1c248a53a1aa2f49604bc37bdab434
Signed-off-by: Wilson Sung <wilsonsung@google.com>
diff --git a/data_dlkm_vendor_board.mk b/data_dlkm_vendor_board.mk
index 7c5b015..5b45408 100644
--- a/data_dlkm_vendor_board.mk
+++ b/data_dlkm_vendor_board.mk
@@ -1,8 +1,11 @@
 #Build rmnet perf & shs
 DATA_DLKM_BOARD_PLATFORMS_LIST := msmnile
 DATA_DLKM_BOARD_PLATFORMS_LIST += kona
-
+DATA_DLKM_BOARD_PLATFORMS_LIST += lito
+ifneq ($(TARGET_BOARD_AUTO),true)
 ifeq ($(call is-board-platform-in-list,$(DATA_DLKM_BOARD_PLATFORMS_LIST)),true)
 BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/rmnet_shs.ko
 BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/rmnet_perf.ko
 endif
+endif
+
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_app.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_app.c
new file mode 100644
index 0000000..4c6dbe6
--- /dev/null
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_app.c
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017, 2019, The Linux Foundation. All rights reserved.
+
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 and
+* only version 2 as published by the Free Software Foundation.
+
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if.h>
+
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <linux/inet.h>
+#include <asm/uaccess.h>
+
+#define EMAC_DRV_NAME "qcom-emac-dwc-eqos"
+
+static int __init DWC_ETH_QOS_app_init (void)
+{
+	struct net_device *dev;
+	rtnl_lock();
+	for_each_netdev(&init_net, dev) {
+		if (strncmp(EMAC_DRV_NAME, netdev_drivername(dev), strlen(EMAC_DRV_NAME)) == 0)
+			if (dev_change_flags(dev, dev->flags | IFF_UP) < 0)
+				pr_err("%s: DWC_ETH_QOS_app_init: Failed to open %s\n", EMAC_DRV_NAME, dev->name);
+	}
+	rtnl_unlock();
+
+	pr_info("Called DWC_ETH_QOS_open for test purposes\n");
+	return 0;
+}
+
+
+static void __exit DWC_ETH_QOS_app_cleanup (void)
+{
+	return;
+}
+
+
+module_init(DWC_ETH_QOS_app_init);
+module_exit(DWC_ETH_QOS_app_cleanup);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_desc.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_desc.c
index ac56b19..ab9b06f 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_desc.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_desc.c
@@ -1440,7 +1440,7 @@
 	DBGPR("-->DWC_ETH_QOS_map_page_buffs\n");
 
 	if (size > DWC_ETH_QOS_MAX_DATA_PER_TX_BUF) {
-		if (!prev_buffer->dma2) {
+		if (prev_buffer && !prev_buffer->dma2) {
 			DBGPR("prev_buffer->dma2 is empty\n");
 			/* fill the first buffer pointer in pre_buffer->dma2 */
 			prev_buffer->dma2 =
@@ -1505,7 +1505,7 @@
 			buffer->buf2_mapped_as_page = Y_TRUE;
 		}
 	} else {
-		if (!prev_buffer->dma2) {
+		if (prev_buffer && !prev_buffer->dma2) {
 			DBGPR("prev_buffer->dma2 is empty\n");
 			/* fill the first buffer pointer in pre_buffer->dma2 */
 			prev_buffer->dma2 = dma_map_page(GET_MEM_PDEV_DEV,
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
index c2dadaa..7ce0a31 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c
@@ -1136,8 +1136,9 @@
 
 static INT config_sub_second_increment(ULONG ptp_clock)
 {
-	ULONG val;
 	ULONG VARMAC_TCR;
+	double ss_inc = 0;
+	double sns_inc = 0;
 
 	MAC_TCR_RGRD(VARMAC_TCR);
 
@@ -1145,30 +1146,69 @@
 	/*  formula is : ((1/ptp_clock) * 1000000000) */
 	/*  where, ptp_clock = 50MHz if FINE correction */
 	/*  and ptp_clock = DWC_ETH_QOS_SYSCLOCK if COARSE correction */
-#ifdef CONFIG_PPS_OUTPUT
 	if (GET_VALUE(VARMAC_TCR, MAC_TCR_TSCFUPDT_LPOS, MAC_TCR_TSCFUPDT_HPOS) == 1) {
 		EMACDBG("Using PTP clock %ld MHz\n", ptp_clock);
-		val = ((1 * 1000000000ull) / ptp_clock);
+		ss_inc = (double)1000000000.0 / (double)ptp_clock;
 	}
 	else {
 		EMACDBG("Using SYSCLOCK for coarse correction\n");
-		val = ((1 * 1000000000ull) / DWC_ETH_QOS_SYSCLOCK );
+		ss_inc = (double)1000000000.0 / (double)DWC_ETH_QOS_SYSCLOCK;
 	}
-#else
-	if (GET_VALUE(VARMAC_TCR, MAC_TCR_TSCFUPDT_LPOS, MAC_TCR_TSCFUPDT_HPOS) == 1) {
-      val = ((1 * 1000000000ull) / 50000000);
-    }
-    else {
-      val = ((1 * 1000000000ull) / ptp_clock);
-    }
-#endif
-	/* 0.465ns accurecy */
+
+	/* 0.465ns accuracy */
 	if (GET_VALUE(
 			VARMAC_TCR, MAC_TCR_TSCTRLSSR_LPOS,
-			MAC_TCR_TSCTRLSSR_HPOS) == 0)
-		val = (val * 1000) / 465;
+			MAC_TCR_TSCTRLSSR_HPOS) == 0) {
+		EMACDBG("Using 0.465 ns accuracy\n");
+		ss_inc /= 0.465;
+	}
 
-	MAC_SSIR_SSINC_UDFWR(val);
+	sns_inc = ss_inc - (int)ss_inc; // take remainder
+	sns_inc *= 256.0; // sns_inc needs to be multiplied by 2^8, per spec.
+	sns_inc += 0.5; // round to nearest int value.
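+	/*
+	 * Illustrative example (values assumed, not taken from the spec): with
+	 * ptp_clock = 50 MHz, ss_inc = 1000000000 / 50000000 = 20.0, so
+	 * SSINC = 20 and SNSINC = 0. On the 0.465 ns granularity path
+	 * (TSCTRLSSR == 0) the same clock gives ss_inc = 20 / 0.465 ~= 43.01,
+	 * i.e. SSINC = 43 and SNSINC = 3 (fractional part scaled by 2^8, rounded).
+	 */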
+
+	MAC_SSIR_SSINC_UDFWR((int)ss_inc);
+	MAC_SSIR_SNSINC_UDFWR((int)sns_inc);
+	EMACDBG("ss_inc = %d, sns_inc = %d\n", (int)ss_inc, (int)sns_inc);
+
+	return Y_SUCCESS;
+}
+/*!
+ * \brief Program the default addend derived from the given PTP clock.
+ * \param[in] pdata, ptp_clock - driver private data and PTP clock frequency in Hz
+ * \return Success or Failure
+ * \retval  0 Success
+ * \retval -1 Failure
+ */
+
+static INT config_default_addend(struct DWC_ETH_QOS_prv_data *pdata, ULONG ptp_clock)
+{
+	struct hw_if_struct *hw_if = &pdata->hw_if;
+	u64 temp;
+
+	/* formula is :
+	 * addend = 2^32/freq_div_ratio;
+	 *
+	 * where, freq_div_ratio = DWC_ETH_QOS_SYSCLOCK/50MHz
+	 *
+	 * hence, addend = ((2^32) * 50MHz)/DWC_ETH_QOS_SYSCLOCK;
+	 *
+	 * NOTE: DWC_ETH_QOS_SYSCLOCK should be >= 50MHz to
+	 *       achieve 20ns accuracy.
+	 *
+	 * 2^x * y == (y << x), hence
+	 * 2^32 * 50000000 ==> (50000000 << 32)
+	 */
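+	/*
+	 * Example with the values above (DWC_ETH_QOS_SYSCLOCK assumed 250 MHz):
+	 * for ptp_clock = 50 MHz, addend = (50000000 << 32) / 250000000
+	 * = 2^32 / 5 = 0x33333333.
+	 */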
+	if (ptp_clock == DWC_ETH_QOS_SYSCLOCK) {
+		// If PTP_CLOCK == SYS_CLOCK, best we can do is 2^32 - 1
+		pdata->default_addend = 0xFFFFFFFF;
+	} else {
+		temp = (u64)((u64)ptp_clock << 32);
+		pdata->default_addend = div_u64(temp, DWC_ETH_QOS_SYSCLOCK);
+	}
+	hw_if->config_addend(pdata->default_addend);
+	EMACDBG("PPS: PTPCLK_Config: freq=%luHz, addend_reg=0x%x\n",
+				ptp_clock, (unsigned int)pdata->default_addend);
 
 	return Y_SUCCESS;
 }
@@ -4795,8 +4835,6 @@
 	for (QINX = 0; QINX < DWC_ETH_QOS_TX_QUEUE_CNT; QINX++)
 		configure_mtl_queue(QINX, pdata);
 
-	/* Mapping MTL Rx queue and DMA Rx channel. */
-	MTL_RQDCM0R_RGWR(0x3020100);
 #ifdef DWC_ETH_QOS_CERTIFICATION_PKTBURSTCNT
 	/* enable tx drop status */
 	MTL_OMR_DTXSTS_UDFWR(0x1);
@@ -4805,6 +4843,12 @@
 	configure_mac(pdata);
 	configure_dma_sys_bus(pdata);
 
+	/* Mapping MTL Rx queue and DMA Rx channel. */
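+	/* Each byte of MTL_RQDCM0R picks the RX DMA channel for one RX queue
+	 * (assumed register layout): 0x3020100 maps queue0->ch0 .. queue3->ch3,
+	 * while 0x3020101 steers queue 0 to channel 1 for early ethernet. */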
+	if (pdata->res_data->early_eth_en)
+		MTL_RQDCM0R_RGWR(0x3020101); /* map RX queue 0 to DMA channel 1 */
+	else
+		MTL_RQDCM0R_RGWR(0x3020100);
+
 	for (QINX = 0; QINX < DWC_ETH_QOS_TX_QUEUE_CNT; QINX++) {
 		if (pdata->ipa_enabled && QINX == IPA_DMA_TX_CH)
 			continue;
@@ -5110,6 +5154,7 @@
 	/* for hw time stamping */
 	hw_if->config_hw_time_stamping = config_hw_time_stamping;
 	hw_if->config_sub_second_increment = config_sub_second_increment;
+	hw_if->config_default_addend = config_default_addend;
 	hw_if->init_systime = init_systime;
 	hw_if->config_addend = config_addend;
 	hw_if->adjust_systime = adjust_systime;
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c
index d6a2986..fda72a8 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -761,6 +761,9 @@
 	struct DWC_ETH_QOS_prv_data *pdata =
 		(struct DWC_ETH_QOS_prv_data *)dev_data;
 
+	/* Set a wakeup event to ensure enough time for processing */
+	pm_wakeup_event(&pdata->pdev->dev, 5000);
+
 	/* Queue the work in system_wq */
 	queue_work(system_wq, &pdata->emac_phy_work);
 
@@ -781,26 +784,50 @@
 	int micrel_intr_status = 0;
 	EMACDBG("Enter\n");
 
-	DWC_ETH_QOS_mdio_read_direct(
-		pdata, pdata->phyaddr, DWC_ETH_QOS_BASIC_STATUS, &phy_intr_status);
-	EMACDBG(
-		"Basic Status Reg (%#x) = %#x\n", DWC_ETH_QOS_BASIC_STATUS, phy_intr_status);
+	if ((pdata->phydev->phy_id & pdata->phydev->drv->phy_id_mask) == MICREL_PHY_ID) {
+		DWC_ETH_QOS_mdio_read_direct(
+			pdata, pdata->phyaddr, DWC_ETH_QOS_BASIC_STATUS, &phy_intr_status);
+		EMACDBG(
+			"Basic Status Reg (%#x) = %#x\n", DWC_ETH_QOS_BASIC_STATUS, phy_intr_status);
 
-	DWC_ETH_QOS_mdio_read_direct(
-		pdata, pdata->phyaddr, DWC_ETH_QOS_MICREL_PHY_INTCS, &micrel_intr_status);
-	EMACDBG(
-		"MICREL PHY Intr EN Reg (%#x) = %#x\n", DWC_ETH_QOS_MICREL_PHY_INTCS, micrel_intr_status);
+		DWC_ETH_QOS_mdio_read_direct(
+			pdata, pdata->phyaddr, DWC_ETH_QOS_MICREL_PHY_INTCS, &micrel_intr_status);
+		EMACDBG(
+			"MICREL PHY Intr EN Reg (%#x) = %#x\n", DWC_ETH_QOS_MICREL_PHY_INTCS, micrel_intr_status);
 
-	/* Interrupt received for link state change */
-	if (phy_intr_status & LINK_STATE_MASK) {
-		EMACDBG("Interrupt received for link UP state\n");
-		phy_mac_interrupt(pdata->phydev, LINK_UP);
-	} else if (!(phy_intr_status & LINK_STATE_MASK)) {
-		EMACDBG("Interrupt received for link DOWN state\n");
-		phy_mac_interrupt(pdata->phydev, LINK_DOWN);
-	} else if (!(phy_intr_status & AUTONEG_STATE_MASK)) {
-		EMACDBG("Interrupt received for link down with"
+		/* Call ack interrupt to clear the WOL interrupt status fields */
+		if (pdata->phydev->drv->ack_interrupt)
+			pdata->phydev->drv->ack_interrupt(pdata->phydev);
+
+		/* Interrupt received for link state change */
+		if (phy_intr_status & LINK_STATE_MASK) {
+			EMACDBG("Interrupt received for link UP state\n");
+			phy_mac_interrupt(pdata->phydev, LINK_UP);
+		} else if (!(phy_intr_status & LINK_STATE_MASK)) {
+			EMACDBG("Interrupt received for link DOWN state\n");
+			phy_mac_interrupt(pdata->phydev, LINK_DOWN);
+		} else if (!(phy_intr_status & AUTONEG_STATE_MASK)) {
+			EMACDBG("Interrupt received for link down with"
+					" auto-negotiation error\n");
+		}
+	} else {
+		DWC_ETH_QOS_mdio_read_direct(
+		pdata, pdata->phyaddr, DWC_ETH_QOS_PHY_INTR_STATUS, &phy_intr_status);
+		EMACDBG("Phy Interrupt status Reg at offset 0x13 = %#x\n", phy_intr_status);
+		/* Interrupt received for link state change */
+		if (phy_intr_status & LINK_UP_STATE) {
+			pdata->hw_if.stop_mac_tx_rx();
+			EMACDBG("Interrupt received for link UP state\n");
+			phy_mac_interrupt(pdata->phydev, LINK_UP);
+		} else if (phy_intr_status & LINK_DOWN_STATE) {
+			EMACDBG("Interrupt received for link DOWN state\n");
+			phy_mac_interrupt(pdata->phydev, LINK_DOWN);
+		} else if (phy_intr_status & AUTO_NEG_ERROR) {
+			EMACDBG("Interrupt received for link down with"
 				" auto-negotiation error\n");
+		} else if (phy_intr_status & PHY_WOL) {
+			EMACDBG("Interrupt received for WoL packet\n");
+		}
 	}
 
 	EMACDBG("Exit\n");
@@ -1841,7 +1868,6 @@
  *
  * \retval 0 on success & negative number on failure.
  */
-
 static int DWC_ETH_QOS_open(struct net_device *dev)
 {
 	struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev);
@@ -2423,6 +2449,7 @@
 	struct DWC_ETH_QOS_prv_data *pdata)
 {
 	UINT ret = DEFAULT_INT_MOD;
+	bool is_udp;
 
 #ifdef DWC_ETH_QOS_CONFIG_PTP
 	if (eth_type == ETH_P_1588)
@@ -2433,8 +2460,11 @@
 		ret = AVB_INT_MOD;
 	} else if (eth_type == ETH_P_IP || eth_type == ETH_P_IPV6) {
 #ifdef DWC_ETH_QOS_CONFIG_PTP
-		if (udp_hdr(skb)->dest == htons(PTP_UDP_EV_PORT)
-			|| udp_hdr(skb)->dest == htons(PTP_UDP_GEN_PORT)) {
+		is_udp = (eth_type == ETH_P_IP && ip_hdr(skb)->protocol == IPPROTO_UDP)
+						|| (eth_type == ETH_P_IPV6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP);
+
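+		/* Only dereference udp_hdr() once the L4 protocol is known to be UDP */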
+		if (is_udp && (udp_hdr(skb)->dest == htons(PTP_UDP_EV_PORT)
+			|| udp_hdr(skb)->dest == htons(PTP_UDP_GEN_PORT))) {
 			ret = PTP_INT_MOD;
 		} else
 #endif
@@ -2959,6 +2989,15 @@
 			pdata->xstats.q_tx_pkt_n[qinx]++;
 			pdata->xstats.tx_pkt_n++;
 			dev->stats.tx_packets++;
+#ifdef DWC_ETH_QOS_BUILTIN
+			if (dev->stats.tx_packets == 1)
+				EMACINFO("Transmitted first Tx packet\n");
+#endif
+#ifdef CONFIG_MSM_BOOT_TIME_MARKER
+			if (dev->stats.tx_packets == 1) {
+				place_marker("M - Ethernet first packet transmitted");
+			}
+#endif
 		}
 #else
 		if ((hw_if->get_tx_desc_ls(txptr)) && !(hw_if->get_tx_desc_ctxt(txptr))) {
@@ -3046,12 +3085,23 @@
 				    struct net_device *dev, struct sk_buff *skb,
 				    UINT qinx)
 {
+#ifdef DWC_ETH_QOS_BUILTIN
+	static int cnt_ipv4 = 0, cnt_ipv6 = 0;
+#endif
+
 	struct DWC_ETH_QOS_rx_queue *rx_queue = GET_RX_QUEUE_PTR(qinx);
 
 	skb_record_rx_queue(skb, qinx);
 	skb->dev = dev;
 	skb->protocol = eth_type_trans(skb, dev);
 
+#ifdef DWC_ETH_QOS_BUILTIN
+	if (skb->protocol == htons(ETH_P_IPV6) && (cnt_ipv6++ == 1)) {
+		EMACINFO("Received first ipv6 packet\n");
+	}
+	if (skb->protocol == htons(ETH_P_IP) && (cnt_ipv4++ == 1))
+		EMACINFO("Received first ipv4 packet\n");
+#endif
 	if (dev->features & NETIF_F_GRO) {
 		napi_gro_receive(&rx_queue->napi, skb);
 	}
@@ -3087,10 +3137,11 @@
 {
 	if (page2_used)
 		buffer->page2 = NULL;
-
-	skb->len += length;
-	skb->data_len += length;
-	skb->truesize += length;
+	if (skb != NULL) {
+		skb->len += length;
+		skb->data_len += length;
+		skb->truesize += length;
+	}
 }
 
 /* Receive Checksum Offload configuration */
@@ -3198,7 +3249,7 @@
 	unsigned short payload_len = 0;
 	unsigned char intermediate_desc_cnt = 0;
 	unsigned char buf2_used = 0;
-	int ret;
+	int ret = 0;
 
 	DBGPR("-->DWC_ETH_QOS_clean_split_hdr_rx_irq: qinx = %u, quota = %d\n",
 	      qinx, quota);
@@ -3297,15 +3348,13 @@
 				} else {
 					/* this is the middle of a chain */
 					payload_len = pdata->rx_buffer_len;
-					skb_fill_page_desc(desc_data->skb_top,
-							   skb_shinfo(desc_data->skb_top)->nr_frags,
-						buffer->page2, 0,
-						payload_len);
-
+					if (desc_data->skb_top != NULL)
+						skb_fill_page_desc(desc_data->skb_top, skb_shinfo(desc_data->skb_top)->nr_frags, buffer->page2, 0, payload_len);
 					/* re-use this skb, as consumed only the page */
 					buffer->skb = skb;
 				}
-				DWC_ETH_QOS_consume_page_split_hdr(buffer,
+				if (desc_data->skb_top != NULL)
+						DWC_ETH_QOS_consume_page_split_hdr(buffer,
 								   desc_data->skb_top,
 							 payload_len, buf2_used);
 				goto next_desc;
@@ -3322,17 +3371,15 @@
 							(pdata->rx_buffer_len * intermediate_desc_cnt) -
 							buffer->rx_hdr_size);
 					}
-
-					skb_fill_page_desc(desc_data->skb_top,
-							   skb_shinfo(desc_data->skb_top)->nr_frags,
-						buffer->page2, 0,
-						payload_len);
-
-					/* re-use this skb, as consumed only the page */
-					buffer->skb = skb;
-					skb = desc_data->skb_top;
+					if (desc_data->skb_top != NULL) {
+						skb_fill_page_desc(desc_data->skb_top, skb_shinfo(desc_data->skb_top)->nr_frags, buffer->page2, 0, payload_len);
+						/* re-use this skb, as consumed only the page */
+						buffer->skb = skb;
+						skb = desc_data->skb_top;
+					}
 					desc_data->skb_top = NULL;
-					DWC_ETH_QOS_consume_page_split_hdr(buffer, skb,
+					if (skb != NULL)
+						DWC_ETH_QOS_consume_page_split_hdr(buffer, skb,
 									   payload_len, buf2_used);
 				} else {
 					/* no chain, got both FD + LD together */
@@ -3376,11 +3423,13 @@
 				hdr_len = 0;
 			}
 
-			DWC_ETH_QOS_config_rx_csum(pdata, skb, RX_NORMAL_DESC);
+			if (skb != NULL) {
+				DWC_ETH_QOS_config_rx_csum(pdata, skb, RX_NORMAL_DESC);
 
 #ifdef DWC_ETH_QOS_ENABLE_VLAN_TAG
-			DWC_ETH_QOS_get_rx_vlan(pdata, skb, RX_NORMAL_DESC);
+				DWC_ETH_QOS_get_rx_vlan(pdata, skb, RX_NORMAL_DESC);
 #endif
+			}
 
 #ifdef YDEBUG_FILTER
 			DWC_ETH_QOS_check_rx_filter_status(RX_NORMAL_DESC);
@@ -3389,14 +3438,16 @@
 			if ((pdata->hw_feat.tsstssel) && (pdata->hwts_rx_en)) {
 				/* get rx tstamp if available */
 				if (hw_if->rx_tstamp_available(RX_NORMAL_DESC)) {
-					ret = DWC_ETH_QOS_get_rx_hwtstamp(pdata,
+					if (skb != NULL )
+						ret = DWC_ETH_QOS_get_rx_hwtstamp(pdata,
 									  skb, desc_data, qinx);
 					if (ret == 0) {
 						/* device has not yet updated the CONTEXT desc to hold the
 						 * time stamp, hence delay the packet reception
 						 */
 						buffer->skb = skb;
-						buffer->dma = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
+						if (skb != NULL)
+							buffer->dma = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
 								pdata->rx_buffer_len, DMA_FROM_DEVICE);
 						if (dma_mapping_error(GET_MEM_PDEV_DEV, buffer->dma))
 							dev_alert(&pdata->pdev->dev, "failed to do the RX dma map\n");
@@ -3416,8 +3467,10 @@
 #endif
 			/* update the statistics */
 			dev->stats.rx_packets++;
-			dev->stats.rx_bytes += skb->len;
-			DWC_ETH_QOS_receive_skb(pdata, dev, skb, qinx);
+			if ( skb != NULL) {
+				dev->stats.rx_bytes += skb->len;
+				DWC_ETH_QOS_receive_skb(pdata, dev, skb, qinx);
+			}
 			received++;
  next_desc:
 			desc_data->dirty_rx++;
@@ -3478,7 +3531,7 @@
 	u16 pkt_len;
 	UCHAR intermediate_desc_cnt = 0;
 	unsigned int buf2_used;
-	int ret;
+	int ret = 0;
 
 	DBGPR("-->DWC_ETH_QOS_clean_jumbo_rx_irq: qinx = %u, quota = %d\n",
 	      qinx, quota);
@@ -3549,20 +3602,22 @@
 						pdata->rx_buffer_len);
 				} else {
 					/* this is the middle of a chain */
-					skb_fill_page_desc(desc_data->skb_top,
+					if (desc_data->skb_top != NULL) {
+						skb_fill_page_desc(desc_data->skb_top,
 							   skb_shinfo(desc_data->skb_top)->nr_frags,
 						buffer->page, 0,
 						pdata->rx_buffer_len);
-
-					DBGPR("RX: pkt in second buffer pointer\n");
-					skb_fill_page_desc(desc_data->skb_top,
+						DBGPR("RX: pkt in second buffer pointer\n");
+						skb_fill_page_desc(desc_data->skb_top,
 							   skb_shinfo(desc_data->skb_top)->nr_frags,
 						buffer->page2, 0,
 						pdata->rx_buffer_len);
+					}
 					/* re-use this skb, as consumed only the page */
 					buffer->skb = skb;
 				}
-				DWC_ETH_QOS_consume_page(buffer,
+				if (desc_data->skb_top != NULL )
+					DWC_ETH_QOS_consume_page(buffer,
 							 desc_data->skb_top,
 							 (pdata->rx_buffer_len * 2),
 							 buf2_used);
@@ -3573,19 +3628,21 @@
 					pkt_len =
 						(pkt_len - (pdata->rx_buffer_len * intermediate_desc_cnt));
 					if (pkt_len > pdata->rx_buffer_len) {
-						skb_fill_page_desc(desc_data->skb_top,
+						if (desc_data->skb_top != NULL) {
+							skb_fill_page_desc(desc_data->skb_top,
 								   skb_shinfo(desc_data->skb_top)->nr_frags,
 							buffer->page, 0,
 							pdata->rx_buffer_len);
-
-						DBGPR("RX: pkt in second buffer pointer\n");
-						skb_fill_page_desc(desc_data->skb_top,
+							DBGPR("RX: pkt in second buffer pointer\n");
+							skb_fill_page_desc(desc_data->skb_top,
 								   skb_shinfo(desc_data->skb_top)->nr_frags,
 							buffer->page2, 0,
 							(pkt_len - pdata->rx_buffer_len));
+						}
 						buf2_used = 1;
 					} else {
-						skb_fill_page_desc(desc_data->skb_top,
+						if (desc_data->skb_top != NULL)
+							skb_fill_page_desc(desc_data->skb_top,
 								   skb_shinfo(desc_data->skb_top)->nr_frags,
 							buffer->page, 0,
 							pkt_len);
@@ -3593,9 +3650,11 @@
 					}
 					/* re-use this skb, as consumed only the page */
 					buffer->skb = skb;
-					skb = desc_data->skb_top;
+					if (desc_data->skb_top != NULL)
+						skb = desc_data->skb_top;
 					desc_data->skb_top = NULL;
-					DWC_ETH_QOS_consume_page(buffer, skb,
+					if (skb != NULL)
+						DWC_ETH_QOS_consume_page(buffer, skb,
 								 pkt_len,
 								 buf2_used);
 				} else {
@@ -3645,11 +3704,13 @@
 				intermediate_desc_cnt = 0;
 			}
 
-			DWC_ETH_QOS_config_rx_csum(pdata, skb, RX_NORMAL_DESC);
+			if (skb != NULL) {
+				DWC_ETH_QOS_config_rx_csum(pdata, skb, RX_NORMAL_DESC);
 
 #ifdef DWC_ETH_QOS_ENABLE_VLAN_TAG
-			DWC_ETH_QOS_get_rx_vlan(pdata, skb, RX_NORMAL_DESC);
+				DWC_ETH_QOS_get_rx_vlan(pdata, skb, RX_NORMAL_DESC);
 #endif
+			}
 
 #ifdef YDEBUG_FILTER
 			DWC_ETH_QOS_check_rx_filter_status(RX_NORMAL_DESC);
@@ -3658,15 +3719,16 @@
 			if ((pdata->hw_feat.tsstssel) && (pdata->hwts_rx_en)) {
 				/* get rx tstamp if available */
 				if (hw_if->rx_tstamp_available(RX_NORMAL_DESC)) {
-					ret = DWC_ETH_QOS_get_rx_hwtstamp(pdata,
-									  skb, desc_data, qinx);
+					if (skb != NULL)
+						ret = DWC_ETH_QOS_get_rx_hwtstamp(pdata, skb, desc_data, qinx);
 					if (ret == 0) {
 						/* device has not yet updated the CONTEXT desc to hold the
 						 * time stamp, hence delay the packet reception
 						 */
 						buffer->skb = skb;
-						buffer->dma = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
-								pdata->rx_buffer_len, DMA_FROM_DEVICE);
+						if (skb != NULL)
+							buffer->dma = dma_map_single(GET_MEM_PDEV_DEV, skb->data, pdata->rx_buffer_len, DMA_FROM_DEVICE);
+
 						if (dma_mapping_error(GET_MEM_PDEV_DEV, buffer->dma))
 							dev_alert(&pdata->pdev->dev, "failed to do the RX dma map\n");
 
@@ -3686,16 +3748,16 @@
 #endif
 			/* update the statistics */
 			dev->stats.rx_packets++;
-			dev->stats.rx_bytes += skb->len;
-
-			/* eth type trans needs skb->data to point to something */
-			if (!pskb_may_pull(skb, ETH_HLEN)) {
-				dev_alert(&pdata->pdev->dev, "pskb_may_pull failed\n");
-				dev_kfree_skb_any(skb);
-				goto next_desc;
+			if (skb != NULL) {
+				dev->stats.rx_bytes += skb->len;
+				/* eth type trans needs skb->data to point to something */
+				if (!pskb_may_pull(skb, ETH_HLEN)) {
+					dev_alert(&pdata->pdev->dev, "pskb_may_pull failed\n");
+					dev_kfree_skb_any(skb);
+					goto next_desc;
+				}
+				DWC_ETH_QOS_receive_skb(pdata, dev, skb, qinx);
 			}
-
-			DWC_ETH_QOS_receive_skb(pdata, dev, skb, qinx);
 			received++;
  next_desc:
 			desc_data->dirty_rx++;
@@ -3861,8 +3923,17 @@
 				/* update the statistics */
 				dev->stats.rx_packets++;
 				dev->stats.rx_bytes += skb->len;
+#ifdef DWC_ETH_QOS_BUILTIN
+				if (dev->stats.rx_packets == 1)
+					EMACINFO("Received First Rx packet\n");
+#endif
 				DWC_ETH_QOS_receive_skb(pdata, dev, skb, qinx);
 				received++;
+#ifdef CONFIG_MSM_BOOT_TIME_MARKER
+				if ( dev->stats.rx_packets == 1) {
+					place_marker("M - Ethernet first packet received");
+				}
+#endif
 			} else {
 				dump_rx_desc(qinx, RX_NORMAL_DESC, desc_data->cur_rx);
 				if (!(RX_NORMAL_DESC->RDES3 &
@@ -4887,52 +4958,14 @@
 {
 		struct timespec now;
 		struct hw_if_struct *hw_if = &pdata->hw_if;
-		u64 temp;
 
 		DBGPR("-->DWC_ETH_QOS_config_timer_registers\n");
 
+		pdata->ptpclk_freq = DWC_ETH_QOS_DEFAULT_PTP_CLOCK;
+		/* program default addend */
+		hw_if->config_default_addend(pdata, DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
 		/* program Sub Second Increment Reg */
-#ifdef CONFIG_PPS_OUTPUT
-		/* If default_addend is already programmed, then we expect that
-      * sub_second_increment is also programmed already */
-    if(pdata->default_addend == 0){
-			hw_if->config_sub_second_increment(DWC_ETH_QOS_SYSCLOCK); // Using default 250MHz
-		}
-		else {
-			u64 pclk;
-			pclk = (u64) (pdata->default_addend) *  DWC_ETH_QOS_SYSCLOCK;
-			pclk += 0x8000000;
-			pclk >>= 32;
-			hw_if->config_sub_second_increment((u32)pclk);
-		}
-#else
-		hw_if->config_sub_second_increment(DWC_ETH_QOS_SYSCLOCK);
-#endif
-		/* formula is :
-		 * addend = 2^32/freq_div_ratio;
-		 *
-		 * where, freq_div_ratio = DWC_ETH_QOS_SYSCLOCK/50MHz
-		 *
-		 * hence, addend = ((2^32) * 50MHz)/DWC_ETH_QOS_SYSCLOCK;
-		 *
-		 * NOTE: DWC_ETH_QOS_SYSCLOCK should be >= 50MHz to
-		 *       achive 20ns accuracy.
-		 *
-		 * 2^x * y == (y << x), hence
-		 * 2^32 * 50000000 ==> (50000000 << 32)
-		 */
-#ifdef CONFIG_PPS_OUTPUT
-		if(pdata->default_addend == 0){
-			temp = (u64)(50000000ULL << 32);
-			pdata->default_addend = div_u64(temp, DWC_ETH_QOS_SYSCLOCK);
-			EMACDBG("Using default PTP clock = 250MHz\n");
-		}
-#else
-		temp = (u64)(50000000ULL << 32);
-		pdata->default_addend = div_u64(temp, DWC_ETH_QOS_SYSCLOCK);
-#endif
-		hw_if->config_addend(pdata->default_addend);
-
+		hw_if->config_sub_second_increment(DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
 		/* initialize system time */
 		getnstimeofday(&now);
 		hw_if->init_systime(now.tv_sec, now.tv_nsec);
@@ -5070,7 +5103,6 @@
 	struct ETH_PPS_Config *eth_pps_cfg = (struct ETH_PPS_Config *)req->ptr;
 	struct hw_if_struct *hw_if = &pdata->hw_if;
 	int ret = 0;
-	u64 val;
 
 	if ((eth_pps_cfg->ppsout_ch < 0) ||
 		(eth_pps_cfg->ppsout_ch >= pdata->hw_feat.pps_out_num))
@@ -5080,21 +5112,13 @@
 	}
 
 	if (eth_pps_cfg->ptpclk_freq > DWC_ETH_QOS_SYSCLOCK){
-		EMACINFO("PPS: PTPCLK_Config: freq=%dHz is too high. Cannot config it\n",
+		EMACDBG("PPS: PTPCLK_Config: freq=%dHz is too high. Cannot config it\n",
 			eth_pps_cfg->ptpclk_freq );
 		return -1;
 	}
-	pdata->ptpclk_freq = eth_pps_cfg->ptpclk_freq;
-	val = (u64)(1ULL << 32);
-	val = val * (eth_pps_cfg->ptpclk_freq);
-	val += (DWC_ETH_QOS_SYSCLOCK/2);
-	val = div_u64(val, DWC_ETH_QOS_SYSCLOCK);
-	if ( val > 0xFFFFFFFF) val = 0xFFFFFFFF;
-	EMACINFO("PPS: PTPCLK_Config: freq=%dHz, addend_reg=0x%x\n",
-				eth_pps_cfg->ptpclk_freq, (unsigned int)val);
 
-	pdata->default_addend = val;
-	ret = hw_if->config_addend((unsigned int)val);
+	pdata->ptpclk_freq = eth_pps_cfg->ptpclk_freq;
+	ret = hw_if->config_default_addend(pdata, (ULONG)eth_pps_cfg->ptpclk_freq);
 	ret |= hw_if->config_sub_second_increment( (ULONG)eth_pps_cfg->ptpclk_freq);
 
 	return ret;
@@ -5177,15 +5201,8 @@
 
 	/* Enable timestamping. This is required to start system time generator.*/
 	MAC_TCR_TSENA_UDFWR(0x1);
-
-	/* Configure MAC Sub-second and Sub-nanosecond increment register based on PTP clock. */
-	MAC_SSIR_SSINC_UDFWR(0x4); // Sub-second increment value for 250MHz and 230.4MHz ptp clock
-
-	MAC_SSIR_SNSINC_UDFWR(0x0); // Sub-nanosecond increment value for 250 MHz ptp clock
-	EMACDBG("250 clock\n");
-
 	MAC_TCR_TSUPDT_UDFWR(0x1);
-	MAC_TCR_TSCFUPDT_UDFWR(0x0); // Coarse Timestamp Update method.
+	MAC_TCR_TSCFUPDT_UDFWR(0x1); // Fine Timestamp Update method.
 
 	/* Initialize MAC System Time Update register */
 	MAC_STSUR_TSS_UDFWR(0x0); // MAC system time in seconds
@@ -5244,12 +5261,16 @@
 	struct ETH_PPS_Config *eth_pps_cfg = (struct ETH_PPS_Config *)req->ptr;
 	unsigned int val;
 	int interval, width;
-	int interval_ns; /*interval in nano seconds*/
+	struct hw_if_struct *hw_if = &pdata->hw_if;
 
-	if (pdata->emac_hw_version_type == EMAC_HW_v2_3_1 &&
-		eth_pps_cfg->ptpclk_freq <= 0) {
-		/* Set PTP clock to default 250 */
+	/* For LPASS we need a 19.2 MHz PPS frequency on PPS0.
+	 * If LPASS is enabled, do not allow the PTP clock to be changed,
+	 * because changing the PTP clock changes the addend and sub-second
+	 * increment, and PPS0 would no longer output 19.2 MHz.
+	 */
+	if (pdata->res_data->pps_lpass_conn_en) {
 		eth_pps_cfg->ptpclk_freq = DWC_ETH_QOS_DEFAULT_PTP_CLOCK;
+		EMACDBG("Using default PTP clock\n");
 	}
 
 	if ((eth_pps_cfg->ppsout_ch < 0) ||
@@ -5268,6 +5289,12 @@
 		eth_pps_cfg->ppsout_duty = 99;
 	}
 
+	/* Configure increment values */
+	hw_if->config_sub_second_increment(eth_pps_cfg->ptpclk_freq);
+
+	/* Configure addend value, since the fine timestamp update method is used */
+	hw_if->config_default_addend(pdata, eth_pps_cfg->ptpclk_freq);
+
 	if(0 < eth_pps_cfg->ptpclk_freq) {
 		pdata->ptpclk_freq = eth_pps_cfg->ptpclk_freq;
 		interval = (eth_pps_cfg->ptpclk_freq + eth_pps_cfg->ppsout_freq/2)
@@ -5280,25 +5307,17 @@
 	if (width >= interval) width = interval - 1;
 	if (width < 0) width = 0;
 
-	EMACINFO("PPS: PPSOut_Config: freq=%dHz, ch=%d, duty=%d\n",
+	EMACDBG("PPS: PPSOut_Config: freq=%dHz, ch=%d, duty=%d\n",
 				eth_pps_cfg->ppsout_freq,
 				eth_pps_cfg->ppsout_ch,
 				eth_pps_cfg->ppsout_duty);
-	EMACINFO(" PPS: with PTP Clock freq=%dHz\n", pdata->ptpclk_freq);
+	EMACDBG(" PPS: with PTP Clock freq=%dHz\n", pdata->ptpclk_freq);
 
-	EMACINFO("PPS: PPSOut_Config: interval=%d, width=%d\n", interval, width);
-
-	if (pdata->emac_hw_version_type == EMAC_HW_v2_3_1) {
-		//calculate interval & width
-		interval_ns = (1000000000/eth_pps_cfg->ppsout_freq) ;
-		interval = ((interval_ns)/4) - 1;
-		width = ((interval_ns)/(2*4)) - 1;
-		EMACDBG("pps_interval=%d,width=%d\n",interval,width);
-	}
+	EMACDBG("PPS: PPSOut_Config: interval=%d, width=%d\n", interval, width);
 
 	switch (eth_pps_cfg->ppsout_ch) {
 	case DWC_ETH_QOS_PPS_CH_0:
-		if (pdata->emac_hw_version_type == EMAC_HW_v2_3_1) {
+		if (pdata->res_data->pps_lpass_conn_en) {
 			if (eth_pps_cfg->ppsout_start == DWC_ETH_QOS_PPS_START) {
 				MAC_PPSC_PPSEN0_UDFWR(0x1);
 				MAC_PPS_INTVAL_PPSINT0_UDFWR(DWC_ETH_QOS_PPS_CH_0, interval);
@@ -5369,7 +5388,7 @@
 		}
 		break;
 	default:
-		EMACINFO("PPS: PPS output channel is invalid (only CH0/CH1/CH2/CH3 is supported).\n");
+		EMACDBG("PPS: PPS output channel is invalid (only CH0/CH1/CH2/CH3 is supported).\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -5406,6 +5425,9 @@
 	struct hw_if_struct *hw_if = &pdata->hw_if;
 	struct net_device *dev = pdata->dev;
 	int ret = 0;
+#ifdef CONFIG_PPS_OUTPUT
+	struct ETH_PPS_Config eth_pps_cfg;
+#endif
 
 	DBGPR("-->DWC_ETH_QOS_handle_prv_ioctl\n");
 
@@ -5779,6 +5801,13 @@
 
 #ifdef CONFIG_PPS_OUTPUT
 	case DWC_ETH_QOS_CONFIG_PTPCLK_CMD:
+
+		if (copy_from_user(&eth_pps_cfg, req->ptr,
+			sizeof(struct ETH_PPS_Config))) {
+			return -EFAULT;
+		}
+		req->ptr = &eth_pps_cfg;
+
 		if(pdata->hw_feat.pps_out_num == 0)
 			ret = -EOPNOTSUPP;
 		else
@@ -5786,6 +5815,13 @@
 		break;
 
 	case DWC_ETH_QOS_CONFIG_PPSOUT_CMD:
+
+		if (copy_from_user(&eth_pps_cfg, req->ptr,
+			sizeof(struct ETH_PPS_Config))) {
+			return -EFAULT;
+		}
+		req->ptr = &eth_pps_cfg;
+
 		if(pdata->hw_feat.pps_out_num == 0)
 			ret = -EOPNOTSUPP;
 		else
@@ -5833,7 +5869,6 @@
 	u32 ts_event_en = 0;
 	u32 av_8021asm_en = 0;
 	u32 VARMAC_TCR = 0;
-	u64 temp = 0;
 	struct timespec now;
 
 	DBGPR_PTP("-->DWC_ETH_QOS_handle_hwtstamp_ioctl\n");
@@ -6004,47 +6039,11 @@
 
 		hw_if->config_hw_time_stamping(VARMAC_TCR);
 
+		/* program default addend */
+		hw_if->config_default_addend(pdata, DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
+
 		/* program Sub Second Increment Reg */
-#ifdef CONFIG_PPS_OUTPUT
-		/* If default_addend is already programmed, then we expect that
-		* sub_second_increment is also programmed already */
-		if (pdata->default_addend == 0) {
-			hw_if->config_sub_second_increment(DWC_ETH_QOS_SYSCLOCK); // Using default 250MHz
-		} else {
-			u64 pclk;
-			pclk = (u64) (pdata->default_addend) *  DWC_ETH_QOS_SYSCLOCK;
-			pclk += 0x8000000;
-			pclk >>= 32;
-			hw_if->config_sub_second_increment((u32)pclk);
-		}
-#else
-		hw_if->config_sub_second_increment(DWC_ETH_QOS_SYSCLOCK);
-#endif
-		/* formula is :
-		 * addend = 2^32/freq_div_ratio;
-		 *
-		 * where, freq_div_ratio = DWC_ETH_QOS_SYSCLOCK/50MHz
-		 *
-		 * hence, addend = ((2^32) * 50MHz)/DWC_ETH_QOS_SYSCLOCK;
-		 *
-		 * NOTE: DWC_ETH_QOS_SYSCLOCK should be >= 50MHz to
-		 *       achive 20ns accuracy.
-		 *
-		 * 2^x * y == (y << x), hence
-		 * 2^32 * 50000000 ==> (50000000 << 32)
-		 *
-		 */
-#ifdef CONFIG_PPS_OUTPUT
-	if(pdata->default_addend == 0){
-		temp = (u64)(50000000ULL << 32);
-		pdata->default_addend = div_u64(temp, DWC_ETH_QOS_SYSCLOCK);
-		EMACINFO("Using default PTP clock = 250MHz\n");
-	}
-#else
-		temp = (u64)(50000000ULL << 32);
-		pdata->default_addend = div_u64(temp, DWC_ETH_QOS_SYSCLOCK);
-#endif
-		hw_if->config_addend(pdata->default_addend);
+		hw_if->config_sub_second_increment(DWC_ETH_QOS_DEFAULT_PTP_CLOCK);
 
 		/* initialize system time */
 		getnstimeofday(&now);
@@ -6147,9 +6146,7 @@
 {
 	struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev);
 	struct ifr_data_struct req;
-#ifdef CONFIG_PPS_OUTPUT
-	struct ETH_PPS_Config eth_pps_cfg;
-#endif
+
 	struct mii_ioctl_data *data = if_mii(ifr);
 	unsigned int reg_val = 0;
 	int ret = 0;
@@ -6191,13 +6188,6 @@
 	   if (copy_from_user(&req, ifr->ifr_ifru.ifru_data,
 			   sizeof(struct ifr_data_struct)))
 			return -EFAULT;
-#ifdef CONFIG_PPS_OUTPUT
-		if (copy_from_user(&eth_pps_cfg, req.ptr,
-			sizeof(struct ETH_PPS_Config))) {
-			return -EFAULT;
-		}
-		req.ptr = &eth_pps_cfg;
-#endif
 		ret = DWC_ETH_QOS_handle_prv_ioctl(pdata, &req);
 		req.command_error = ret;
 
@@ -6209,7 +6199,7 @@
 	case DWC_ETH_QOS_PRV_IOCTL_IPA:
 		if (!pdata->prv_ipa.ipa_uc_ready ) {
 			ret = -EAGAIN;
-			EMACINFO("IPA or IPA uc is not ready \n");
+			EMACDBG("IPA or IPA uc is not ready \n");
 			break;
 		}
 		ret = DWC_ETH_QOS_handle_prv_ioctl_ipa(pdata, ifr);
@@ -6448,8 +6438,7 @@
 	int crc32_val = 0;
 	unsigned int enb_12bit_vhash;
 
-	dev_alert(&pdata->pdev->dev, "-->DWC_ETH_QOS_vlan_rx_add_vid: vid = %d\n",
-		  vid);
+	EMACDBG("-->DWC_ETH_QOS_vlan_rx_add_vid: vid = %d\n", vid);
 
 	if (pdata->vlan_hash_filtering) {
 		/* The upper 4 bits of the calculated CRC are used to
@@ -6482,7 +6471,7 @@
 		pdata->vlan_ht_or_id = vid;
 	}
 
-	dev_alert(&pdata->pdev->dev, "<--DWC_ETH_QOS_vlan_rx_add_vid\n");
+	EMACDBG("<--DWC_ETH_QOS_vlan_rx_add_vid\n");
 	return 0;
 }
 
@@ -6549,8 +6538,6 @@
 		hw_if->enable_remote_pmt();
 	if (wakeup_type & DWC_ETH_QOS_MAGIC_WAKEUP)
 		hw_if->enable_magic_pmt();
-	if (wakeup_type & DWC_ETH_QOS_PHY_INTR_WAKEUP)
-		enable_irq_wake(pdata->phy_irq);
 
 	pdata->power_down_type = wakeup_type;
 
@@ -6611,11 +6598,6 @@
 		pdata->power_down_type &= ~DWC_ETH_QOS_REMOTE_WAKEUP;
 	}
 
-	if (pdata->power_down_type & DWC_ETH_QOS_PHY_INTR_WAKEUP) {
-		disable_irq_wake(pdata->phy_irq);
-		pdata->power_down_type &= ~DWC_ETH_QOS_PHY_INTR_WAKEUP;
-	}
-
 	pdata->power_down = 0;
 
 	if (pdata->phydev)
@@ -8098,7 +8080,7 @@
 	pdata->xstats.dma_debug_status1 = DWC_ETH_QOS_reg_read(DMA_DSR1_RGOFFADDR);
 
 	for (qinx = 0; qinx < DWC_ETH_QOS_TX_QUEUE_CNT; qinx++) {
-		if (qinx == IPA_DMA_TX_CH)
+		if (pdata->ipa_enabled && qinx == IPA_DMA_TX_CH)
 			continue;
 		pdata->xstats.dma_ch_status[qinx] = DWC_ETH_QOS_reg_read(DMA_SR_RGOFFADDRESS(qinx));
 		pdata->xstats.dma_ch_intr_enable[qinx] = DWC_ETH_QOS_reg_read(DMA_IER_RGOFFADDRESS(qinx));
@@ -8111,7 +8093,7 @@
 	}
 
 	for (qinx = 0; qinx < DWC_ETH_QOS_RX_QUEUE_CNT; qinx++) {
-		if (qinx == IPA_DMA_RX_CH)
+		if (pdata->ipa_enabled && qinx == IPA_DMA_RX_CH)
 			continue;
 		pdata->xstats.dma_ch_rx_control[qinx] = DWC_ETH_QOS_reg_read(DMA_RCR_RGOFFADDRESS(qinx));
 		pdata->xstats.dma_ch_rxdesc_list_addr[qinx] = DWC_ETH_QOS_reg_read(DMA_RDLAR_RGOFFADDRESS(qinx));
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ethtool.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ethtool.c
index cf3a571..92a02a2 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ethtool.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ethtool.c
@@ -808,10 +808,15 @@
 			       struct ethtool_wolinfo *wol)
 {
 	struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev);
-	u32 support = WAKE_MAGIC | WAKE_UCAST | pdata->phy_wol_supported;
+	u32 emac_wol_support = 0;
 	int ret = 0;
 
-	if (wol->wolopts & ~support)
+	if (pdata->hw_feat.mgk_sel == 1)
+		emac_wol_support |= WAKE_MAGIC;
+	if (pdata->hw_feat.rwk_sel == 1)
+		emac_wol_support |= WAKE_UCAST;
+
+	if (wol->wolopts & ~(emac_wol_support | pdata->phy_wol_supported))
 		return -EOPNOTSUPP;
 
 	if (!device_can_wakeup(&pdata->pdev->dev))
@@ -825,35 +830,43 @@
 	 */
 	spin_lock_irq(&pdata->lock);
 
-	pdata->wolopts = 0;
-
 	if (pdata->hw_feat.mgk_sel == 1)
 		pdata->wolopts |= WAKE_MAGIC;
 	if (pdata->hw_feat.rwk_sel == 1)
 		pdata->wolopts |= WAKE_UCAST;
 
-	if (pdata->wolopts)
-		enable_irq_wake(pdata->irq_number);
-	else
-		disable_irq_wake(pdata->irq_number);
-
 	spin_unlock_irq(&pdata->lock);
 
-	device_set_wakeup_enable(&pdata->pdev->dev, pdata->wolopts ? 1 : 0);
-
-	if (pdata->phy_intr_en && pdata->phy_irq && pdata->phy_wol_supported){
-
-		pdata->phy_wol_wolopts = 0;
-
-		if (!phy_ethtool_set_wol(pdata->phydev, wol))
-			pdata->phy_wol_wolopts = pdata->phy_wol_supported;
-
-		if (pdata->phy_wol_wolopts)
-			enable_irq_wake(pdata->phy_irq);
+	if (emac_wol_support && (pdata->wolopts != wol->wolopts)) {
+		if (pdata->wolopts)
+			enable_irq_wake(pdata->irq_number);
 		else
-			disable_irq_wake(pdata->phy_irq);
+			disable_irq_wake(pdata->irq_number);
 
-		device_set_wakeup_enable(&pdata->pdev->dev, pdata->phy_wol_wolopts ? 1 : 0);
+		device_set_wakeup_enable(&pdata->pdev->dev, pdata->wolopts ? 1 : 0);
+	}
+
+	if (pdata->phy_wol_wolopts != wol->wolopts) {
+		if (pdata->phy_intr_en && pdata->phy_wol_supported) {
+
+			pdata->phy_wol_wolopts = 0;
+
+			ret = phy_ethtool_set_wol(pdata->phydev, wol);
+
+			if (ret) {
+				EMACERR("set wol in PHY failed\n");
+				return ret;
+			}
+
+			pdata->phy_wol_wolopts = wol->wolopts;
+
+			if (pdata->phy_wol_wolopts)
+				enable_irq_wake(pdata->phy_irq);
+			else
+				disable_irq_wake(pdata->phy_irq);
+
+			device_set_wakeup_enable(&pdata->pdev->dev, pdata->phy_wol_wolopts ? 1 : 0);
+		}
 	}
 
 	DBGPR("<--DWC_ETH_QOS_set_wol\n");
@@ -1071,7 +1084,7 @@
 	
 	/* Update IPA stats */
 	if (pdata->ipa_enabled) {
-		EMACINFO("Add IPA stats\n");
+		EMACDBG("Add IPA stats\n");
 		DWC_ETH_QOS_ipa_stats_read(pdata);
 		for (i = 0; i < DWC_ETH_QOS_IPA_STAT_LEN; i++) {
 			char *p = (char *)pdata +
@@ -1190,7 +1203,7 @@
 	struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev);
 	DBGPR("-->DWC_ETH_QOS_get_ts_info\n");
 	info->phc_index = DWC_ETH_QOS_phc_index(pdata);
-	EMACINFO("PHC index = %d\n", info->phc_index);
+	EMACDBG("PHC index = %d\n", info->phc_index);
 	DBGPR("<--DWC_ETH_QOS_get_ts_info\n");
 	return 0;
 }
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ipa.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ipa.c
index 87bb11f..c09a6f5 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ipa.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
@@ -99,12 +99,11 @@
 void DWC_ETH_QOS_ipa_offload_event_handler(
    struct DWC_ETH_QOS_prv_data *pdata, IPA_OFFLOAD_EVENT ev)
 {
-	struct hw_if_struct *hw_if = &(pdata->hw_if);
 
 
 	IPA_LOCK();
 
-	EMACINFO("Enter: event=%s\n", IPA_OFFLOAD_EVENT_string[ev]);
+	EMACDBG("Enter: event=%s\n", IPA_OFFLOAD_EVENT_string[ev]);
 	EMACDBG("PHY_link=%d\n"
 	"emac_dev_ready=%d\n"
 	"ipa_ready=%d\n"
@@ -202,7 +201,7 @@
 	case EV_IPA_UC_READY:
 		{
 			pdata->prv_ipa.ipa_uc_ready = true;
-			EMACINFO("%s:%d ipa uC is ready\n", __func__, __LINE__);
+			EMACDBG("%s:%d ipa uC is ready\n", __func__, __LINE__);
 
 			if (!pdata->prv_ipa.emac_dev_ready)
 				break;
@@ -265,7 +264,7 @@
 		break;
 	}
 
-	EMACINFO("Exit: event=%s\n", IPA_OFFLOAD_EVENT_string[ev]);
+	EMACDBG("Exit: event=%s\n", IPA_OFFLOAD_EVENT_string[ev]);
 	IPA_UNLOCK();
 }
 
@@ -281,7 +280,7 @@
 			EMACERR("IPA Offload Init Failed \n");
 			goto fail;
 		}
-		EMACINFO("IPA Offload Initialized Successfully \n");
+		EMACDBG("IPA Offload Initialized Successfully \n");
 		pdata->prv_ipa.ipa_offload_init = true;
 	}
 
@@ -292,7 +291,7 @@
 			pdata->prv_ipa.ipa_offload_conn = false;
 			goto fail;
 		}
-		EMACINFO("IPA Offload Connect Successfully\n");
+		EMACDBG("IPA Offload Connect Successfully\n");
 		pdata->prv_ipa.ipa_offload_conn = true;
 
 		/*Initialize DMA CHs for offload*/
@@ -305,12 +304,12 @@
 
 	if (!pdata->prv_ipa.ipa_debugfs_exists) {
 		if (!DWC_ETH_QOS_ipa_create_debugfs(pdata)) {
-			EMACINFO("eMAC Debugfs created  \n");
+			EMACDBG("eMAC Debugfs created  \n");
 			pdata->prv_ipa.ipa_debugfs_exists = true;
 		} else EMACERR("eMAC Debugfs failed \n");
 	}
 
-	EMACINFO("IPA Offload Enabled successfully\n");
+	EMACDBG("IPA Offload Enabled successfully\n");
 	return ret;
 
 fail:
@@ -318,7 +317,7 @@
 		if( DWC_ETH_QOS_ipa_offload_disconnect(pdata) )
 			EMACERR("IPA Offload Disconnect Failed \n");
 		else
-			EMACINFO("IPA Offload Disconnect Successfully \n");
+			EMACDBG("IPA Offload Disconnect Successfully \n");
 		pdata->prv_ipa.ipa_offload_conn = false;
 	}
 
@@ -326,7 +325,7 @@
 		if ( DWC_ETH_QOS_ipa_offload_cleanup(pdata ))
 			EMACERR("IPA Offload Cleanup Failed \n");
 		else
-			EMACINFO("IPA Offload Cleanup Success \n");
+			EMACDBG("IPA Offload Cleanup Success \n");
 		pdata->prv_ipa.ipa_offload_init = false;
 	}
 
@@ -355,7 +354,7 @@
 			EMACERR("IPA Offload Cleanup Failed, err: %d\n", ret);
 			return ret;
 		}
-		EMACINFO("IPA Offload Cleanup Success \n");
+		EMACDBG("IPA Offload Cleanup Success \n");
 		pdata->prv_ipa.ipa_offload_init = false;
 	}
 
@@ -526,7 +525,7 @@
 
 static int DWC_ETH_QOS_ipa_ready(struct DWC_ETH_QOS_prv_data *pdata)
 {
-	int ret;
+	int ret = 0 ;
 
 	EMACDBG("Enter \n");
 
@@ -534,12 +533,12 @@
 		ret = ipa_register_ipa_ready_cb(DWC_ETH_QOS_ipa_ready_cb,
 										(void *)pdata);
 		if (ret == -ENXIO) {
-			EMACINFO("%s: IPA driver context is not even ready\n", __func__);
+			EMACDBG("%s: IPA driver context is not even ready\n", __func__);
 			return ret;
 		}
 
 		if (ret != -EEXIST) {
-			EMACINFO("%s:%d register ipa ready cb\n", __func__, __LINE__);
+			EMACDBG("%s:%d register ipa ready cb\n", __func__, __LINE__);
 			return ret;
 		}
 	}
@@ -554,7 +553,6 @@
 static int DWC_ETH_QOS_ipa_uc_ready(struct DWC_ETH_QOS_prv_data *pdata)
 {
 	struct ipa_uc_ready_params param;
-	unsigned long flags;
 	int ret;
 
 	EMACDBG("Enter \n");
@@ -566,7 +564,7 @@
 
 	ret = ipa_uc_offload_reg_rdyCB(&param);
 	if (ret == 0 && param.is_uC_ready) {
-		EMACINFO("%s:%d ipa uc ready\n", __func__, __LINE__);
+		EMACDBG("%s:%d ipa uc ready\n", __func__, __LINE__);
 		pdata->prv_ipa.ipa_uc_ready = true;
 	}
 
@@ -748,7 +746,7 @@
 		ipa_vlan_mode = 0;
 	}
 
-	EMACINFO("IPA VLAN mode %d\n", ipa_vlan_mode);
+	EMACDBG("IPA VLAN mode %d\n", ipa_vlan_mode);
 
 	memset(&in, 0, sizeof(in));
 	memset(&out, 0, sizeof(out));
@@ -817,7 +815,7 @@
 	struct DWC_ETH_QOS_prv_ipa_data *ntn_ipa = &pdata->prv_ipa;
 	int ret = 0;
 
-	EMACINFO("%s - begin\n", __func__);
+	EMACDBG("%s - begin\n", __func__);
 
 	if (!pdata) {
 		EMACERR("Null Param %s \n", __func__);
@@ -836,7 +834,7 @@
 		return -1;
 	}
 
-	EMACINFO("%s - end\n", __func__);
+	EMACDBG("%s - end\n", __func__);
 
 	return 0;
 }
@@ -962,14 +960,15 @@
 	struct DWC_ETH_QOS_prv_ipa_data *ntn_ipa = &pdata->prv_ipa;
 	struct ipa_uc_offload_conn_in_params in;
 	struct ipa_uc_offload_conn_out_params out;
-	struct ipa_ntn_setup_info rx_setup_info;
-	struct ipa_ntn_setup_info tx_setup_info;
+	struct ipa_ntn_setup_info rx_setup_info = {0};
+	struct ipa_ntn_setup_info tx_setup_info = {0};
 	struct ipa_perf_profile profile;
 	int ret = 0;
 	int i = 0;
+	u32 reg_val;
 
 
-	EMACINFO("%s - begin\n", __func__);
+	EMACDBG("%s - begin\n", __func__);
 
 	if(!pdata) {
 		EMACERR( "Null Param %s \n", __func__);
@@ -1070,9 +1069,9 @@
 	}
 
 	/* Dump UL and DL Setups */
-	EMACINFO("IPA Offload UL client %d ring_base_pa 0x%x ntn_ring_size %d buff_pool_base_pa 0x%x num_buffers %d data_buff_size %d ntn_reg_base_ptr_pa 0x%x\n",
+	EMACDBG("IPA Offload UL client %d ring_base_pa 0x%x ntn_ring_size %d buff_pool_base_pa 0x%x num_buffers %d data_buff_size %d ntn_reg_base_ptr_pa 0x%x\n",
 		rx_setup_info.client, rx_setup_info.ring_base_pa, rx_setup_info.ntn_ring_size, rx_setup_info.buff_pool_base_pa, rx_setup_info.num_buffers, rx_setup_info.data_buff_size, rx_setup_info.ntn_reg_base_ptr_pa);
-	EMACINFO("IPA Offload DL client %d ring_base_pa 0x%x ntn_ring_size %d buff_pool_base_pa 0x%x num_buffers %d data_buff_size %d ntn_reg_base_ptr_pa 0x%x\n",
+	EMACDBG("IPA Offload DL client %d ring_base_pa 0x%x ntn_ring_size %d buff_pool_base_pa 0x%x num_buffers %d data_buff_size %d ntn_reg_base_ptr_pa 0x%x\n",
 		tx_setup_info.client, tx_setup_info.ring_base_pa, tx_setup_info.ntn_ring_size, tx_setup_info.buff_pool_base_pa, tx_setup_info.num_buffers, tx_setup_info.data_buff_size, tx_setup_info.ntn_reg_base_ptr_pa);
 
 	in.u.ntn.ul = rx_setup_info;
@@ -1085,6 +1084,16 @@
 		goto mem_free;
 	}
 
+	/* Map RX queue 0 back to DMA channel 0 on successful IPA offload connect */
+	MTL_RQDCM0R_RGWR(0x3020100);
+
+	/* For early ethernet, also clear the RX-queue-to-DMA-channel mapping bits */
+	if (pdata->res_data->early_eth_en) {
+		MTL_RQDCM0R_RGRD(reg_val);
+		reg_val &= ~IPA_RX_TO_DMA_CH_MAP_NUM;
+		MTL_RQDCM0R_RGWR(reg_val);
+	}
+
     ntn_ipa->uc_db_rx_addr = out.u.ntn.ul_uc_db_pa;
     ntn_ipa->uc_db_tx_addr = out.u.ntn.dl_uc_db_pa;
 
@@ -1140,7 +1149,7 @@
 		}
 	}
 
-	EMACINFO("%s - end \n", __func__);
+	EMACDBG("%s - end \n", __func__);
 	return 0;
 }
 
@@ -1160,7 +1169,7 @@
 	struct DWC_ETH_QOS_prv_ipa_data *ntn_ipa = &pdata->prv_ipa;
 	int ret = 0;
 
-	EMACINFO("%s - begin \n", __func__);
+	EMACDBG("%s - begin \n", __func__);
 
 	if(!pdata) {
 		EMACERR( "Null Param %s \n", __func__);
@@ -1173,7 +1182,7 @@
 		return ret;
 	}
 
-	EMACINFO("%s - end \n", __func__);
+	EMACDBG("%s - end \n", __func__);
 	return 0;
 }
 
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
index 75a27ed..57c9383 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_mdio.c
@@ -295,6 +295,9 @@
 
 	DBGPR_MDIO("-->DWC_ETH_QOS_mdio_reset: phyaddr : %d\n", pdata->phyaddr);
 
+	if (pdata->res_data->early_eth_en)
+		return 0;
+
 #if 0 /* def DWC_ETH_QOS_CONFIG_PGTEST */
 	pr_alert("PHY Programming for Autoneg disable\n");
 	hw_if->read_phy_regs(pdata->phyaddr, MII_BMCR, &phydata);
@@ -458,7 +461,7 @@
 								uint mode)
 {
 	u32 phydata = 0;
-	EMACINFO("Enter\n");
+	EMACDBG("Enter\n");
 
 	DWC_ETH_QOS_mdio_write_direct(pdata, pdata->phyaddr,
 				DWC_ETH_QOS_PHY_DEBUG_PORT_ADDR_OFFSET,
@@ -467,7 +470,7 @@
 				DWC_ETH_QOS_PHY_DEBUG_PORT_DATAPORT,
 				&phydata);
 
-	EMACINFO("value read 0x%x\n", phydata);
+	EMACDBG("value read 0x%x\n", phydata);
 
 	phydata = ((phydata & DWC_ETH_QOS_PHY_HIB_CTRL_PS_HIB_EN_WR_MASK)
 			   | ((DWC_ETH_QOS_PHY_HIB_CTRL_PS_HIB_EN_MASK & mode) << 15));
@@ -482,7 +485,7 @@
 				DWC_ETH_QOS_PHY_DEBUG_PORT_DATAPORT,
 				&phydata);
 
-	EMACINFO("Exit value written 0x%x\n", phydata);
+	EMACDBG("Exit value written 0x%x\n", phydata);
 }
 
 /*!
@@ -677,6 +680,7 @@
 					pdata->emac_hw_version_type != EMAC_HW_v2_1_2)
 					set_phy_rx_tx_delay(pdata, DISABLE_RX_DELAY, DISABLE_TX_DELAY);
 			}
+		}
 		break;
 	}
 	EMACDBG("Exit\n");
@@ -795,7 +799,6 @@
 	EMACDBG("Enter\n");
 
 #ifndef DWC_ETH_QOS_EMULATION_PLATFORM
-	if (pdata->emac_hw_version_type == EMAC_HW_v2_0_0 || pdata->emac_hw_version_type == EMAC_HW_v2_3_1)
 	DWC_ETH_QOS_rgmii_io_macro_dll_reset(pdata);
 
 	/* For RGMII ID mode with internal delay*/
@@ -896,6 +899,11 @@
 	if (!phydev)
 		return;
 
+	if (pdata->oldlink == -1 && !phydev->link) {
+		pdata->oldlink = phydev->link;
+		return;
+	}
+
 	DBGPR_MDIO(
 		"-->DWC_ETH_QOS_adjust_link. address %d link %d\n",
 		phydev->mdio.addr, phydev->link);
@@ -987,6 +995,13 @@
 	if (new_state) {
 		phy_print_status(phydev);
 
+#ifdef CONFIG_MSM_BOOT_TIME_MARKER
+		if ((phydev->link == 1) && !pdata->print_kpi) {
+			place_marker("M - Ethernet is Ready.Link is UP");
+			pdata->print_kpi = 1;
+		}
+#endif
+
 		if (pdata->ipa_enabled && netif_running(dev)) {
 			if (phydev->link == 1)
 				 DWC_ETH_QOS_ipa_offload_event_handler(pdata, EV_PHY_LINK_UP);
@@ -994,7 +1009,9 @@
 				DWC_ETH_QOS_ipa_offload_event_handler(pdata, EV_PHY_LINK_DOWN);
 		}
 
-		if (phydev->link == 0 && pdata->io_macro_phy_intf != RMII_MODE)
+		if (phydev->link == 1)
+			pdata->hw_if.start_mac_tx_rx();
+		else if (phydev->link == 0 && pdata->io_macro_phy_intf != RMII_MODE)
 			DWC_ETH_QOS_set_clk_and_bus_config(pdata, SPEED_10);
 	}
 
@@ -1033,8 +1050,11 @@
 
 			if (!phy_ethtool_set_wol(pdata->phydev, &wol)){
 				pdata->phy_wol_wolopts = wol.wolopts;
+
+				enable_irq_wake(pdata->phy_irq);
+
 				device_set_wakeup_enable(&pdata->pdev->dev, 1);
-				EMACINFO("Enabled WoL[0x%x] in %s\n", wol.wolopts,
+				EMACDBG("Enabled WoL[0x%x] in %s\n", wol.wolopts,
 						 pdata->phydev->drv->name);
 			}
 		}
@@ -1109,20 +1129,27 @@
 		EMACDBG("Phy polling enabled\n");
 #endif
 
-	if (pdata->interface == PHY_INTERFACE_MODE_GMII ||
-	    pdata->interface == PHY_INTERFACE_MODE_RGMII) {
+
+	if (pdata->interface == PHY_INTERFACE_MODE_GMII || pdata->interface == PHY_INTERFACE_MODE_RGMII) {
 		phy_set_max_speed(phydev, SPEED_1000);
 		/* Half duplex not supported */
 		phydev->supported &= ~(SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half | SUPPORTED_1000baseT_Half);
-	} else if ((pdata->interface == PHY_INTERFACE_MODE_MII) ||
-		   (pdata->interface == PHY_INTERFACE_MODE_RMII)) {
+	} else if ((pdata->interface == PHY_INTERFACE_MODE_MII) || (pdata->interface == PHY_INTERFACE_MODE_RMII)) {
 		phy_set_max_speed(phydev, SPEED_100);
 		/* Half duplex is not supported */
 		phydev->supported &= ~(SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half);
 	}
-
 	phydev->advertising = phydev->supported;
 
+	if (pdata->res_data->early_eth_en) {
+		phydev->autoneg = AUTONEG_DISABLE;
+		phydev->speed = SPEED_100;
+		phydev->duplex = DUPLEX_FULL;
+		phydev->advertising = phydev->supported;
+		phydev->advertising &= ~(SUPPORTED_1000baseT_Full);
+		EMACDBG("Set max speed to SPEED_100 because early ethernet is enabled\n");
+	}
+
 	pdata->phydev = phydev;
 
 	/* Disable smart speed function for AR8035*/
@@ -1147,7 +1174,7 @@
 		DWC_ETH_QOS_set_phy_hibernation_mode(pdata, 0);
 	}
 
-	if (pdata->phy_intr_en && pdata->phy_irq) {
+	if (pdata->phy_intr_en) {
 
 		INIT_WORK(&pdata->emac_phy_work, DWC_ETH_QOS_defer_phy_isr_work);
 		init_completion(&pdata->clk_enable_done);
@@ -1163,8 +1190,12 @@
 		phydev->interrupts =  PHY_INTERRUPT_ENABLED;
 
 		if (phydev->drv->config_intr &&
-			!phydev->drv->config_intr(phydev))
+			!phydev->drv->config_intr(phydev)) {
 			DWC_ETH_QOS_request_phy_wol(pdata);
+		} else {
+			EMACERR("Failed to configure PHY interrupts\n");
+			BUG();
+		}
 	}
 
 	phy_start(pdata->phydev);
@@ -1210,6 +1241,7 @@
 	int ret = Y_SUCCESS;
 	int phy_reg_read_status, mii_status;
 	u32 phy_id, phy_id1, phy_id2;
+	u32 phydata = 0;
 
 	DBGPR_MDIO("-->DWC_ETH_QOS_mdio_register\n");
 
@@ -1244,6 +1276,17 @@
 	pdata->phy_intr_en = false;
 	pdata->always_on_phy = false;
 
+	if (pdata->res_data->early_eth_en) {
+		EMACDBG("Set EMAC MII speed to 100Mbps\n");
+		pdata->hw_if.set_mii_speed_100();
+
+		phydata = BMCR_SPEED100;
+		phydata |= BMCR_FULLDPLX;
+		EMACDBG("Set PHY to 100Mbps full duplex with auto-negotiation disabled\n");
+		pdata->hw_if.write_phy_regs(pdata->phyaddr,
+				MII_BMCR, phydata);
+	}
+
 	DBGPHY_REGS(pdata);
 
 	new_bus = mdiobus_alloc();
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
index 323c3a2..4d57d7a 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_platform.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -49,6 +49,8 @@
 #include "DWC_ETH_QOS_yheader.h"
 #include "DWC_ETH_QOS_ipa.h"
 
+void *ipc_emac_log_ctxt;
+
 static UCHAR dev_addr[6] = {0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7};
 struct DWC_ETH_QOS_res_data dwc_eth_qos_res_data = {0, };
 static struct msm_bus_scale_pdata *emac_bus_scale_vec = NULL;
@@ -62,7 +64,7 @@
 static struct qmp_pkt pkt;
 static char qmp_buf[MAX_QMP_MSG_SIZE + 1] = {0};
 extern int create_pps_interrupt_info_device_node(dev_t *pps_dev_t,
-	struct cdev* pps_cdev, struct class* pps_class,
+	struct cdev** pps_cdev, struct class** pps_class,
 	char *pps_dev_node_name);
 extern int remove_pps_interrupt_info_device_node(struct DWC_ETH_QOS_prv_data *pdata);
 
@@ -85,6 +87,105 @@
 MODULE_PARM_DESC(phy_interrupt_en,
 		"Enable PHY interrupt [0-DISABLE, 1-ENABLE]");
 
+struct ip_params pparams = {};
+#ifdef DWC_ETH_QOS_BUILTIN
+/*!
+ * \brief API to extract MAC Address from given string
+ *
+ * \param[in] pointer to MAC Address string
+ *
+ * \return None
+ */
+void DWC_ETH_QOS_extract_macid(char *mac_addr)
+{
+	char *input = NULL;
+	int i = 0;
+	unsigned int mac_id = 0;
+
+	if (!mac_addr)
+		return;
+
+	/* Extract MAC ID byte by byte */
+	input = strsep(&mac_addr, ":");
+	while(input != NULL && i < DWC_ETH_QOS_MAC_ADDR_LEN) {
+		sscanf(input, "%x", &mac_id);
+		pparams.mac_addr[i++] = mac_id;
+		input = strsep(&mac_addr, ":");
+	}
+	if (!is_valid_ether_addr(pparams.mac_addr)) {
+		EMACERR("Invalid Mac address programmed: %s\n", mac_addr);
+		return;
+	} else
+		pparams.is_valid_mac_addr = true;
+
+	return;
+}
+
+static int __init set_early_ethernet_ipv4(char *ipv4_addr_in)
+{
+	int ret = 1;
+	pparams.is_valid_ipv4_addr = false;
+
+	if (!ipv4_addr_in)
+		return ret;
+
+	strlcpy(pparams.ipv4_addr_str, ipv4_addr_in, sizeof(pparams.ipv4_addr_str));
+	EMACDBG("Early ethernet IPv4 addr: %s\n", pparams.ipv4_addr_str);
+
+	ret = in4_pton(pparams.ipv4_addr_str, -1,
+				(u8*)&pparams.ipv4_addr.s_addr, -1, NULL);
+	if (ret != 1 || pparams.ipv4_addr.s_addr == 0) {
+		EMACERR("Invalid ipv4 address programmed: %s\n", ipv4_addr_in);
+		return ret;
+	}
+
+	pparams.is_valid_ipv4_addr = true;
+	return ret;
+}
+__setup("eipv4=", set_early_ethernet_ipv4);
+
+static int __init set_early_ethernet_ipv6(char* ipv6_addr_in)
+{
+	int ret = 1;
+	pparams.is_valid_ipv6_addr = false;
+
+	if (!ipv6_addr_in)
+		return ret;
+
+	strlcpy(pparams.ipv6_addr_str, ipv6_addr_in, sizeof(pparams.ipv6_addr_str));
+	EMACDBG("Early ethernet IPv6 addr: %s\n", pparams.ipv6_addr_str);
+
+	ret = in6_pton(pparams.ipv6_addr_str, -1,
+				   (u8 *)&pparams.ipv6_addr.ifr6_addr.s6_addr32, -1, NULL);
+	if (ret != 1 || pparams.ipv6_addr.ifr6_addr.s6_addr32 == 0)  {
+		EMACERR("Invalid ipv6 address programmed: %s\n", ipv6_addr_in);
+		return ret;
+	}
+
+	pparams.is_valid_ipv6_addr = true;
+	return ret;
+}
+__setup("eipv6=", set_early_ethernet_ipv6);
+
+static int __init set_early_ethernet_mac(char* mac_addr)
+{
+	int ret = 1;
+	char temp_mac_addr[DWC_ETH_QOS_MAC_ADDR_STR_LEN];
+	pparams.is_valid_mac_addr = false;
+
+	if(!mac_addr)
+		return ret;
+
+	strlcpy(temp_mac_addr, mac_addr, sizeof(temp_mac_addr));
+	EMACDBG("Early ethernet MAC address assigned: %s\n", temp_mac_addr);
+	temp_mac_addr[DWC_ETH_QOS_MAC_ADDR_STR_LEN-1] = '\0';
+
+	DWC_ETH_QOS_extract_macid(temp_mac_addr);
+	return ret;
+}
+__setup("ermac=", set_early_ethernet_mac);
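+
+/*
+ * Example bootargs (illustrative addresses only):
+ *   ermac=00:55:7b:b5:7d:f7 eipv4=192.0.2.2 eipv6=2001:db8::2
+ */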
+#endif
+
 static ssize_t read_phy_reg_dump(struct file *file,
 	char __user *user_buf, size_t count, loff_t *ppos)
 {
@@ -425,8 +526,10 @@
 			return;
 		}
 		EMACDBG("get pinctrl succeed\n");
+		dwc_eth_qos_res_data.pinctrl = pinctrl;
 
 		if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_2_0 ||
+			dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_2 ||
 			dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_3_1) {
 			/* PPS0 pin */
 			emac_pps_0 = pinctrl_lookup_state(pinctrl, EMAC_PIN_PPS0);
@@ -618,6 +721,28 @@
 		else
 			EMACDBG("Set rgmii_rxc_state succeed\n");
 
+		dwc_eth_qos_res_data.rgmii_rxc_suspend_state =
+			pinctrl_lookup_state(pinctrl, EMAC_RGMII_RXC_SUSPEND);
+		if (IS_ERR_OR_NULL(dwc_eth_qos_res_data.rgmii_rxc_suspend_state)) {
+			ret = PTR_ERR(dwc_eth_qos_res_data.rgmii_rxc_suspend_state);
+			EMACERR("Failed to get rgmii_rxc_suspend_state, err = %d\n", ret);
+			dwc_eth_qos_res_data.rgmii_rxc_suspend_state = NULL;
+		}
+		else {
+			EMACDBG("Get rgmii_rxc_suspend_state succeed\n");
+		}
+
+		dwc_eth_qos_res_data.rgmii_rxc_resume_state =
+			pinctrl_lookup_state(pinctrl, EMAC_RGMII_RXC_RESUME);
+		if (IS_ERR_OR_NULL(dwc_eth_qos_res_data.rgmii_rxc_resume_state)) {
+			ret = PTR_ERR(dwc_eth_qos_res_data.rgmii_rxc_resume_state);
+			EMACERR("Failed to get rgmii_rxc_resume_state, err = %d\n", ret);
+			dwc_eth_qos_res_data.rgmii_rxc_resume_state = NULL;
+		}
+		else {
+			EMACDBG("Get rgmii_rxc_resume_state succeed\n");
+		}
+
 		rgmii_rx_ctl_state = pinctrl_lookup_state(pinctrl, EMAC_RGMII_RX_CTL);
 		if (IS_ERR_OR_NULL(rgmii_rx_ctl_state)) {
 			ret = PTR_ERR(rgmii_rx_ctl_state);
@@ -723,6 +848,10 @@
 	}
 	EMACDBG(": emac_core_version = %d\n", dwc_eth_qos_res_data.emac_hw_version_type);
 
+	if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_3_1 ||
+		dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_1_2)
+		dwc_eth_qos_res_data.pps_lpass_conn_en = true;
+
 	if (dwc_eth_qos_res_data.emac_hw_version_type == EMAC_HW_v2_3_1) {
 
 		resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
@@ -748,6 +877,17 @@
 
 	}
 
+	dwc_eth_qos_res_data.early_eth_en = 0;
+	if (pparams.is_valid_mac_addr &&
+	    (pparams.is_valid_ipv4_addr || pparams.is_valid_ipv6_addr)) {
+		/* For 1000BASE-T mode, auto-negotiation is required and is always
+		 * used to establish a link. Configure the PHY and MAC in 100 Mbps
+		 * mode with auto-negotiation disabled, since link-up takes longer
+		 * with auto-negotiation enabled. */
+		dwc_eth_qos_res_data.early_eth_en = 1;
+		EMACINFO("Early ethernet is enabled\n");
+	}
+
 	ret = DWC_ETH_QOS_get_io_macro_config(pdev);
 	if (ret)
 		goto err_out;
@@ -829,7 +969,7 @@
 	pdata->qmp_mbox_client = devm_kzalloc(
 	   &pdata->pdev->dev, sizeof(*pdata->qmp_mbox_client), GFP_KERNEL);
 
-	if (IS_ERR(pdata->qmp_mbox_client)){
+	if (pdata->qmp_mbox_client == NULL || IS_ERR(pdata->qmp_mbox_client)){
 		EMACERR("qmp alloc client failed\n");
 		return -1;
 	}
@@ -968,8 +1108,15 @@
 	else
 		DWC_ETH_QOS_set_clk_and_bus_config(pdata, SPEED_10);
 
+#ifdef DWC_ETH_QOS_CONFIG_PTP
+	if (dwc_eth_qos_res_data.ptp_clk)
+		clk_prepare_enable(dwc_eth_qos_res_data.ptp_clk);
+#endif
+
 	pdata->clks_suspended = 0;
-	complete_all(&pdata->clk_enable_done);
+
+	if (pdata->phy_intr_en)
+		complete_all(&pdata->clk_enable_done);
 
 	EMACDBG("Exit\n");
 }
@@ -978,7 +1125,9 @@
 {
 	EMACDBG("Enter\n");
 
-	reinit_completion(&pdata->clk_enable_done);
+	if (pdata->phy_intr_en)
+		reinit_completion(&pdata->clk_enable_done);
+
 	pdata->clks_suspended = 1;
 
 	DWC_ETH_QOS_set_clk_and_bus_config(pdata, 0);
@@ -992,6 +1141,11 @@
 	if (dwc_eth_qos_res_data.rgmii_clk)
 		clk_disable_unprepare(dwc_eth_qos_res_data.rgmii_clk);
 
+#ifdef DWC_ETH_QOS_CONFIG_PTP
+	if (dwc_eth_qos_res_data.ptp_clk)
+		clk_disable_unprepare(dwc_eth_qos_res_data.ptp_clk);
+#endif
+
 	EMACDBG("Exit\n");
 }
 
@@ -1320,7 +1474,8 @@
 		}
 	}
 
-	if (dwc_eth_qos_res_data.is_gpio_phy_reset) {
+	if (dwc_eth_qos_res_data.is_gpio_phy_reset &&
+		!dwc_eth_qos_res_data.early_eth_en) {
 		ret = setup_gpio_output_common(
 			dev, EMAC_GPIO_PHY_RESET_NAME,
 			&dwc_eth_qos_res_data.gpio_phy_reset, PHY_RESET_GPIO_LOW);
@@ -1349,6 +1504,104 @@
 	{}
 };
 
+void is_ipv6_NW_stack_ready(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct DWC_ETH_QOS_prv_data *pdata;
+	int ret;
+
+	EMACDBG("\n");
+	dwork = container_of(work, struct delayed_work, work);
+	pdata = container_of(dwork, struct DWC_ETH_QOS_prv_data, ipv6_addr_assign_wq);
+
+	ret = DWC_ETH_QOS_add_ipv6addr(pdata);
+	if (ret)
+		return;
+
+	cancel_delayed_work_sync(&pdata->ipv6_addr_assign_wq);
+	flush_delayed_work(&pdata->ipv6_addr_assign_wq);
+	return;
+}
+
+int DWC_ETH_QOS_add_ipv6addr(struct DWC_ETH_QOS_prv_data *pdata)
+{
+	int ret;
+#ifdef DWC_ETH_QOS_BUILTIN
+	struct in6_ifreq ir6;
+	char* prefix;
+	struct ip_params *ip_info = &pparams;
+	struct net *net = dev_net(pdata->dev);
+
+	EMACDBG("\n");
+	if (!net || !net->genl_sock || !net->genl_sock->sk_socket) {
+		EMACERR("Sock is null, unable to assign ipv6 address\n");
+		return -EFAULT;
+	}
+
+	if (!net->ipv6.devconf_dflt) {
+		EMACDBG("ipv6.devconf_dflt is null, schedule wq\n");
+		schedule_delayed_work(&pdata->ipv6_addr_assign_wq, msecs_to_jiffies(1000));
+		return -EFAULT;
+	}
+
+	/*For valid IPv6 address*/
+	memset(&ir6, 0, sizeof(ir6));
+	memcpy(&ir6, &ip_info->ipv6_addr, sizeof(struct in6_ifreq));
+	ir6.ifr6_ifindex = pdata->dev->ifindex;
+
+	if ((prefix = strchr(ip_info->ipv6_addr_str, '/')) == NULL)
+		ir6.ifr6_prefixlen = 0;
+	else {
+		ir6.ifr6_prefixlen = simple_strtoul(prefix + 1, NULL, 0);
+		if (ir6.ifr6_prefixlen > 128)
+			ir6.ifr6_prefixlen = 0;
+	}
+
+	ret = inet6_ioctl(net->genl_sock->sk_socket, SIOCSIFADDR, (unsigned long)(void *)&ir6);
+	if (ret)
+		EMACERR("Can't setup IPv6 address!\r\n");
+	else
+		EMACDBG("Assigned IPv6 address: %s\r\n", ip_info->ipv6_addr_str);
+#else
+	ret = -EFAULT;
+#endif
+	return ret;
+}
+
+int DWC_ETH_QOS_add_ipaddr(struct DWC_ETH_QOS_prv_data *pdata)
+{
+	int ret=0;
+#ifdef DWC_ETH_QOS_BUILTIN
+	struct ip_params *ip_info = &pparams;
+	struct ifreq ir;
+	struct sockaddr_in *sin = (void *) &ir.ifr_ifru.ifru_addr;
+	struct net *net = dev_net(pdata->dev);
+
+	if (!net || !net->genl_sock || !net->genl_sock->sk_socket) {
+		EMACERR("Sock is null, unable to assign ipv4 address\n");
+		return -EFAULT;
+	}
+
+	/*For valid Ipv4 address*/
+	memset(&ir, 0, sizeof(ir));
+	memcpy(&sin->sin_addr.s_addr, &ip_info->ipv4_addr,
+		   sizeof(sin->sin_addr.s_addr));
+	strlcpy(ir.ifr_ifrn.ifrn_name, pdata->dev->name, sizeof(ir.ifr_ifrn.ifrn_name));
+	sin->sin_family = AF_INET;
+	sin->sin_port = 0;
+
+	ret = inet_ioctl(net->genl_sock->sk_socket, SIOCSIFADDR, (unsigned long)(void *)&ir);
+	if (ret)
+		EMACERR( "Can't setup IPv4 address!: %d\r\n", ret);
+	else
+		EMACDBG("Assigned IPv4 address: %s\r\n", ip_info->ipv4_addr_str);
+#endif
+	return ret;
+}
+
+u32 l3mdev_fib_table1 (const struct net_device *dev)
+{
+	return RT_TABLE_LOCAL;
+}
+
+const struct l3mdev_ops l3mdev_op1 = {.l3mdev_fib_table = l3mdev_fib_table1};
+
 static int DWC_ETH_QOS_configure_netdevice(struct platform_device *pdev)
 {
 	struct DWC_ETH_QOS_prv_data *pdata = NULL;
@@ -1371,6 +1624,10 @@
 		ret = -ENOMEM;
 		goto err_out_dev_failed;
 	}
+
+	if (pparams.is_valid_mac_addr == true)
+		ether_addr_copy(dev_addr, pparams.mac_addr);
+
 	dev->dev_addr[0] = dev_addr[0];
 	dev->dev_addr[1] = dev_addr[1];
 	dev->dev_addr[2] = dev_addr[2];
@@ -1420,9 +1677,24 @@
 	/* store emac hw version in pdata*/
 	pdata->emac_hw_version_type = dwc_eth_qos_res_data.emac_hw_version_type;
 
+#ifdef CONFIG_NET_L3_MASTER_DEV
+	if (pdata->res_data->early_eth_en && pdata->emac_hw_version_type == EMAC_HW_v2_3_1) {
+		EMACDBG("l3mdev_op1 set \n");
+		dev->priv_flags = IFF_L3MDEV_MASTER;
+		dev->l3mdev_ops = &l3mdev_op1;
+	}
+#endif
+
+
 	/* Scale the clocks to 10Mbps speed */
-	pdata->speed = SPEED_10;
-	DWC_ETH_QOS_set_clk_and_bus_config(pdata, SPEED_10);
+	if (pdata->res_data->early_eth_en) {
+		pdata->speed = SPEED_100;
+		DWC_ETH_QOS_set_clk_and_bus_config(pdata, SPEED_100);
+	}
+	else {
+		pdata->speed = SPEED_10;
+		DWC_ETH_QOS_set_clk_and_bus_config(pdata, SPEED_10);
+	}
 
 	DWC_ETH_QOS_set_rgmii_func_clk_en();
 
@@ -1440,7 +1712,7 @@
 	pdata->ipa_enabled = 0;
 #endif
 
-	EMACINFO("EMAC IPA enabled: %d\n", pdata->ipa_enabled);
+	EMACDBG("EMAC IPA enabled: %d\n", pdata->ipa_enabled);
 	if (pdata->ipa_enabled) {
 		pdata->prv_ipa.ipa_ver = ipa_get_hw_type();
 		device_init_wakeup(&pdev->dev, 1);
@@ -1580,10 +1852,10 @@
 
 	if (pdata->emac_hw_version_type == EMAC_HW_v2_3_1) {
 		create_pps_interrupt_info_device_node(&pdata->avb_class_a_dev_t,
-			pdata->avb_class_a_cdev, pdata->avb_class_a_class, AVB_CLASS_A_POLL_DEV_NODE_NAME);
+			&pdata->avb_class_a_cdev, &pdata->avb_class_a_class, AVB_CLASS_A_POLL_DEV_NODE_NAME);
 
 		create_pps_interrupt_info_device_node(&pdata->avb_class_b_dev_t,
-			pdata->avb_class_b_cdev ,pdata->avb_class_b_class, AVB_CLASS_B_POLL_DEV_NODE_NAME);
+			&pdata->avb_class_b_cdev ,&pdata->avb_class_b_class, AVB_CLASS_B_POLL_DEV_NODE_NAME);
 	}
 
 	DWC_ETH_QOS_create_debugfs(pdata);
@@ -1596,6 +1868,19 @@
 		queue_work(system_wq, &pdata->qmp_mailbox_work);
 	}
 
+	if (pdata->res_data->early_eth_en) {
+		if (pparams.is_valid_ipv4_addr)
+			ret = DWC_ETH_QOS_add_ipaddr(pdata);
+
+		if (pparams.is_valid_ipv6_addr) {
+			INIT_DELAYED_WORK(&pdata->ipv6_addr_assign_wq, is_ipv6_NW_stack_ready);
+			ret = DWC_ETH_QOS_add_ipv6addr(pdata);
+			if (ret)
+				schedule_delayed_work(&pdata->ipv6_addr_assign_wq, msecs_to_jiffies(1000));
+		}
+
+	}
+
 	EMACDBG("<-- DWC_ETH_QOS_configure_netdevice\n");
 
 	return 0;
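
The early-ethernet address plumbing above has to cope with the IPv6 stack not being ready at probe time, which is why DWC_ETH_QOS_add_ipv6addr() can fail and be retried from the ipv6_addr_assign_wq delayed work. The following is a reduced sketch of that schedule-and-retry idiom; every example_-prefixed name is a placeholder, and the one-second interval simply mirrors the msecs_to_jiffies(1000) used above.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work example_retry_work;

/* Stand-in for the real assignment attempt: returns 0 once the stack is
 * ready and the address has been applied, non-zero otherwise.
 */
static int example_try_assign(void)
{
	return -EAGAIN;
}

static void example_retry_fn(struct work_struct *work)
{
	if (example_try_assign())
		schedule_delayed_work(&example_retry_work,
				      msecs_to_jiffies(1000));
}

/* At probe time:
 *	INIT_DELAYED_WORK(&example_retry_work, example_retry_fn);
 *	if (example_try_assign())
 *		schedule_delayed_work(&example_retry_work,
 *				      msecs_to_jiffies(1000));
 */
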
@@ -1773,7 +2058,9 @@
 	int ret = 0;
 
 	EMACDBG("--> DWC_ETH_QOS_probe\n");
-
+#ifdef CONFIG_MSM_BOOT_TIME_MARKER
+	place_marker("M - Ethernet probe start");
+#endif
 	if (of_device_is_compatible(pdev->dev.of_node, "qcom,emac-smmu-embedded"))
 		return emac_emb_smmu_cb_probe(pdev);
 
@@ -2001,10 +2288,10 @@
  * \retval 0
  */
 
-static INT DWC_ETH_QOS_suspend(struct platform_device *pdev, pm_message_t state)
+static INT DWC_ETH_QOS_suspend(struct device *dev)
 {
-	struct net_device *dev = platform_get_drvdata(pdev);
-	struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev);
+	struct DWC_ETH_QOS_prv_data *pdata = gDWC_ETH_QOS_prv_data;
+	struct net_device *net_dev = pdata->dev;
 	struct hw_if_struct *hw_if = &pdata->hw_if;
 	INT ret, pmt_flags = 0;
 	unsigned int rwk_filter_values[] = {
@@ -2040,7 +2327,7 @@
 
 	EMACDBG("-->DWC_ETH_QOS_suspend\n");
 
-	if (of_device_is_compatible(pdev->dev.of_node, "qcom,emac-smmu-embedded")) {
+	if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded")) {
 		EMACDBG("<--DWC_ETH_QOS_suspend smmu return\n");
 		return 0;
 	}
@@ -2049,16 +2336,10 @@
 		pdata->power_down_type |= DWC_ETH_QOS_EMAC_INTR_WAKEUP;
 		enable_irq_wake(pdata->irq_number);
 
-		/* Set PHY intr as wakeup-capable to handle change in PHY link status after suspend */
-		if (pdata->phy_intr_en && pdata->phy_irq && pdata->phy_wol_wolopts) {
-			pmt_flags |= DWC_ETH_QOS_PHY_INTR_WAKEUP;
-			enable_irq_wake(pdata->phy_irq);
-		}
-
 		return 0;
 	}
 
-	if (!dev || !netif_running(dev)) {
+	if (!net_dev || !netif_running(net_dev)) {
 		return -EINVAL;
 	}
 
@@ -2070,15 +2351,26 @@
 	if (pdata->hw_feat.mgk_sel && (pdata->wolopts & WAKE_MAGIC))
 		pmt_flags |= DWC_ETH_QOS_MAGIC_WAKEUP;
 
-	if (pdata->phy_intr_en && pdata->phy_irq && pdata->phy_wol_wolopts)
-		pmt_flags |= DWC_ETH_QOS_PHY_INTR_WAKEUP;
-
-	ret = DWC_ETH_QOS_powerdown(dev, pmt_flags, DWC_ETH_QOS_DRIVER_CONTEXT);
+	ret = DWC_ETH_QOS_powerdown(net_dev, pmt_flags, DWC_ETH_QOS_DRIVER_CONTEXT);
 
 	DWC_ETH_QOS_suspend_clks(pdata);
 
-	EMACDBG("<--DWC_ETH_QOS_suspend ret = %d\n", ret);
+	/* Suspend the PHY RXC clock. */
+	if (dwc_eth_qos_res_data.is_pinctrl_names &&
+		(dwc_eth_qos_res_data.rgmii_rxc_suspend_state != NULL)) {
+		/* Remove RXC clock source from Phy.*/
+		ret = pinctrl_select_state(dwc_eth_qos_res_data.pinctrl,
+				dwc_eth_qos_res_data.rgmii_rxc_suspend_state);
+		if (ret)
+			EMACERR("Unable to set rgmii_rxc_suspend_state state, err = %d\n", ret);
+		else
+			EMACDBG("Set rgmii_rxc_suspend_state succeed\n");
+	}
 
+	EMACDBG("<--DWC_ETH_QOS_suspend ret = %d\n", ret);
+#ifdef CONFIG_MSM_BOOT_TIME_MARKER
+	pdata->print_kpi = 0;
+#endif
 	return ret;
 }
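
The suspend path above parks the RGMII RXC pin by switching to a dedicated pinctrl state that was looked up during resource init; resume switches back. Below is a minimal sketch of that idiom with placeholder state names ("sleep"/"active") and an example_ context struct that is not part of this driver.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

struct example_pinctrl_ctx {
	struct pinctrl *pinctrl;
	struct pinctrl_state *sleep_state;
	struct pinctrl_state *active_state;
};

static int example_pinctrl_init(struct device *dev,
				struct example_pinctrl_ctx *ctx)
{
	ctx->pinctrl = devm_pinctrl_get(dev);
	if (IS_ERR(ctx->pinctrl))
		return PTR_ERR(ctx->pinctrl);

	ctx->sleep_state = pinctrl_lookup_state(ctx->pinctrl, "sleep");
	ctx->active_state = pinctrl_lookup_state(ctx->pinctrl, "active");
	if (IS_ERR(ctx->sleep_state) || IS_ERR(ctx->active_state))
		return -EINVAL;

	return 0;
}

static int example_suspend_pins(struct example_pinctrl_ctx *ctx)
{
	/* Park the pins (e.g. gate the RXC input) for the suspend window. */
	return pinctrl_select_state(ctx->pinctrl, ctx->sleep_state);
}
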
 
@@ -2104,18 +2396,18 @@
  * \retval 0
  */
 
-static INT DWC_ETH_QOS_resume(struct platform_device *pdev)
+static INT DWC_ETH_QOS_resume(struct device *dev)
 {
-	struct net_device *dev = platform_get_drvdata(pdev);
-	struct DWC_ETH_QOS_prv_data *pdata = netdev_priv(dev);
+	struct DWC_ETH_QOS_prv_data *pdata = gDWC_ETH_QOS_prv_data;
+	struct net_device *net_dev = pdata->dev;
 	INT ret;
 
-	DBGPR("-->DWC_ETH_QOS_resume\n");
-	if (of_device_is_compatible(pdev->dev.of_node, "qcom,emac-smmu-embedded"))
+	EMACDBG("-->DWC_ETH_QOS_resume\n");
+	if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded"))
 		return 0;
 
-	if (!dev || !netif_running(dev)) {
-		DBGPR("<--DWC_ETH_QOS_dev_resume\n");
+	if (!net_dev || !netif_running(net_dev)) {
+		EMACERR("<--DWC_ETH_QOS_dev_resume\n");
 		return -EINVAL;
 	}
 
@@ -2132,40 +2424,143 @@
 
 		/* Wakeup reason can be PHY link event or a RX packet */
 		/* Set a wakeup event to ensure enough time for processing */
-		pm_wakeup_event(&pdev->dev, 5000);
+		pm_wakeup_event(dev, 5000);
 		return 0;
 	}
 
+	/* Resume the PhY RXC clock. */
+	if (dwc_eth_qos_res_data.is_pinctrl_names &&
+		(dwc_eth_qos_res_data.rgmii_rxc_resume_state != NULL)) {
+
+		/* Enable RXC clock source from Phy.*/
+		ret = pinctrl_select_state(dwc_eth_qos_res_data.pinctrl,
+				dwc_eth_qos_res_data.rgmii_rxc_resume_state);
+		if (ret)
+			EMACERR("Unable to set rgmii_rxc_resume_state state, err = %d\n", ret);
+		else
+			EMACDBG("Set rgmii_rxc_resume_state succeed\n");
+	}
+
 	DWC_ETH_QOS_resume_clks(pdata);
 
-	ret = DWC_ETH_QOS_powerup(dev, DWC_ETH_QOS_DRIVER_CONTEXT);
+	ret = DWC_ETH_QOS_powerup(net_dev, DWC_ETH_QOS_DRIVER_CONTEXT);
 
 	if (pdata->ipa_enabled)
 		DWC_ETH_QOS_ipa_offload_event_handler(pdata, EV_DPM_RESUME);
 
 	/* Wakeup reason can be PHY link event or a RX packet */
 	/* Set a wakeup event to ensure enough time for processing */
-	pm_wakeup_event(&pdev->dev, 5000);
+	pm_wakeup_event(dev, 5000);
 
-	DBGPR("<--DWC_ETH_QOS_resume\n");
+	EMACDBG("<--DWC_ETH_QOS_resume\n");
 
 	return ret;
 }
 
 #endif /* CONFIG_PM */
 
-static struct platform_driver DWC_ETH_QOS_plat_drv = {
-	.probe = DWC_ETH_QOS_probe,
-	.remove = DWC_ETH_QOS_remove,
-	.shutdown = DWC_ETH_QOS_shutdown,
+static int DWC_ETH_QOS_hib_restore(struct device *dev) {
+	struct DWC_ETH_QOS_prv_data *pdata = gDWC_ETH_QOS_prv_data;
+	int ret = 0;
+
+	if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded"))
+		return 0;
+
+	EMACINFO(" start\n");
+
+	ret = DWC_ETH_QOS_init_regulators(dev);
+	if (ret)
+		return ret;
+
+	ret = DWC_ETH_QOS_init_gpios(dev);
+	if (ret)
+		return ret;
+
+	ret = DWC_ETH_QOS_get_clks(dev);
+	if (ret)
+		return ret;
+
+	DWC_ETH_QOS_set_clk_and_bus_config(pdata, pdata->speed);
+
+	DWC_ETH_QOS_set_rgmii_func_clk_en();
+
+#ifdef DWC_ETH_QOS_CONFIG_PTP
+	DWC_ETH_QOS_ptp_init(pdata);
+#endif /* end of DWC_ETH_QOS_CONFIG_PTP */
+
+	/* issue software reset to device */
+	pdata->hw_if.exit();
+
+	/* Bypass PHYLIB for TBI, RTBI and SGMII interface */
+	if (pdata->hw_feat.sma_sel == 1) {
+		ret = DWC_ETH_QOS_mdio_register(pdata->dev);
+		if (ret < 0) {
+			EMACERR("MDIO bus (id %d) registration failed\n",
+					  pdata->bus_id);
+			return ret;
+		}
+	}
+
+	if (!(pdata->dev->flags & IFF_UP)) {
+		pdata->dev->netdev_ops->ndo_open(pdata->dev);
+		pdata->dev->flags |= IFF_UP;
+	}
+
+	EMACINFO("end\n");
+
+	return ret;
+}
+
+static int DWC_ETH_QOS_hib_freeze(struct device *dev) {
+	struct DWC_ETH_QOS_prv_data *pdata = gDWC_ETH_QOS_prv_data;
+	int ret = 0;
+
+	if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded"))
+		return 0;
+
+	EMACINFO(" start\n");
+	if (pdata->dev->flags & IFF_UP) {
+		pdata->dev->netdev_ops->ndo_stop(pdata->dev);
+		pdata->dev->flags &= ~IFF_UP;
+	}
+
+	if (pdata->hw_feat.sma_sel == 1)
+		DWC_ETH_QOS_mdio_unregister(pdata->dev);
+
+#ifdef DWC_ETH_QOS_CONFIG_PTP
+	DWC_ETH_QOS_ptp_remove(pdata);
+#endif /* end of DWC_ETH_QOS_CONFIG_PTP */
+
+	DWC_ETH_QOS_disable_clks(dev);
+
+	DWC_ETH_QOS_disable_regulators();
+
+	DWC_ETH_QOS_free_gpios();
+
+	EMACINFO("end\n");
+
+	return ret;
+}
+
+static const struct dev_pm_ops DWC_ETH_QOS_pm_ops = {
+	.freeze = DWC_ETH_QOS_hib_freeze,
+	.restore = DWC_ETH_QOS_hib_restore,
+	.thaw = DWC_ETH_QOS_hib_restore,
 #ifdef CONFIG_PM
 	.suspend = DWC_ETH_QOS_suspend,
 	.resume = DWC_ETH_QOS_resume,
 #endif
+};
+
+static struct platform_driver DWC_ETH_QOS_plat_drv = {
+	.probe = DWC_ETH_QOS_probe,
+	.remove = DWC_ETH_QOS_remove,
+	.shutdown = DWC_ETH_QOS_shutdown,
 	.driver = {
 		.name = DRV_NAME,
 		.owner = THIS_MODULE,
 		.of_match_table = DWC_ETH_QOS_plat_drv_match,
+		.pm = &DWC_ETH_QOS_pm_ops,
 	},
 };
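
With the hunk above, suspend/resume are no longer reached through the legacy platform_driver callbacks but through a dev_pm_ops table hung off .driver.pm, which is also what makes room for the hibernation freeze/thaw/restore hooks. A bare-bones sketch of that wiring follows; the driver name and callbacks are placeholders, not part of this patch.

#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev) { return 0; }
static int example_freeze(struct device *dev) { return 0; }
static int example_restore(struct device *dev) { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
	.freeze  = example_freeze,
	.thaw    = example_restore,
	.restore = example_restore,
};

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-platform-driver",
		.pm = &example_pm_ops,
	},
};

One consequence of this move: dev_pm_ops callbacks take a struct device *, which is why the patch changes the suspend/resume signatures from platform_device to device and fetches the private data from gDWC_ETH_QOS_prv_data instead of platform_get_drvdata().
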
 
@@ -2182,7 +2577,7 @@
 {
 	INT ret = 0;
 
-	DBGPR("-->DWC_ETH_QOS_init_module\n");
+	EMACDBG("-->DWC_ETH_QOS_init_module\n");
 
 	ret = platform_driver_register(&DWC_ETH_QOS_plat_drv);
 	if (ret < 0) {
@@ -2190,11 +2585,17 @@
 		return ret;
 	}
 
+	ipc_emac_log_ctxt = ipc_log_context_create(IPCLOG_STATE_PAGES,"emac", 0);
+	if (!ipc_emac_log_ctxt)
+		EMACERR("Error creating logging context for emac\n");
+	else
+		EMACDBG("IPC logging has been enabled for emac\n");
+
 #ifdef DWC_ETH_QOS_CONFIG_DEBUGFS
 	create_debug_files();
 #endif
 
-	DBGPR("<--DWC_ETH_QOS_init_module\n");
+	EMACDBG("<--DWC_ETH_QOS_init_module\n");
 
 	return ret;
 }
@@ -2218,6 +2619,9 @@
 
 	platform_driver_unregister(&DWC_ETH_QOS_plat_drv);
 
+	if (ipc_emac_log_ctxt != NULL)
+		ipc_log_context_destroy(ipc_emac_log_ctxt);
+
 	DBGPR("<--DWC_ETH_QOS_exit_module\n");
 }
 
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_poll_support.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_poll_support.c
index 163427b..1e2c998 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_poll_support.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_poll_support.c
@@ -36,7 +36,7 @@
 
 	unsigned int len = 0, buf_len = 5000;
 	char* temp_buf;
-	ssize_t ret_cnt;
+	ssize_t ret_cnt = 0;
 	struct pps_info *info;
 
 	info = filp->private_data;
@@ -54,7 +54,8 @@
 
 		ret_cnt = simple_read_from_buffer(buf, count, f_pos, temp_buf, len);
 		kfree(temp_buf);
-		EMACERR("poll pps2intr info=%d sent by kernel\n", gDWC_ETH_QOS_prv_data->avb_class_a_intr_cnt);
+		if (gDWC_ETH_QOS_prv_data)
+			EMACERR("poll pps2intr info=%d sent by kernel\n", gDWC_ETH_QOS_prv_data->avb_class_a_intr_cnt);
 	} else if (info->channel_no == AVB_CLASS_B_CHANNEL_NUM ) {
 		temp_buf = kzalloc(buf_len, GFP_KERNEL);
 		if (!temp_buf)
@@ -151,8 +152,8 @@
 	.poll = pps_fops_poll,
 };
 
-int create_pps_interrupt_info_device_node(dev_t *pps_dev_t, struct cdev* pps_cdev,
-	struct class* pps_class, char *pps_dev_node_name)
+int create_pps_interrupt_info_device_node(dev_t *pps_dev_t, struct cdev** pps_cdev,
+	struct class** pps_class, char *pps_dev_node_name)
 {
 	int ret;
 	EMACDBG("create_pps_interrupt_info_device_node enter \n");
@@ -164,28 +165,28 @@
 		goto alloc_chrdev1_region_fail;
 	}
 
-	pps_cdev = cdev_alloc();
-	if(!pps_cdev) {
+	*pps_cdev = cdev_alloc();
+	if(!*pps_cdev) {
 		ret = -ENOMEM;
 		EMACERR("failed to alloc cdev\n");
 		goto fail_alloc_cdev;
 	}
-	cdev_init(pps_cdev, &pps_fops);
+	cdev_init(*pps_cdev, &pps_fops);
 
-	ret = cdev_add(pps_cdev, *pps_dev_t, 1);
+	ret = cdev_add(*pps_cdev, *pps_dev_t, 1);
 	if (ret < 0) {
 		EMACERR(":cdev_add err=%d\n", -ret);
 		goto cdev1_add_fail;
 	}
 
-	pps_class = class_create(THIS_MODULE, pps_dev_node_name);
-	if(!pps_class) {
+	*pps_class = class_create(THIS_MODULE, pps_dev_node_name);
+	if(!*pps_class) {
 		ret = -ENODEV;
 		EMACERR("failed to create class\n");
 		goto fail_create_class;
 	}
 
-	if (!device_create(pps_class, NULL,
+	if (!device_create(*pps_class, NULL,
 		*pps_dev_t, NULL, pps_dev_node_name)) {
 		ret = -EINVAL;
 		EMACERR("failed to create device_create\n");
@@ -197,9 +198,9 @@
 	return 0;
 
 	fail_create_device:
-		class_destroy(pps_class);
+		class_destroy(*pps_class);
 	fail_create_class:
-		cdev_del(pps_cdev);
+		cdev_del(*pps_cdev);
 	cdev1_add_fail:
 	fail_alloc_cdev:
 		unregister_chrdev_region(*pps_dev_t, 1);
@@ -210,15 +211,19 @@
 
 int remove_pps_interrupt_info_device_node(struct DWC_ETH_QOS_prv_data *pdata)
 {
+	cdev_del(pdata->avb_class_a_cdev);
 	device_destroy(pdata->avb_class_a_class, pdata->avb_class_a_dev_t);
 	class_destroy(pdata->avb_class_a_class);
-	cdev_del(pdata->avb_class_a_cdev);
 	unregister_chrdev_region(pdata->avb_class_a_dev_t, 1);
+	pdata->avb_class_a_cdev = NULL;
+	pdata->avb_class_a_class = NULL;
 
+	cdev_del(pdata->avb_class_b_cdev);
 	device_destroy(pdata->avb_class_b_class, pdata->avb_class_b_dev_t);
 	class_destroy(pdata->avb_class_b_class);
-	cdev_del(pdata->avb_class_b_cdev);
 	unregister_chrdev_region(pdata->avb_class_b_dev_t, 1);
+	pdata->avb_class_b_cdev = NULL;
+	pdata->avb_class_b_class = NULL;
 	return 0;
 }
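
The signature change above (struct cdev * and struct class * becoming double pointers) exists because the previous version only assigned the freshly allocated cdev/class to the function's local copies of the pointers, so the caller's pdata->avb_class_*_cdev and *_class fields were never filled in and the cdev_del()/class_destroy() calls in remove_pps_interrupt_info_device_node() operated on stale values. A tiny illustration of the underlying C pitfall, using placeholder names:

struct example_obj { int id; };

static struct example_obj example_storage = { .id = 42 };

/* Assigning to a pointer passed by value only changes the callee's copy. */
static void example_alloc_broken(struct example_obj *out)
{
	out = &example_storage;	/* caller's pointer is unchanged */
}

/* Passing the address of the caller's pointer lets the callee fill it in,
 * which is what the patch now does for the cdev and class handles.
 */
static void example_alloc_fixed(struct example_obj **out)
{
	*out = &example_storage;
}
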
 
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c
index 9223244..1bcead3 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_ptp.c
@@ -283,8 +283,8 @@
 int DWC_ETH_QOS_ptp_init(struct DWC_ETH_QOS_prv_data *pdata)
 {
 	int ret = 0;
-	struct ifr_data_struct req = {0};
 #ifdef CONFIG_PPS_OUTPUT
+	struct ifr_data_struct req = {0};
 	struct ETH_PPS_Config eth_pps_cfg = {0};
 #endif
 
@@ -315,12 +315,13 @@
 	}
 
 #ifdef CONFIG_PPS_OUTPUT
-	if (pdata->emac_hw_version_type == EMAC_HW_v2_3_1) {
-		/*Configuaring PPS0 PPS output frequency to defualt 19.2 Mhz*/
+	if (pdata->res_data->pps_lpass_conn_en) {
+		/* Configuring PPS0 PPS output frequency to default 19.2 MHz */
 		eth_pps_cfg.ppsout_ch = 0;
 		eth_pps_cfg.ptpclk_freq = DWC_ETH_QOS_DEFAULT_PTP_CLOCK;
-		eth_pps_cfg.ppsout_freq = 19200000;
+		eth_pps_cfg.ppsout_freq = DWC_ETH_QOS_DEFAULT_LPASS_PPS_FREQUENCY;
 		eth_pps_cfg.ppsout_start = 1;
+		eth_pps_cfg.ppsout_duty = 50;
 		req.ptr = (void*)&eth_pps_cfg;
 
 		DWC_ETH_QOS_pps_timer_init(&req);
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yapphdr.h b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yapphdr.h
index 6af15b7..1c4980d 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yapphdr.h
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yapphdr.h
@@ -50,7 +50,7 @@
 #define DWC_ETH_QOS_MAX_TX_QUEUE_CNT 8
 #define DWC_ETH_QOS_MAX_RX_QUEUE_CNT 8
 
-//#define CONFIG_PPS_OUTPUT   // for PPS Output
+#define CONFIG_PPS_OUTPUT   // for PPS Output
 
 /* Private IOCTL for handling device specific task */
 #define DWC_ETH_QOS_PRV_IOCTL	SIOCDEVPRIVATE
diff --git a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
index adcade2..ee29121 100644
--- a/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
+++ b/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights
  * reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -124,7 +124,15 @@
 #include <linux/mailbox_client.h>
 #include <linux/mailbox/qmp.h>
 #include <linux/mailbox_controller.h>
-
+#include <linux/ipc_logging.h>
+#include <linux/inetdevice.h>
+#include <net/inet_common.h>
+#include <net/ipv6.h>
+#include <linux/inet.h>
+#include <asm/uaccess.h>
+#ifdef CONFIG_MSM_BOOT_TIME_MARKER
+#include <soc/qcom/boot_stats.h>
+#endif
 /* QOS Version Control Macros */
 /* #define DWC_ETH_QOS_VER_4_0 */
 /* Default Configuration is for QOS version 4.1 and above */
@@ -133,6 +141,13 @@
 
 #include <asm-generic/errno.h>
 
+extern void *ipc_emac_log_ctxt;
+
+#define IPCLOG_STATE_PAGES 50
+#define __FILENAME__ (strrchr(__FILE__, '/') ? \
+	strrchr(__FILE__, '/') + 1 : __FILE__)
+
+
 #ifdef CONFIG_PGTEST_OBJ
 #define DWC_ETH_QOS_CONFIG_PGTEST
 #endif
@@ -336,6 +351,7 @@
 #define LINK_UP 1
 #define LINK_DOWN 0
 #define ENABLE_PHY_INTERRUPTS 0xcc00
+#define MICREL_LINK_UP_INTR_STATUS		BIT(0)
 
 /* Default MTL queue operation mode values */
 #define DWC_ETH_QOS_Q_DISABLED	0x0
@@ -362,6 +378,7 @@
 		"<error>"))))
 
 #define DWC_ETH_QOS_MAC_ADDR_LEN 6
+#define DWC_ETH_QOS_MAC_ADDR_STR_LEN 18
 #ifndef DWC_ETH_QOS_ENABLE_VLAN_TAG
 #define VLAN_HLEN 0
 #endif
@@ -410,7 +427,8 @@
 #define DWC_ETH_QOS_SYSCLOCK	250000000 /* System clock is 250MHz */
 #define DWC_ETH_QOS_SYSTIMEPERIOD	4 /* System time period is 4ns */
 
-#define DWC_ETH_QOS_DEFAULT_PTP_CLOCK 250000000
+#define DWC_ETH_QOS_DEFAULT_PTP_CLOCK    96000000
+#define DWC_ETH_QOS_DEFAULT_LPASS_PPS_FREQUENCY 19200000
 
 #define DWC_ETH_QOS_TX_QUEUE_CNT (pdata->tx_queue_cnt)
 #define DWC_ETH_QOS_RX_QUEUE_CNT (pdata->rx_queue_cnt)
@@ -645,6 +663,7 @@
 #define IPA_DMA_TX_CH 0
 #define IPA_DMA_RX_CH 0
 
+#define IPA_RX_TO_DMA_CH_MAP_NUM	BIT(0)
 
 #define EMAC_GDSC_EMAC_NAME "gdsc_emac"
 #define EMAC_VREG_RGMII_NAME "vreg_rgmii"
@@ -987,6 +1006,7 @@
 	/* for hw time stamping */
 	INT(*config_hw_time_stamping)(UINT);
 	INT(*config_sub_second_increment)(unsigned long ptp_clock);
+	INT(*config_default_addend)(struct DWC_ETH_QOS_prv_data *pdata, unsigned long ptp_clock);
 	INT(*init_systime)(UINT, UINT);
 	INT(*config_addend)(UINT);
 	INT(*adjust_systime)(UINT, UINT, INT, bool);
@@ -1546,6 +1566,9 @@
 	bool is_pinctrl_names;
 	int gpio_phy_intr_redirect;
 	int gpio_phy_reset;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *rgmii_rxc_suspend_state;
+	struct pinctrl_state *rgmii_rxc_resume_state;
 
 	/* Regulators */
 	struct regulator *gdsc_emac;
@@ -1559,6 +1582,8 @@
 	struct clk *rgmii_clk;
 	struct clk *ptp_clk;
 	unsigned int emac_hw_version_type;
+	bool early_eth_en;
+	bool pps_lpass_conn_en;
 };
 
 struct DWC_ETH_QOS_prv_ipa_data {
@@ -1851,7 +1876,21 @@
 	dev_t avb_class_b_dev_t;
 	struct cdev* avb_class_b_cdev;
 	struct class* avb_class_b_class;
+	struct delayed_work ipv6_addr_assign_wq;
+	bool print_kpi;
+};
 
+struct ip_params {
+	UCHAR mac_addr[DWC_ETH_QOS_MAC_ADDR_LEN];
+	bool is_valid_mac_addr;
+	char link_speed[32];
+	bool is_valid_link_speed;
+	char ipv4_addr_str[32];
+	struct in_addr ipv4_addr;
+	bool is_valid_ipv4_addr;
+	char ipv6_addr_str[48];
+	struct in6_ifreq ipv6_addr;
+	bool is_valid_ipv6_addr;
 };
 
 typedef enum {
@@ -2003,6 +2042,8 @@
 #define EMAC_PHY_RESET "dev-emac-phy_reset_state"
 #define EMAC_PHY_INTR "dev-emac-phy_intr"
 #define EMAC_PIN_PPS0 "dev-emac_pin_pps_0"
+#define EMAC_RGMII_RXC_SUSPEND "dev-emac-rgmii_rxc_suspend_state"
+#define EMAC_RGMII_RXC_RESUME "dev-emac-rgmii_rxc_resume_state"
 
 #ifdef PER_CH_INT
 void DWC_ETH_QOS_handle_DMA_Int(struct DWC_ETH_QOS_prv_data *pdata, int chinx, bool);
@@ -2016,6 +2057,8 @@
 
 void DWC_ETH_QOS_dma_desc_stats_read(struct DWC_ETH_QOS_prv_data *pdata);
 void DWC_ETH_QOS_dma_desc_stats_init(struct DWC_ETH_QOS_prv_data *pdata);
+int DWC_ETH_QOS_add_ipaddr(struct DWC_ETH_QOS_prv_data *);
+int DWC_ETH_QOS_add_ipv6addr(struct DWC_ETH_QOS_prv_data *);
 
 /* For debug prints*/
 #define DRV_NAME "qcom-emac-dwc-eqos"
@@ -2054,7 +2097,14 @@
 #define EMACINFO(fmt, args...) \
 	pr_info(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
 #define EMACERR(fmt, args...) \
-	pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+do {\
+	pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+	if (ipc_emac_log_ctxt) { \
+		ipc_log_string(ipc_emac_log_ctxt, \
+		"%s: %s[%u]:[emac] ERROR:" fmt, __FILENAME__ , \
+		__func__, __LINE__, ## args); \
+	} \
+}while(0)
 
 #ifdef YDEBUG
 #define DBGPR(x...) printk(KERN_ALERT x)
diff --git a/drivers/emac-dwc-eqos/Kconfig b/drivers/emac-dwc-eqos/Kconfig
new file mode 100644
index 0000000..b3e1c06
--- /dev/null
+++ b/drivers/emac-dwc-eqos/Kconfig
@@ -0,0 +1,11 @@
+#
+# Synopsys EMAC device configuration
+#
+
+config EMAC_DWC_EQOS
+	tristate "Qualcomm Technologies Inc. EMAC support"
+	depends on (ARM || ARM64)
+	default y
+	help
+	  This driver supports the Synopsys EMAC Gigabit
+	  Ethernet controller.
diff --git a/drivers/emac-dwc-eqos/Kconfig_app b/drivers/emac-dwc-eqos/Kconfig_app
new file mode 100644
index 0000000..1f4d498
--- /dev/null
+++ b/drivers/emac-dwc-eqos/Kconfig_app
@@ -0,0 +1,6 @@
+config EMAC_APP
+       bool "EMAC app support"
+       depends on EMAC_DWC_EQOS
+       default y
+       help
+         Say Y here if you want to run the Neutrino Ethernet open function.
\ No newline at end of file
diff --git a/drivers/emac-dwc-eqos/Makefile.builtin b/drivers/emac-dwc-eqos/Makefile.builtin
new file mode 100644
index 0000000..f6a5573
--- /dev/null
+++ b/drivers/emac-dwc-eqos/Makefile.builtin
@@ -0,0 +1,21 @@
+obj-$(CONFIG_EMAC_DWC_EQOS) += DWC_ETH_QOS_dev.o \
+		DWC_ETH_QOS_drv.o \
+		DWC_ETH_QOS_desc.o \
+		DWC_ETH_QOS_ethtool.o \
+		DWC_ETH_QOS_mdio.o \
+		DWC_ETH_QOS_eee.o \
+		DWC_ETH_QOS_platform.o \
+		DWC_ETH_QOS_rgmii_io_macro.o \
+		DWC_ETH_QOS_poll_support.o
+
+ifeq ($(CONFIG_PTP_1588_CLOCK), y)
+EXTRA_CFLAGS+=-DCONFIG_PTPSUPPORT_OBJ
+obj-$(CONFIG_EMAC_DWC_EQOS) += DWC_ETH_QOS_ptp.o
+endif
+
+ifeq ($(CONFIG_IPA_OFFLOAD), y)
+KBUILD_CFLAGS += -DDWC_ETH_QOS_ENABLE_IPA
+obj-$(CONFIG_EMAC_DWC_EQOS) += DWC_ETH_QOS_ipa.o
+endif
+
+KBUILD_CFLAGS += -DDWC_ETH_QOS_BUILTIN
diff --git a/drivers/emac-dwc-eqos/Makefile_app.builtin b/drivers/emac-dwc-eqos/Makefile_app.builtin
new file mode 100644
index 0000000..6b3a7a4
--- /dev/null
+++ b/drivers/emac-dwc-eqos/Makefile_app.builtin
@@ -0,0 +1,4 @@
+#
+# Makefile for the DWC_ETH_QOS app kernel module
+#
+obj-$(CONFIG_EMAC_APP) = DWC_ETH_QOS_app.o
diff --git a/drivers/rmnet/perf/Android.mk b/drivers/rmnet/perf/Android.mk
index 5c7802a..e47fb97 100644
--- a/drivers/rmnet/perf/Android.mk
+++ b/drivers/rmnet/perf/Android.mk
@@ -3,6 +3,7 @@
 
 RMNET_PERF_DLKM_PLATFORMS_LIST := msmnile
 RMNET_PERF_DLKM_PLATFORMS_LIST += kona
+RMNET_PERF_DLKM_PLATFORMS_LIST += lito
 
 ifeq ($(call is-board-platform-in-list, $(RMNET_PERF_DLKM_PLATFORMS_LIST)),true)
 LOCAL_PATH := $(call my-dir)
@@ -27,7 +28,6 @@
 DLKM_DIR := ./device/qcom/common/dlkm
 
 KBUILD_OPTIONS := $(RMNET_PERF_BLD_DIR)
-LOCAL_MODULE_TAGS := debug
 
 $(warning $(DLKM_DIR))
 include $(DLKM_DIR)/AndroidKernelModule.mk
diff --git a/drivers/rmnet/perf/rmnet_perf_config.c b/drivers/rmnet/perf/rmnet_perf_config.c
index 9a0b081..8a5f50e 100644
--- a/drivers/rmnet/perf/rmnet_perf_config.c
+++ b/drivers/rmnet/perf/rmnet_perf_config.c
@@ -22,6 +22,7 @@
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h>
+#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h>
 
 MODULE_LICENSE("GPL v2");
 
@@ -54,18 +55,30 @@
 {
 	int i;
 	struct sk_buff *skbn;
-	struct rmnet_perf_core_64k_buff_pool *pool = perf->core_meta->buff_pool;
 	enum rmnet_perf_resource_management_e return_val;
+	struct rmnet_perf_core_64k_buff_pool *pool = perf->core_meta->buff_pool;
 
 	return_val = RMNET_PERF_RESOURCE_MGMT_SUCCESS;
-
+	memset(pool, 0, sizeof(struct rmnet_perf_core_64k_buff_pool));
+	pool->index = 0;
 	for (i = 0; i < RMNET_PERF_NUM_64K_BUFFS; i++) {
 		skbn = alloc_skb(RMNET_PERF_CORE_RECYCLE_SKB_SIZE, GFP_ATOMIC);
-		if (!skbn)
+		if (!skbn) {
+			int j;
+
 			return_val = RMNET_PERF_RESOURCE_MGMT_FAIL;
+			/* If one skb fails to allocate, don't use the feature */
+			for (j = i - 1; j >= 0; j--) {
+				if (pool->available[j]) {
+					kfree_skb(pool->available[j]);
+					pool->available[j] = NULL;
+				}
+			}
+			return return_val;
+		}
 		pool->available[i] = skbn;
 	}
-	pool->index = 0;
+
 	return return_val;
 }
 
@@ -88,11 +101,14 @@
 	/* Free both busy and available because if its truly busy,
 	 * we will simply decrement the users count... This means NW stack
 	 * will still have opportunity to process the packet as it wishes
-	 * and will naturally free the sk_buff when it is done
+	 * and will naturally free the sk_buff when it is done. A non-NULL
+	 * available[0] means that every index of available was filled with
+	 * an SKB at module initialization.
 	 */
-
-	for (i = 0; i < RMNET_PERF_NUM_64K_BUFFS; i++)
-		kfree_skb(buff_pool->available[i]);
+	if (buff_pool->available[0]) {
+		for (i = 0; i < RMNET_PERF_NUM_64K_BUFFS; i++)
+			kfree_skb(buff_pool->available[i]);
+	}
 }
 
 /* rmnet_perf_config_free_resources() - on rmnet teardown free all the
@@ -115,16 +131,18 @@
 	}
 
 	/* Free everything flow nodes currently hold */
-	rmnet_perf_opt_flush_all_flow_nodes(perf);
+	rmnet_perf_opt_flush_all_flow_nodes();
 
 	/* Get rid of 64k sk_buff cache */
 	rmnet_perf_config_free_64k_buffs(perf);
 	/* Before we free tcp_opt's structures, make sure we arent holding
 	 * any SKB's hostage
 	 */
-	rmnet_perf_core_free_held_skbs(perf);
+	rmnet_perf_core_free_held_skbs();
 
-	//rmnet_perf_core_timer_exit(perf->core_meta);
+	/* Clean up any remaining nodes in the flow table before freeing */
+	rmnet_perf_free_hash_table();
+
 	/* Since we allocated in one chunk, we will also free in one chunk */
 	kfree(perf);
 
@@ -167,10 +185,10 @@
 
 	/* allocate all the memory in one chunk for cache coherency sake */
 	buffer_head = kmalloc(total_size, GFP_KERNEL);
+	*perf = buffer_head;
 	if (!buffer_head)
 		return RMNET_PERF_RESOURCE_MGMT_FAIL;
 
-	*perf = buffer_head;
 	local_perf = *perf;
 	buffer_head += perf_size;
 
@@ -192,11 +210,11 @@
 		*flow_node = buffer_head;
 		buffer_head += flow_node_size;
 		(*flow_node)->num_pkts_held = 0;
+		(*flow_node)->len = 0;
 	}
 
 	local_perf->core_meta = buffer_head;
 	core_meta = local_perf->core_meta;
-	//rmnet_perf_core_timer_init(core_meta);
 	buffer_head += core_meta_size;
 
 	/* Assign common (not specific to something like opt) structures */
@@ -213,6 +231,7 @@
 	core_meta->bm_state->curr_seq = 0;
 	core_meta->bm_state->expect_packets = 0;
 	core_meta->bm_state->wait_for_start = true;
+	core_meta->bm_state->callbacks_valid = false;
 	buffer_head += bm_state_size;
 
 	return RMNET_PERF_RESOURCE_MGMT_SUCCESS;
@@ -230,16 +249,25 @@
 	perf->core_meta->dev = dev;
 	/* register for DL marker */
 	dl_ind = kzalloc(sizeof(struct rmnet_map_dl_ind), GFP_ATOMIC);
+	perf->core_meta->dl_ind = dl_ind;
 	if (dl_ind) {
 		dl_ind->priority = RMNET_PERF;
-		dl_ind->dl_hdr_handler =
-			&rmnet_perf_core_handle_map_control_start;
-		dl_ind->dl_trl_handler =
-			&rmnet_perf_core_handle_map_control_end;
-		perf->core_meta->dl_ind = dl_ind;
+		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2) {
+			dl_ind->dl_hdr_handler_v2 =
+				&rmnet_perf_core_handle_map_control_start_v2;
+			dl_ind->dl_trl_handler_v2 =
+				&rmnet_perf_core_handle_map_control_end_v2;
+		} else {
+			dl_ind->dl_hdr_handler =
+				&rmnet_perf_core_handle_map_control_start;
+			dl_ind->dl_trl_handler =
+				&rmnet_perf_core_handle_map_control_end;
+		}
+
 		if (rmnet_map_dl_ind_register(port, dl_ind)) {
 			kfree(dl_ind);
 			pr_err("%s(): Failed to register dl_ind\n", __func__);
+			perf->core_meta->dl_ind = NULL;
 			rc = RMNET_PERF_RESOURCE_MGMT_FAIL;
 		}
 	} else {
@@ -249,13 +277,14 @@
 
 	/* register for PS mode indications */
 	ps_ind = kzalloc(sizeof(struct qmi_rmnet_ps_ind), GFP_ATOMIC);
+	perf->core_meta->ps_ind = ps_ind;
 	if (ps_ind) {
 		ps_ind->ps_on_handler = &rmnet_perf_core_ps_on;
 		ps_ind->ps_off_handler = &rmnet_perf_core_ps_off;
-		perf->core_meta->ps_ind = ps_ind;
 		if (qmi_rmnet_ps_ind_register(port, ps_ind)) {
 			kfree(ps_ind);
 			rc = RMNET_PERF_RESOURCE_MGMT_FAIL;
+			perf->core_meta->ps_ind = NULL;
 			pr_err("%s(): Failed to register ps_ind\n", __func__);
 		}
 	} else {
@@ -263,6 +292,9 @@
 		pr_err("%s(): Failed to allocate ps_ind\n", __func__);
 	}
 
+	if (rc == RMNET_PERF_RESOURCE_MGMT_SUCCESS)
+		perf->core_meta->bm_state->callbacks_valid = true;
+
 	return rc;
 }
 
@@ -287,9 +319,11 @@
 	 */
 	rc = rmnet_perf_config_alloc_64k_buffs(perf);
 	if (rc == RMNET_PERF_RESOURCE_MGMT_FAIL) {
+		/* Since buffer recycling is not a required feature, do not
+		 * return a failure status here.
+		 */
 		pr_err("%s(): Failed to allocate 64k buffers for recycling\n",
 		       __func__);
-		return RMNET_PERF_RESOURCE_MGMT_SEMI_FAIL;
 	}
 
 	rc = rmnet_perf_config_register_callbacks(real_dev, port);
@@ -337,6 +371,18 @@
 	return return_val_final;
 }
 
+static bool rmnet_perf_config_hook_registered(void)
+{
+	int (*deag_entry)(struct sk_buff *skb);
+	void (*frag_entry)(struct rmnet_frag_descriptor *frag_desc,
+			   struct rmnet_port *port);
+
+	deag_entry = rcu_dereference(rmnet_perf_deag_entry);
+	frag_entry = rcu_dereference(rmnet_perf_desc_entry);
+
+	return deag_entry || frag_entry;
+}
+
 /* TODO Needs modifying*/
 static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
 				       unsigned long event, void *data)
@@ -352,32 +398,36 @@
 	switch (event) {
 	case NETDEV_UNREGISTER:
 		if (rmnet_is_real_dev_registered(dev) &&
-		    rmnet_perf_deag_entry &&
-		    !strncmp(dev->name, "rmnet_ipa0", 10)) {
+		    rmnet_perf_config_hook_registered() &&
+		    (!strncmp(dev->name, "rmnet_ipa0", 10) ||
+		     !strncmp(dev->name, "rmnet_mhi0", 10))) {
 			struct rmnet_perf_core_meta *core_meta =
 				perf->core_meta;
-			pr_err("%s(): rmnet_perf netdevice unregister\n",
-			       __func__);
+			pr_info("%s(): rmnet_perf netdevice unregister\n",
+				__func__);
 			return_val = rmnet_perf_dereg_callbacks(dev, core_meta);
 			return_val |= rmnet_perf_netdev_down();
 			if (return_val)
 				pr_err("%s(): Error on netdev down event\n",
 				       __func__);
 			RCU_INIT_POINTER(rmnet_perf_deag_entry, NULL);
+			RCU_INIT_POINTER(rmnet_perf_desc_entry, NULL);
+			RCU_INIT_POINTER(rmnet_perf_chain_end, NULL);
 		}
 		break;
 	case NETDEV_REGISTER:
-		pr_err("%s(): rmnet_perf netdevice register, name = %s\n",
-		       __func__, dev->name);
+		pr_info("%s(): rmnet_perf netdevice register, name = %s\n",
+			__func__, dev->name);
 		/* Check prevents us from allocating resources for every
 		 * interface
 		 */
-		if (!rmnet_perf_deag_entry &&
+		if (!rmnet_perf_config_hook_registered() &&
 		    strncmp(dev->name, "rmnet_data", 10) == 0) {
 			struct rmnet_priv *priv = netdev_priv(dev);
+
 			port = rmnet_get_port(priv->real_dev);
-			return_val |= rmnet_perf_netdev_up(priv->real_dev,
-							   port);
+			return_val = rmnet_perf_netdev_up(priv->real_dev,
+							  port);
 			if (return_val == RMNET_PERF_RESOURCE_MGMT_FAIL) {
 				pr_err("%s(): rmnet_perf allocation "
 				       "failed. Falling back on legacy path\n",
@@ -385,15 +435,13 @@
 				goto exit;
 			} else if (return_val ==
 				   RMNET_PERF_RESOURCE_MGMT_SEMI_FAIL) {
-				pr_err("%s(): rmnet_perf recycle buffer "
-				       "allocation or callback registry "
+				pr_err("%s(): rmnet_perf callback registry "
 				       "failed. Continue without them\n",
 					__func__);
 			}
-			RCU_INIT_POINTER(rmnet_perf_deag_entry,
-					 rmnet_perf_core_deaggregate);
-			pr_err("%s(): rmnet_perf registered on "
-			       "name = %s\n", __func__, dev->name);
+			rmnet_perf_core_set_ingress_hook();
+			pr_info("%s(): rmnet_perf registered on name = %s\n",
+				__func__, dev->name);
 		}
 		break;
 	default:
@@ -405,17 +453,18 @@
 
 static struct notifier_block rmnet_perf_dev_notifier __read_mostly = {
 	.notifier_call = rmnet_perf_config_notify_cb,
+	.priority = 1,
 };
 
 int __init rmnet_perf_init(void)
 {
-	pr_err("%s(): initializing rmnet_perf\n", __func__);
+	pr_info("%s(): initializing rmnet_perf\n", __func__);
 	return register_netdevice_notifier(&rmnet_perf_dev_notifier);
 }
 
 void __exit rmnet_perf_exit(void)
 {
-	pr_err("%s(): exiting rmnet_perf\n", __func__);
+	pr_info("%s(): exiting rmnet_perf\n", __func__);
 	unregister_netdevice_notifier(&rmnet_perf_dev_notifier);
 }
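
One detail worth noting from the allocation hunks above: the entire rmnet_perf state (perf, the flow nodes, core_meta, and so on) comes from a single kmalloc(), and buffer_head is advanced through that chunk to hand out each sub-structure; storing the pointer in *perf before the NULL check also guarantees the caller sees NULL on failure. A reduced sketch of the carve-out pattern, with placeholder structures:

#include <linux/slab.h>

struct example_a { int x; };
struct example_b { int y; };

static int example_alloc_pair(struct example_a **a, struct example_b **b)
{
	void *head = kmalloc(sizeof(**a) + sizeof(**b), GFP_KERNEL);

	*a = head;	/* caller sees NULL on allocation failure */
	if (!head)
		return -ENOMEM;

	*b = head + sizeof(**a);	/* GNU C allows arithmetic on void * */
	return 0;
}

A single kfree() of the first pointer then releases everything, matching the "Since we allocated in one chunk, we will also free in one chunk" comment in the teardown path.
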
 
diff --git a/drivers/rmnet/perf/rmnet_perf_config.h b/drivers/rmnet/perf/rmnet_perf_config.h
index a8bf12f..62672f0 100644
--- a/drivers/rmnet/perf/rmnet_perf_config.h
+++ b/drivers/rmnet/perf/rmnet_perf_config.h
@@ -15,6 +15,7 @@
 
 #include <linux/skbuff.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h>
+#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_trace.h>
@@ -32,25 +33,11 @@
 };
 
 /* rmnet based variables that we rely on*/
-extern void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
-extern struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port,
-						 u8 mux_id);
-extern int rmnet_is_real_dev_registered(const struct net_device *real_dev);
-extern void rmnet_set_skb_proto(struct sk_buff *skb);
 extern int (*rmnet_perf_deag_entry)(struct sk_buff *skb);
-extern int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
-extern struct napi_struct *get_current_napi_context(void);
-//extern int napi_gro_complete(struct sk_buff *skb);
+extern void (*rmnet_perf_desc_entry)(struct rmnet_frag_descriptor *frag_desc,
+				     struct rmnet_port *port);
+extern void (*rmnet_perf_chain_end)(void);
 
-extern int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port,
-				bool rmnet_perf);
-extern int rmnet_map_dl_ind_register(struct rmnet_port *port,
-			      struct rmnet_map_dl_ind *dl_ind);
-extern int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
-				struct rmnet_map_dl_ind *dl_ind);
-extern struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
-extern void rmnet_map_cmd_init(struct rmnet_port *port);
-extern void rmnet_map_cmd_exit(struct rmnet_port *port);
 
 /* Function declarations */
 struct rmnet_perf *rmnet_perf_config_get_perf(void);
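
The extern function pointers retained above (rmnet_perf_deag_entry, rmnet_perf_desc_entry, rmnet_perf_chain_end) are the hand-off points the core rmnet driver is expected to invoke under RCU, and the rmnet_perf_core.c changes below install them with RCU_INIT_POINTER() depending on the ingress mode. The following is a hedged sketch of how such a hook is typically published and consumed; the caller side is an assumption about the core driver, not code from this patch, and all example_ names are placeholders.

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

/* Hook published by the accelerator module (placeholder). */
static int (*example_ingress_hook)(struct sk_buff *skb);

/* Module side: install or remove the hook. */
static void example_install_hook(int (*fn)(struct sk_buff *skb))
{
	rcu_assign_pointer(example_ingress_hook, fn);
}

static void example_remove_hook(void)
{
	RCU_INIT_POINTER(example_ingress_hook, NULL);
	synchronize_rcu();	/* wait for in-flight callers to drain */
}

/* Caller side: what the core driver is assumed to do per packet. */
static int example_try_hook(struct sk_buff *skb)
{
	int (*fn)(struct sk_buff *skb);
	int ret = -ENOENT;

	rcu_read_lock();
	fn = rcu_dereference(example_ingress_hook);
	if (fn)
		ret = fn(skb);
	rcu_read_unlock();

	return ret;
}
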
diff --git a/drivers/rmnet/perf/rmnet_perf_core.c b/drivers/rmnet/perf/rmnet_perf_core.c
index 703bd87..4166c5d 100644
--- a/drivers/rmnet/perf/rmnet_perf_core.c
+++ b/drivers/rmnet/perf/rmnet_perf_core.c
@@ -18,6 +18,7 @@
 #include <linux/jhash.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
+#include <linux/spinlock.h>
 #include <net/ip6_checksum.h>
 #include <net/tcp.h>
 #include <net/udp.h>
@@ -56,6 +57,12 @@
 module_param(rmnet_perf_core_bm_flush_on, ulong, 0644);
 MODULE_PARM_DESC(rmnet_perf_core_bm_flush_on, "turn on bm flushing");
 
+/* Number of non-ip packets coming into rmnet_perf */
+unsigned long int rmnet_perf_core_non_ip_count;
+module_param(rmnet_perf_core_non_ip_count, ulong, 0444);
+MODULE_PARM_DESC(rmnet_perf_core_non_ip_count,
+		 "Number of non-ip packets entering rmnet_perf");
+
 /* Number of ip packets coming into rmnet from physical device */
 unsigned long int rmnet_perf_core_pre_ip_count;
 module_param(rmnet_perf_core_pre_ip_count, ulong, 0644);
@@ -103,11 +110,67 @@
 MODULE_PARM_DESC(rmnet_perf_frag_flush,
 		 "Number of packet fragments flushed to stack");
 
-#define SHS_FLUSH 0
+unsigned long int rmnet_perf_qmap_size_mismatch = 0;
+module_param(rmnet_perf_qmap_size_mismatch, ulong, 0444);
+MODULE_PARM_DESC(rmnet_perf_qmap_size_mismatch,
+		 "Number of mismatches between QMAP and IP lengths");
+
+/* Handle deag by default for legacy behavior */
+static bool rmnet_perf_ingress_deag = true;
+module_param(rmnet_perf_ingress_deag, bool, 0444);
+MODULE_PARM_DESC(rmnet_perf_ingress_deag,
+		 "If true, rmnet_perf will handle QMAP deaggregation");
+
+#define SHS_FLUSH				0
+#define RECYCLE_BUFF_SIZE_THRESH		51200
+
+/* Lock around flow nodes for synchronization with rmnet_perf_opt_mode changes */
+static DEFINE_SPINLOCK(rmnet_perf_core_lock);
+
+void rmnet_perf_core_grab_lock(void)
+{
+	spin_lock_bh(&rmnet_perf_core_lock);
+}
+
+void rmnet_perf_core_release_lock(void)
+{
+	spin_unlock_bh(&rmnet_perf_core_lock);
+}
+
+/* rmnet_perf_core_set_ingress_hook() - sets appropriate ingress hook
+ *		in the core rmnet driver
+ *
+ * Return:
+ *		- void
+ **/
+void rmnet_perf_core_set_ingress_hook(void)
+{
+	if (rmnet_perf_core_is_deag_mode()) {
+		RCU_INIT_POINTER(rmnet_perf_deag_entry,
+				 rmnet_perf_core_deaggregate);
+		RCU_INIT_POINTER(rmnet_perf_desc_entry, NULL);
+	} else {
+		RCU_INIT_POINTER(rmnet_perf_deag_entry, NULL);
+		RCU_INIT_POINTER(rmnet_perf_desc_entry,
+				 rmnet_perf_core_desc_entry);
+		RCU_INIT_POINTER(rmnet_perf_chain_end,
+				 rmnet_perf_opt_chain_end);
+	}
+}
+
+/* rmnet_perf_core_is_deag_mode() - get the ingress mode of the module
+ *
+ * Return:
+ * 		- true: rmnet_perf is handling deaggregation
+ *		- false: rmnet_perf is not handling deaggregation
+ **/
+inline bool rmnet_perf_core_is_deag_mode(void)
+{
+	return rmnet_perf_ingress_deag;
+}
 
 /* rmnet_perf_core_free_held_skbs() - Free held SKBs given to us by physical
  *		device
- * @perf: allows access to our required global structures
  *
  * Requires caller does any cleanup of protocol specific data structures
  * i.e. for tcp_opt the flow nodes must first be flushed so that we are
@@ -116,8 +179,9 @@
  * Return:
  *		- void
  **/
-void rmnet_perf_core_free_held_skbs(struct rmnet_perf *perf)
+void rmnet_perf_core_free_held_skbs(void)
 {
+	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 	struct rmnet_perf_core_skb_list *skb_list;
 
 	skb_list = perf->core_meta->skb_needs_free_list;
@@ -164,7 +228,6 @@
 }
 
 /* rmnet_perf_core_elligible_for_cache_skb() - Find elligible recycled skb
- * @perf: allows access to our recycled buffer cache
  * @len: the outgoing packet length we plan to send out
  *
  * Traverse the buffer cache to see if we have any free buffers not
@@ -176,17 +239,18 @@
  *		- NULL: if the length is not elligible or if all buffers
  *				are busy in NW stack
  **/
-struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(struct rmnet_perf *perf,
-							u32 len)
+struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(u32 len)
 {
+	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 	struct rmnet_perf_core_64k_buff_pool *buff_pool;
 	u8 circ_index, iterations;
 	struct sk_buff *skbn;
 	int user_count;
 
-	if (len < 51200)
-		return NULL;
 	buff_pool = perf->core_meta->buff_pool;
+	if (len < RECYCLE_BUFF_SIZE_THRESH || !buff_pool->available[0])
+		return NULL;
+
 	circ_index = buff_pool->index;
 	iterations = 0;
 	while (iterations < RMNET_PERF_NUM_64K_BUFFS) {
@@ -217,49 +281,38 @@
  *		5 tuple
  * @pkt_info: characteristics of the current packet
  *
- * TODO: expand to 5 tuple once this becomes generic (right now we
- * ignore protocol because we know that we have TCP only for tcp_opt)
- *
  * Return:
  *    - hash_key: unsigned 32 bit integer that is produced
  **/
 u32 rmnet_perf_core_compute_flow_hash(struct rmnet_perf_pkt_info *pkt_info)
 {
 	u32 hash_key;
-	struct tcphdr *tp;
-	struct udphdr *uhdr;
+	struct udphdr *up;
 	u32 hash_five_tuple[11];
+	__be16 src = 0, dest = 0;
+
+	if (pkt_info->trans_proto == IPPROTO_TCP ||
+	    pkt_info->trans_proto == IPPROTO_UDP) {
+		up = pkt_info->trans_hdr.up;
+		src = up->source;
+		dest = up->dest;
+	}
 
 	if (pkt_info->ip_proto == 0x04) {
-		struct iphdr *ip4h = pkt_info->iphdr.v4hdr;
+		struct iphdr *ip4h = pkt_info->ip_hdr.v4hdr;
 
 		hash_five_tuple[0] = ip4h->daddr;
 		hash_five_tuple[1] = ip4h->saddr;
 		hash_five_tuple[2] = ip4h->protocol;
-		switch (pkt_info->trans_proto) {
-		case (IPPROTO_TCP):
-			tp = pkt_info->trns_hdr.tp;
-			hash_five_tuple[3] = tp->dest;
-			hash_five_tuple[4] = tp->source;
-			break;
-		case (IPPROTO_UDP):
-			uhdr = pkt_info->trns_hdr.up;
-			hash_five_tuple[3] = uhdr->dest;
-			hash_five_tuple[4] = uhdr->source;
-			break;
-		default:
-			hash_five_tuple[3] = 0;
-			hash_five_tuple[4] = 0;
-			break;
-		}
+		hash_five_tuple[3] = dest;
+		hash_five_tuple[4] = src;
 		hash_key = jhash2(hash_five_tuple, 5, 0);
 	} else {
-		struct ipv6hdr *ip6h = (struct ipv6hdr *) pkt_info->iphdr.v6hdr;
+		struct ipv6hdr *ip6h = pkt_info->ip_hdr.v6hdr;
+		struct in6_addr daddr = ip6h->daddr;
+		struct in6_addr saddr = ip6h->saddr;
 
-		struct	in6_addr daddr = ip6h->daddr;
-		struct	in6_addr saddr = ip6h->saddr;
-
-		hash_five_tuple[0] =  ((u32 *) &daddr)[0];
+		hash_five_tuple[0] = ((u32 *) &daddr)[0];
 		hash_five_tuple[1] = ((u32 *) &daddr)[1];
 		hash_five_tuple[2] = ((u32 *) &daddr)[2];
 		hash_five_tuple[3] = ((u32 *) &daddr)[3];
@@ -268,29 +321,15 @@
 		hash_five_tuple[6] = ((u32 *) &saddr)[2];
 		hash_five_tuple[7] = ((u32 *) &saddr)[3];
 		hash_five_tuple[8] = ip6h->nexthdr;
-		switch (pkt_info->trans_proto) {
-		case (IPPROTO_TCP):
-			tp = pkt_info->trns_hdr.tp;
-			hash_five_tuple[9] = tp->dest;
-			hash_five_tuple[10] = tp->source;
-			break;
-		case (IPPROTO_UDP):
-			uhdr = pkt_info->trns_hdr.up;
-			hash_five_tuple[9] = uhdr->dest;
-			hash_five_tuple[10] = uhdr->source;
-			break;
-		default:
-			hash_five_tuple[9] = 0;
-			hash_five_tuple[10] = 0;
-			break;
-		}
+		hash_five_tuple[9] = dest;
+		hash_five_tuple[10] = src;
 		hash_key = jhash2(hash_five_tuple, 11, 0);
 	}
+
 	return hash_key;
 }
 
 /* rmnet_perf_core_accept_new_skb() - Add SKB to list to be freed later
- * @perf: allows access to our required global structures
  * @skb: the incoming aggregated MAP frame from PND
  *
  * Adds to a running list of SKBs which we will free at a later
@@ -301,9 +340,9 @@
  * Return:
  *		- void
  **/
-static void rmnet_perf_core_accept_new_skb(struct rmnet_perf *perf,
-					   struct sk_buff *skb)
+void rmnet_perf_core_accept_new_skb(struct sk_buff *skb)
 {
+	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 	struct rmnet_perf_core_skb_list *skb_needs_free_list;
 
 	skb_needs_free_list = perf->core_meta->skb_needs_free_list;
@@ -343,61 +382,38 @@
 		rmnet_perf_core_pkt_size[RMNET_PERF_CORE_0_PLUS]++;
 }
 
-/* rmnet_perf_core_send_skb() - Send (potentially) tcp_opt'd SKB to NW stack
+/* rmnet_perf_core_send_skb() - Send SKB to the network stack
  * @skb: packet to send
  * @ep: VND to send packet to
- * @perf: allows access to our required global structures
- *
- * Take newly formed linear SKB from tcp_opt and flush it up the stack
- * Also works with a non-tcp_opt'd packet, i.e. regular UDP packet
  *
  * Return:
  *    - void
  **/
-void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep,
-			      struct rmnet_perf *perf, struct rmnet_perf_pkt_info *pkt_info)
+void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep)
 {
-	unsigned char ip_version;
-	unsigned char *data;
-	struct iphdr *ip4hn;
-	struct ipv6hdr *ip6hn;
+	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 
+	/* Log our outgoing size */
 	rmnet_perf_core_packet_sz_stats(skb->len);
-	data = (unsigned char *)(skb->data);
+
 	if (perf->rmnet_port->data_format & 8)
 		skb->dev = ep->egress_dev;
-	ip_version = (*data & 0xF0) >> 4;
-	if (ip_version == 0x04) {
-		ip4hn = (struct iphdr *) data;
-		rmnet_set_skb_proto(skb);
-		/* If the checksum is unnecessary, update the header fields.
-		 * Otherwise, we know that this is a single packet that
-		 * either failed checksum validation, or is not coalescable
-		 * (fragment, ICMP, etc), so don't touch the headers.
-		 */
-		if (skb_csum_unnecessary(skb)) {
-			ip4hn->tot_len = htons(skb->len);
-			ip4hn->check = 0;
-			ip4hn->check = ip_fast_csum(ip4hn, (int)ip4hn->ihl);
-		}
-		rmnet_deliver_skb(skb, perf->rmnet_port);
-	} else if (ip_version == 0x06) {
-		ip6hn = (struct ipv6hdr *)data;
-		rmnet_set_skb_proto(skb);
-		if (skb_csum_unnecessary(skb)) {
-			ip6hn->payload_len = htons(skb->len -
-						   sizeof(struct ipv6hdr));
-		}
-		rmnet_deliver_skb(skb, perf->rmnet_port);
-	} else {
-		pr_err("%s(): attempted to send invalid ip packet up stack\n",
-		       __func__);
-	}
+
+	rmnet_set_skb_proto(skb);
+	rmnet_deliver_skb(skb, perf->rmnet_port);
+}
+
+void rmnet_perf_core_send_desc(struct rmnet_frag_descriptor *frag_desc)
+{
+	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
+
+	/* Log our outgoing size */
+	rmnet_perf_core_packet_sz_stats(0);
+
+	rmnet_frag_deliver(frag_desc, perf->rmnet_port);
 }
 
 /* rmnet_perf_core_flush_curr_pkt() - Send a single ip packet up the stack
- * @perf: allows access to our required global structures
- * @skb: packet to send
  * @pkt_info: characteristics of the current packet
  * @packet_len: length of the packet we need to allocate for
  *
@@ -407,43 +423,50 @@
  * Return:
  *    - void
  **/
-void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf *perf,
-				    struct sk_buff *skb,
-				    struct rmnet_perf_pkt_info *pkt_info,
+void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf_pkt_info *pkt_info,
 				    u16 packet_len, bool flush_shs,
 				    bool skip_hash)
 {
-	struct sk_buff *skbn;
-	struct rmnet_endpoint *ep = pkt_info->ep;
-
 	if (packet_len > 65536) {
 		pr_err("%s(): Packet too long", __func__);
 		return;
 	}
 
-	/* allocate the sk_buff of proper size for this packet */
-	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
-			 GFP_ATOMIC);
-	if (!skbn)
-		return;
+	if (!rmnet_perf_core_is_deag_mode()) {
+		struct rmnet_frag_descriptor *frag_desc = pkt_info->frag_desc;
 
-	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
-	skb_put(skbn, packet_len);
-	memcpy(skbn->data, pkt_info->iphdr.v4hdr, packet_len);
+		/* Only set hash info if we actually calculated it */
+		if (!skip_hash)
+			frag_desc->hash = pkt_info->hash_key;
 
-	/* If the packet passed checksum validation, tell the stack */
-	if (pkt_info->csum_valid)
-		skbn->ip_summed = CHECKSUM_UNNECESSARY;
-	skbn->dev = skb->dev;
+		frag_desc->flush_shs = flush_shs;
+		rmnet_perf_core_send_desc(frag_desc);
+	} else {
+		struct sk_buff *skb;
 
-	/* Only set hash info if we actually calculated it */
-	if (!skip_hash) {
-		skbn->hash = pkt_info->hash_key;
-		skbn->sw_hash = 1;
+		skb = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
+				GFP_ATOMIC);
+		if (!skb)
+			return;
+
+		skb_reserve(skb, RMNET_MAP_DEAGGR_HEADROOM);
+		skb_put_data(skb, pkt_info->ip_hdr.v4hdr, packet_len);
+
+		/* If the packet passed checksum validation, tell the stack */
+		if (pkt_info->csum_valid)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		skb->dev = pkt_info->skb->dev;
+
+		/* Only set hash information if we actually calculated it */
+		if (!skip_hash) {
+			skb->hash = pkt_info->hash_key;
+			skb->sw_hash = 1;
+		}
+
+		skb->cb[SHS_FLUSH] = flush_shs;
+		rmnet_perf_core_send_skb(skb, pkt_info->ep);
 	}
-
-	skbn->cb[SHS_FLUSH] = (char) flush_shs;
-	rmnet_perf_core_send_skb(skbn, ep, perf, pkt_info);
 }
 
 /* DL marker is off, we need to flush more aggresively at end of chains */
@@ -463,6 +486,13 @@
 }
 
 void
+rmnet_perf_core_handle_map_control_start_v2(struct rmnet_map_dl_ind_hdr *dlhdr,
+				struct rmnet_map_control_command_header *qcmd)
+{
+	rmnet_perf_core_handle_map_control_start(dlhdr);
+}
+
+void
 rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
 {
 	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
@@ -474,7 +504,7 @@
 	 */
 	if (!bm_state->wait_for_start) {
 		/* flush everything, we got a 2nd start */
-		rmnet_perf_opt_flush_all_flow_nodes(perf);
+		rmnet_perf_opt_flush_all_flow_nodes();
 		rmnet_perf_core_flush_reason_cnt[
 					RMNET_PERF_CORE_DL_MARKER_FLUSHES]++;
 	} else {
@@ -484,8 +514,14 @@
 	bm_state->curr_seq = dlhdr->le.seq;
 	bm_state->expect_packets = dlhdr->le.pkts;
 	trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_START_DL_MRK,
-						bm_state->expect_packets, 0xDEF, 0xDEF, 0xDEF, NULL,
-						NULL);
+			     bm_state->expect_packets, 0xDEF, 0xDEF, 0xDEF,
+			     NULL, NULL);
+}
+
+void rmnet_perf_core_handle_map_control_end_v2(struct rmnet_map_dl_ind_trl *dltrl,
+				struct rmnet_map_control_command_header *qcmd)
+{
+	rmnet_perf_core_handle_map_control_end(dltrl);
 }
 
 void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
@@ -494,20 +530,21 @@
 	struct rmnet_perf_core_burst_marker_state *bm_state;
 
 	bm_state = perf->core_meta->bm_state;
-	rmnet_perf_opt_flush_all_flow_nodes(perf);
+	rmnet_perf_opt_flush_all_flow_nodes();
 	rmnet_perf_core_flush_reason_cnt[RMNET_PERF_CORE_DL_MARKER_FLUSHES]++;
 	bm_state->wait_for_start = true;
 	bm_state->curr_seq = 0;
 	bm_state->expect_packets = 0;
-	trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_END_DL_MRK, 0xDEF, 0xDEF,
-						0xDEF, 0xDEF, NULL, NULL);
+	trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_END_DL_MRK, 0xDEF,
+			     0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 }
 
 int rmnet_perf_core_validate_pkt_csum(struct sk_buff *skb,
 				      struct rmnet_perf_pkt_info *pkt_info)
 {
 	int result;
-	unsigned int pkt_len = pkt_info->header_len + pkt_info->payload_len;
+	unsigned int pkt_len = pkt_info->ip_len + pkt_info->trans_len +
+			       pkt_info->payload_len;
 
 	skb_pull(skb, sizeof(struct rmnet_map_header));
 	if (pkt_info->ip_proto == 0x04) {
@@ -526,98 +563,442 @@
 	return result;
 }
 
+/* rmnet_perf_core_dissect_pkt() - Extract packet header metadata for easier
+ * lookup later
+ * @payload: the data to analyze
+ * @offset: Offset from start of payload to the IP header
+ * @pkt_info: struct to fill in
+ * @pkt_len: length of the packet
+ * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
+ * @len_mismatch: set to true if there is a mismatch between the IP length and
+ * the QMAP length of the packet
+ *
+ * Return:
+ *		- true if packet needs to be dropped
+ *		- false if rmnet_perf can potentially optimize
+ **/
+bool rmnet_perf_core_dissect_pkt(unsigned char *payload,
+				 struct rmnet_perf_pkt_info *pkt_info,
+				 int offset, u16 pkt_len, bool *skip_hash,
+				 bool *len_mismatch)
+{
+	bool flush = true;
+	bool mismatch = false;
+	u16 ip_pkt_len = 0;
+
+	payload += offset;
+	pkt_info->ip_proto = (*payload & 0xF0) >> 4;
+	/* Set the initial IP packet length based on the descriptor size if
+	 * this packet has already been segmented for any reason, as the IP
+	 * header will no longer be correct.
+	 */
+	if (!rmnet_perf_core_is_deag_mode() &&
+	    pkt_info->frag_desc->hdr_ptr !=
+	    rmnet_frag_data_ptr(pkt_info->frag_desc)) {
+		ip_pkt_len = skb_frag_size(&pkt_info->frag_desc->frag);
+		ip_pkt_len += pkt_info->frag_desc->ip_len;
+		ip_pkt_len += pkt_info->frag_desc->trans_len;
+	}
+
+	if (pkt_info->ip_proto == 4) {
+		struct iphdr *iph;
+
+		iph = (struct iphdr *)payload;
+		pkt_info->ip_hdr.v4hdr = iph;
+
+		/* Pass off frags immediately */
+		if (iph->frag_off & htons(IP_MF | IP_OFFSET)) {
+			rmnet_perf_frag_flush++;
+			goto done;
+		}
+
+		if (!ip_pkt_len)
+			ip_pkt_len = ntohs(iph->tot_len);
+
+		mismatch = pkt_len != ip_pkt_len;
+		pkt_info->ip_len = iph->ihl * 4;
+		pkt_info->trans_proto = iph->protocol;
+
+		if (!rmnet_perf_core_is_deag_mode()) {
+			pkt_info->frag_desc->hdrs_valid = 1;
+			pkt_info->frag_desc->ip_proto = 4;
+			pkt_info->frag_desc->ip_len = pkt_info->ip_len;
+			pkt_info->frag_desc->trans_proto =
+				pkt_info->trans_proto;
+		}
+	} else if (pkt_info->ip_proto == 6) {
+		struct ipv6hdr *ip6h;
+		int len;
+		__be16 frag_off;
+		u8 protocol;
+
+		ip6h = (struct ipv6hdr *)payload;
+		pkt_info->ip_hdr.v6hdr = ip6h;
+		protocol = ip6h->nexthdr;
+
+		/* Dive down the header chain */
+		if (!rmnet_perf_core_is_deag_mode())
+			len = rmnet_frag_ipv6_skip_exthdr(pkt_info->frag_desc,
+							  offset +
+							  sizeof(*ip6h),
+							  &protocol, &frag_off);
+		else
+			len = ipv6_skip_exthdr(pkt_info->skb,
+					       offset + sizeof(*ip6h),
+					       &protocol, &frag_off);
+		if (len < 0) {
+			/* Something somewhere has gone horribly wrong...
+			 * Let the stack deal with it.
+			 */
+			goto done;
+		}
+
+		/* Returned length will include the offset value */
+		len -= offset;
+
+		/* Pass off frags immediately */
+		if (frag_off) {
+			/* Add in frag header length for non-first frags.
+			 * ipv6_skip_exthdr() doesn't do that for you.
+			 */
+			if (protocol == NEXTHDR_FRAGMENT)
+				len += sizeof(struct frag_hdr);
+			pkt_info->ip_len = (u16)len;
+			rmnet_perf_frag_flush++;
+			goto done;
+		}
+
+		if (!ip_pkt_len)
+			ip_pkt_len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
+
+		mismatch = pkt_len != ip_pkt_len;
+		pkt_info->ip_len = (u16)len;
+		pkt_info->trans_proto = protocol;
+
+		if (!rmnet_perf_core_is_deag_mode()) {
+			pkt_info->frag_desc->hdrs_valid = 1;
+			pkt_info->frag_desc->ip_proto = 6;
+			pkt_info->frag_desc->ip_len = pkt_info->ip_len;
+			pkt_info->frag_desc->trans_proto =
+				pkt_info->trans_proto;
+		}
+	} else {
+		/* Not a valid IP packet */
+		return true;
+	}
+
+	if (pkt_info->trans_proto == IPPROTO_TCP) {
+		struct tcphdr *tp;
+
+		tp = (struct tcphdr *)(payload + pkt_info->ip_len);
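+		/* doff counts 32-bit words, so the header length is doff * 4 */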
+		pkt_info->trans_len = tp->doff * 4;
+		pkt_info->trans_hdr.tp = tp;
+
+		if (!rmnet_perf_core_is_deag_mode())
+			pkt_info->frag_desc->trans_len = pkt_info->trans_len;
+	} else if (pkt_info->trans_proto == IPPROTO_UDP) {
+		struct udphdr *up;
+
+		up = (struct udphdr *)(payload + pkt_info->ip_len);
+		pkt_info->trans_len = sizeof(*up);
+		pkt_info->trans_hdr.up = up;
+
+		if (!rmnet_perf_core_is_deag_mode())
+			pkt_info->frag_desc->trans_len = pkt_info->trans_len;
+	} else {
+		/* Not a protocol we can optimize */
+		if (!rmnet_perf_core_is_deag_mode())
+			pkt_info->frag_desc->hdrs_valid = 0;
+
+		goto done;
+	}
+
+	flush = false;
+	pkt_info->hash_key = rmnet_perf_core_compute_flow_hash(pkt_info);
+
+done:
+	pkt_info->payload_len = pkt_len - pkt_info->ip_len -
+				pkt_info->trans_len;
+	*skip_hash = flush;
+	*len_mismatch = mismatch;
+	if (mismatch) {
+		rmnet_perf_qmap_size_mismatch++;
+		if (!rmnet_perf_core_is_deag_mode())
+			pkt_info->frag_desc->hdrs_valid = 0;
+	}
+
+	return false;
+}
+
+/* rmnet_perf_core_dissect_skb() - Extract packet header metadata for easier
+ * lookup later
+ * @skb: the skb to analyze
+ * @pkt_info: struct to fill in
+ * @offset: offset from start of skb data to the IP header
+ * @pkt_len: length of the packet
+ * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
+ * @len_mismatch: set to true if there is a mismatch between the IP length and
+ * the QMAP length of the packet
+ *
+ * Return:
+ *		- true if packet needs to be dropped
+ *		- false if rmnet_perf can potentially optimize
+ **/
+
+bool rmnet_perf_core_dissect_skb(struct sk_buff *skb,
+				 struct rmnet_perf_pkt_info *pkt_info,
+				 int offset, u16 pkt_len, bool *skip_hash,
+				 bool *len_mismatch)
+{
+	pkt_info->skb = skb;
+	return rmnet_perf_core_dissect_pkt(skb->data, pkt_info, offset,
+					   pkt_len, skip_hash, len_mismatch);
+}
+
+/* rmnet_perf_core_dissect_desc() - Extract packet header metadata for easier
+ * lookup later
+ * @frag_desc: the descriptor to analyze
+ * @pkt_info: struct to fill in
+ * @offset: offset from start of descriptor payload to the IP header
+ * @pkt_len: length of the packet
+ * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
+ * @len_mismatch: set to true if there is a mismatch between the IP length and
+ * the QMAP length of the packet
+ *
+ * Return:
+ *		- true if packet needs to be flushed out immediately
+ *		- false if rmnet_perf can potentially optimize
+ **/
+
+bool rmnet_perf_core_dissect_desc(struct rmnet_frag_descriptor *frag_desc,
+				  struct rmnet_perf_pkt_info *pkt_info,
+				  int offset, u16 pkt_len, bool *skip_hash,
+				  bool *len_mismatch)
+{
+	u8 *payload = frag_desc->hdr_ptr;
+
+	/* If this was segmented, the headers aren't in the pkt_len. Add them
+	 * back for consistency.
+	 */
+	if (payload != rmnet_frag_data_ptr(frag_desc))
+		pkt_len += frag_desc->ip_len + frag_desc->trans_len;
+
+	pkt_info->frag_desc = frag_desc;
+	return rmnet_perf_core_dissect_pkt(payload, pkt_info, offset, pkt_len,
+					   skip_hash, len_mismatch);
+}
+
 void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
 					   struct rmnet_endpoint *ep,
 					   struct rmnet_perf_pkt_info *pkt_info,
 					   u32 frame_len, u32 trailer_len)
 {
-	unsigned char *payload = (unsigned char *)
-				 (skb->data + sizeof(struct rmnet_map_header));
-	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
+	unsigned int offset = sizeof(struct rmnet_map_header);
 	u16 pkt_len;
 	bool skip_hash = false;
+	bool len_mismatch = false;
 
-	pkt_len = frame_len - sizeof(struct rmnet_map_header) - trailer_len;
+	pkt_len = frame_len - offset - trailer_len;
+	memset(pkt_info, 0, sizeof(*pkt_info));
 	pkt_info->ep = ep;
-	pkt_info->ip_proto = (*payload & 0xF0) >> 4;
-	if (pkt_info->ip_proto == 4) {
-		struct iphdr *iph = (struct iphdr *)payload;
 
-		pkt_info->iphdr.v4hdr = iph;
-		pkt_info->trans_proto = iph->protocol;
-		pkt_info->header_len = iph->ihl * 4;
-		skip_hash = !!(ntohs(iph->frag_off) & (IP_MF | IP_OFFSET));
-	} else if (pkt_info->ip_proto == 6) {
-		struct ipv6hdr *iph = (struct ipv6hdr *)payload;
-
-		pkt_info->iphdr.v6hdr = iph;
-		pkt_info->trans_proto = iph->nexthdr;
-		pkt_info->header_len = sizeof(*iph);
-		skip_hash = iph->nexthdr == NEXTHDR_FRAGMENT;
-	} else {
+	if (rmnet_perf_core_dissect_skb(skb, pkt_info, offset, pkt_len,
+					&skip_hash, &len_mismatch)) {
+		rmnet_perf_core_non_ip_count++;
+		/* account for the bulk add in rmnet_perf_core_deaggregate() */
+		rmnet_perf_core_pre_ip_count--;
 		return;
 	}
 
-	/* Push out fragments immediately */
 	if (skip_hash) {
-		rmnet_perf_frag_flush++;
+		/* We're flushing anyway, so no need to check result */
 		rmnet_perf_core_validate_pkt_csum(skb, pkt_info);
 		goto flush;
+	} else if (len_mismatch) {
+		/* We're flushing anyway, so no need to check result */
+		rmnet_perf_core_validate_pkt_csum(skb, pkt_info);
+		/* Flush anything in the hash to avoid any OOO */
+		rmnet_perf_opt_flush_flow_by_hash(pkt_info->hash_key);
+		goto flush;
 	}
 
-	if (pkt_info->trans_proto == IPPROTO_TCP) {
-		struct tcphdr *tp = (struct tcphdr *)
-				    (payload + pkt_info->header_len);
-
-		pkt_info->trns_hdr.tp = tp;
-		pkt_info->header_len += tp->doff * 4;
-		pkt_info->payload_len = pkt_len - pkt_info->header_len;
-		pkt_info->hash_key =
-			rmnet_perf_core_compute_flow_hash(pkt_info);
-
-		if (rmnet_perf_core_validate_pkt_csum(skb, pkt_info))
-			goto flush;
-
-		if (!rmnet_perf_opt_ingress(perf, skb, pkt_info))
-			goto flush;
-	} else if (pkt_info->trans_proto == IPPROTO_UDP) {
-		struct udphdr *up = (struct udphdr *)
-				    (payload + pkt_info->header_len);
-
-		pkt_info->trns_hdr.up = up;
-		pkt_info->header_len += sizeof(*up);
-		pkt_info->payload_len = pkt_len - pkt_info->header_len;
-		pkt_info->hash_key =
-			rmnet_perf_core_compute_flow_hash(pkt_info);
-
-		if (rmnet_perf_core_validate_pkt_csum(skb, pkt_info))
-			goto flush;
-
-		if (!rmnet_perf_opt_ingress(perf, skb, pkt_info))
-			goto flush;
-	} else {
-		pkt_info->payload_len = pkt_len - pkt_info->header_len;
-		skip_hash = true;
-		/* We flush anyway, so the result of the validation
-		 * does not need to be checked.
-		 */
-		rmnet_perf_core_validate_pkt_csum(skb, pkt_info);
+	if (rmnet_perf_core_validate_pkt_csum(skb, pkt_info))
 		goto flush;
-	}
+
+	if (!rmnet_perf_opt_ingress(pkt_info))
+		goto flush;
 
 	return;
 
 flush:
-	rmnet_perf_core_flush_curr_pkt(perf, skb, pkt_info, pkt_len, false,
-				       skip_hash);
+	rmnet_perf_core_flush_curr_pkt(pkt_info, pkt_len, false, skip_hash);
 }
 
-/* rmnet_perf_core_deaggregate() - Deaggregated ip packets from map frame
- * @port: allows access to our required global structures
- * @skb: the incoming aggregated MAP frame from PND
+/* rmnet_perf_core_desc_entry() - Entry point for rmnet_perf's non-deag logic
+ * @frag_desc: the incoming frag descriptor from core driver
+ * @port: the rmnet_port struct from core driver
  *
- * If the packet is TCP then send it down the way of tcp_opt.
- * Otherwise we can send it down some other path.
+ * Return:
+ *		- void
+ **/
+void rmnet_perf_core_desc_entry(struct rmnet_frag_descriptor *frag_desc,
+				struct rmnet_port *port)
+{
+	struct rmnet_perf_pkt_info pkt_info;
+	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
+	u16 pkt_len = skb_frag_size(&frag_desc->frag);
+	bool skip_hash = true;
+	bool len_mismatch = false;
+
+	rmnet_perf_core_grab_lock();
+	perf->rmnet_port = port;
+	memset(&pkt_info, 0, sizeof(pkt_info));
+	if (rmnet_perf_core_dissect_desc(frag_desc, &pkt_info, 0, pkt_len,
+					 &skip_hash, &len_mismatch)) {
+		rmnet_perf_core_non_ip_count++;
+		rmnet_recycle_frag_descriptor(frag_desc, port);
+		rmnet_perf_core_release_lock();
+		return;
+	}
+
+	/* We know the packet is an IP packet now */
+	rmnet_perf_core_pre_ip_count++;
+	if (skip_hash) {
+		goto flush;
+	} else if (len_mismatch) {
+		/* Flush everything in the hash to avoid OOO */
+		rmnet_perf_opt_flush_flow_by_hash(pkt_info.hash_key);
+		goto flush;
+	}
+
+	/* Skip packets with bad checksums.
+	 * This check is delayed here to allow packets that won't be
+	 * checksummed by hardware (non-TCP/UDP data, fragments, padding) to be
+	 * flushed by the above checks. This ensures that we report statistics
+	 * correctly (i.e. rmnet_perf_frag_flush increases for each fragment),
+	 * and don't report packets with valid checksums that weren't offloaded
+	 * as "bad checksum" packets.
+	 */
+	if (!frag_desc->csum_valid)
+		goto flush;
+
+	if (!rmnet_perf_opt_ingress(&pkt_info))
+		goto flush;
+
+	rmnet_perf_core_release_lock();
+	return;
+
+flush:
+	rmnet_perf_core_flush_curr_pkt(&pkt_info, pkt_len, false, skip_hash);
+	rmnet_perf_core_release_lock();
+}
+
+int __rmnet_perf_core_deaggregate(struct sk_buff *skb, struct rmnet_port *port)
+{
+	struct rmnet_perf_pkt_info pkt_info;
+	struct timespec curr_time, diff;
+	static struct timespec last_drop_time;
+	struct rmnet_map_header *maph;
+	struct rmnet_endpoint *ep;
+	u32 map_frame_len;
+	u32 trailer_len = 0;
+	int count = 0;
+	u8 mux_id;
+
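+	/* Walk each MAP frame in the aggregated skb */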
+	while (skb->len != 0) {
+		maph = (struct rmnet_map_header *)skb->data;
+
+		trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_DEAG_PKT,
+				     0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+
+		/* Some hardware can send us empty frames. Catch them.
+		 * This includes IPA sending end of rx indications.
+		 */
+		if (ntohs(maph->pkt_len) == 0)
+			goto out;
+
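+		/* Frame length is the MAP payload plus the MAP header itself */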
+		map_frame_len = ntohs(maph->pkt_len) +
+				sizeof(struct rmnet_map_header);
+
+		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
+			trailer_len = sizeof(struct rmnet_map_dl_csum_trailer);
+			map_frame_len += trailer_len;
+		}
+
+		if (((int)skb->len - (int)map_frame_len) < 0)
+			goto out;
+
+		/* Handle any command packets */
+		if (maph->cd_bit) {
+			/* rmnet_perf is only used on targets with DL marker.
+			 * The legacy map commands are not used, so we don't
+			 * check for them. If this changes, rmnet_map_command()
+			 * will need to be called, and that function updated to
+			 * not free SKBs if called from this module.
+			 */
+			if (port->data_format &
+			    RMNET_INGRESS_FORMAT_DL_MARKER)
+				/* rmnet_map_flow_command() will handle pulling
+				 * the data for us if it's actually a valid DL
+				 * marker.
+				 */
+				if (!rmnet_map_flow_command(skb, port, true))
+					continue;
+
+			goto pull;
+		}
+
+		mux_id = maph->mux_id;
+		if (mux_id >= RMNET_MAX_LOGICAL_EP)
+			goto skip_frame;
+
+		ep = rmnet_get_endpoint(port, mux_id);
+		if (!ep)
+			goto skip_frame;
+		skb->dev = ep->egress_dev;
+
+#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
+		/* Wakeup PS work on DL packets */
+		if ((port->data_format & RMNET_INGRESS_FORMAT_PS) &&
+		    !maph->cd_bit)
+			qmi_rmnet_work_maybe_restart(port);
+#endif
+
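+		/* If enabled, drop at most one frame per packet_dropper_time seconds */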
+		if (enable_packet_dropper) {
+			getnstimeofday(&curr_time);
+			if (last_drop_time.tv_sec == 0 &&
+			    last_drop_time.tv_nsec == 0)
+				getnstimeofday(&last_drop_time);
+			diff = timespec_sub(curr_time, last_drop_time);
+			if (diff.tv_sec > packet_dropper_time) {
+				getnstimeofday(&last_drop_time);
+				pr_err("%s(): Dropped a packet!\n",
+				       __func__);
+				goto skip_frame;
+			}
+		}
+
+		/* if we got to this point, we are able to proceed
+		 * with processing the packet i.e. we know we are
+		 * dealing with a packet with no funny business inside
+		 */
+		rmnet_perf_core_handle_packet_ingress(skb, ep,
+						      &pkt_info,
+						      map_frame_len,
+						      trailer_len);
+skip_frame:
+		count++;
+pull:
+		skb_pull(skb, map_frame_len);
+	}
+
+out:
+	return count;
+}
+
+/* rmnet_perf_core_deaggregate() - Deaggregate ip packets from map frame
+ * @skb: the incoming aggregated MAP frame from PND
+ * @port: rmnet_port struct from core driver
  *
  * Return:
  *		- void
@@ -625,132 +1006,45 @@
 void rmnet_perf_core_deaggregate(struct sk_buff *skb,
 				 struct rmnet_port *port)
 {
-	u8 mux_id;
-	struct rmnet_map_header *maph;
-	uint32_t map_frame_len;
-	struct rmnet_endpoint *ep;
-	struct rmnet_perf_pkt_info pkt_info;
 	struct rmnet_perf *perf;
-	struct timespec curr_time, diff;
-	static struct timespec last_drop_time;
-	u32 trailer_len = 0;
+	struct rmnet_perf_core_burst_marker_state *bm_state;
 	int co = 0;
 	int chain_count = 0;
 
 	perf = rmnet_perf_config_get_perf();
 	perf->rmnet_port = port;
+	rmnet_perf_core_grab_lock();
 	while (skb) {
 		struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
 
 		skb_shinfo(skb)->frag_list = NULL;
 		chain_count++;
-		rmnet_perf_core_accept_new_skb(perf, skb);
-skip_frame:
-		while (skb->len != 0) {
-			maph = (struct rmnet_map_header *) skb->data;
-			if (port->data_format &
-			    RMNET_INGRESS_FORMAT_DL_MARKER) {
-				if (!rmnet_map_flow_command(skb, port, true))
-					goto skip_frame;
-			}
-
-			trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_DEAG_PKT, 0xDEF,
-								0xDEF, 0xDEF, 0xDEF, NULL, NULL);
-
-			/* Some hardware can send us empty frames. Catch them */
-			/* This includes IPA sending end of rx indications */
-			if (ntohs(maph->pkt_len) == 0) {
-				pr_err("Dropping empty MAP frame, co = %d", co);
-				goto next_chain;
-			}
-
-			map_frame_len = ntohs(maph->pkt_len) +
-					sizeof(struct rmnet_map_header);
-
-			if (port->data_format &
-			    RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
-				trailer_len =
-				    sizeof(struct rmnet_map_dl_csum_trailer);
-				map_frame_len += trailer_len;
-			}
-
-			if ((((int)skb->len) - ((int)map_frame_len)) < 0) {
-				pr_err("%s(): Got malformed packet. Dropping",
-				       __func__);
-				goto next_chain;
-			}
-
-			mux_id = RMNET_MAP_GET_MUX_ID(skb);
-			if (mux_id >= RMNET_MAX_LOGICAL_EP) {
-				pr_err("Got packet on %s with bad mux id %d",
-					skb->dev->name, mux_id);
-				goto drop_packets;
-			}
-
-			ep = rmnet_get_endpoint(port, mux_id);
-			if (!ep)
-				goto bad_data;
-			skb->dev = ep->egress_dev;
-
-#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
-			/* Wakeup PS work on DL packets */
-			if ((port->data_format & RMNET_INGRESS_FORMAT_PS) &&
-					!RMNET_MAP_GET_CD_BIT(skb))
-				qmi_rmnet_work_maybe_restart(port);
-#endif
-
-			if (enable_packet_dropper) {
-				getnstimeofday(&curr_time);
-				if (last_drop_time.tv_sec == 0 &&
-				    last_drop_time.tv_nsec == 0)
-					getnstimeofday(&last_drop_time);
-				diff = timespec_sub(curr_time, last_drop_time);
-				if (diff.tv_sec > packet_dropper_time) {
-					getnstimeofday(&last_drop_time);
-					pr_err("%s(): Dropped a packet!\n",
-					       __func__);
-					goto bad_data;
-				}
-			}
-			/* if we got to this point, we are able to proceed
-			 * with processing the packet i.e. we know we are
-			 * dealing with a packet with no funny business inside
-			 */
-			rmnet_perf_core_handle_packet_ingress(skb, ep,
-							      &pkt_info,
-							      map_frame_len,
-							      trailer_len);
-bad_data:
-			skb_pull(skb, map_frame_len);
-			co++;
-		}
-next_chain:
+		rmnet_perf_core_accept_new_skb(skb);
+		co += __rmnet_perf_core_deaggregate(skb, port);
 		skb = skb_frag;
 	}
 
-	perf->core_meta->bm_state->expect_packets -= co;
+	bm_state = perf->core_meta->bm_state;
+	bm_state->expect_packets -= co;
 	/* if we ran out of data and should have gotten an end marker,
 	 * then we can flush everything
 	 */
-	if (!rmnet_perf_core_bm_flush_on ||
-	    (int) perf->core_meta->bm_state->expect_packets <= 0) {
-		rmnet_perf_opt_flush_all_flow_nodes(perf);
-		rmnet_perf_core_free_held_skbs(perf);
+	if (port->data_format == RMNET_INGRESS_FORMAT_DL_MARKER_V2 ||
+	    !bm_state->callbacks_valid || !rmnet_perf_core_bm_flush_on ||
+	    (int) bm_state->expect_packets <= 0) {
+		rmnet_perf_opt_flush_all_flow_nodes();
+		rmnet_perf_core_free_held_skbs();
 		rmnet_perf_core_flush_reason_cnt[
 					RMNET_PERF_CORE_IPA_ZERO_FLUSH]++;
 	} else if (perf->core_meta->skb_needs_free_list->num_skbs_held >=
 		   rmnet_perf_core_num_skbs_max) {
-		rmnet_perf_opt_flush_all_flow_nodes(perf);
-		rmnet_perf_core_free_held_skbs(perf);
+		rmnet_perf_opt_flush_all_flow_nodes();
+		rmnet_perf_core_free_held_skbs();
 		rmnet_perf_core_flush_reason_cnt[
 					RMNET_PERF_CORE_SK_BUFF_HELD_LIMIT]++;
 	}
 
-	goto update_stats;
-drop_packets:
-	rmnet_perf_opt_flush_all_flow_nodes(perf);
-	rmnet_perf_core_free_held_skbs(perf);
-update_stats:
 	rmnet_perf_core_pre_ip_count += co;
 	rmnet_perf_core_chain_count[chain_count]++;
+	rmnet_perf_core_release_lock();
 }
diff --git a/drivers/rmnet/perf/rmnet_perf_core.h b/drivers/rmnet/perf/rmnet_perf_core.h
index 91557fc..735cb3b 100644
--- a/drivers/rmnet/perf/rmnet_perf_core.h
+++ b/drivers/rmnet/perf/rmnet_perf_core.h
@@ -12,6 +12,7 @@
 #include <linux/skbuff.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h>
+#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.h>
 
 #ifndef _RMNET_PERF_CORE_H_
 #define _RMNET_PERF_CORE_H_
@@ -25,28 +26,49 @@
 	struct rmnet_port *rmnet_port;
 };
 
-/*Identifying info for the current packet being deaggregated
+/* Identifying info for the current packet being deaggregated
  * this is so we don't have to redundantly check things in the
  * header of the packet. Also prevents excessive parameters
  */
 struct rmnet_perf_pkt_info {
+	/* True if this is the first packet being put into a flow node. */
 	bool first_packet;
 	bool csum_valid;
-	unsigned char ip_proto;
-	unsigned char trans_proto;
-	u16 header_len;
+
+	/* Header protocols */
+	u8 ip_proto;
+	u8 trans_proto;
+
+	/* Header lengths */
+	u16 ip_len;
+	u16 trans_len;
+
+	/* Data length */
 	u16 payload_len;
+
+	/* Hash over standard 5 tuple */
 	u32 hash_key;
+
+	/* TCP timestamp */
 	u32 curr_timestamp;
+
+	/* Headers */
 	union {
 		struct iphdr *v4hdr;
 		struct ipv6hdr *v6hdr;
-	} iphdr;
+	} ip_hdr;
 	union {
 		struct tcphdr *tp;
 		struct udphdr *up;
-	} trns_hdr;
+	} trans_hdr;
+
 	struct rmnet_endpoint *ep;
+
+	/* The base packet itself */
+	union {
+		struct sk_buff *skb;
+		struct rmnet_frag_descriptor *frag_desc;
+	};
 };
 
 struct rmnet_perf_core_64k_buff_pool {
@@ -56,6 +78,10 @@
 
 struct rmnet_perf_core_burst_marker_state {
 	bool wait_for_start;
+	/* If the callbacks fail to register, then we want to flush at the
+	 * end of every chain
+	 */
+	bool callbacks_valid;
 	u32 curr_seq;
 	u32 expect_packets;
 };
@@ -75,8 +101,6 @@
 	/* recycled buffer pool */
 	struct rmnet_perf_core_64k_buff_pool *buff_pool;
 	struct net_device *dev;
-	//struct hrtimer hrtimer;
-	//spinlock_t timer_lock;
 	struct rmnet_perf_core_burst_marker_state *bm_state;
 	struct rmnet_map_dl_ind *dl_ind;
 	struct qmi_rmnet_ps_ind *ps_ind;
@@ -111,25 +135,35 @@
 	RMNET_PERF_DEAG_PKT,
 };
 
+
+void rmnet_perf_core_grab_lock(void);
+void rmnet_perf_core_release_lock(void);
 void rmnet_perf_core_ps_on(void *port);
 void rmnet_perf_core_ps_off(void *port);
+bool rmnet_perf_core_is_deag_mode(void);
+void rmnet_perf_core_set_ingress_hook(void);
 void rmnet_perf_core_reset_recycled_skb(struct sk_buff *skb);
-struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(struct rmnet_perf *perf,
-							u32 len);
-void rmnet_perf_core_free_held_skbs(struct rmnet_perf *perf);
-void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep,
-			      struct rmnet_perf *perf,
-			      struct rmnet_perf_pkt_info *pkt_info);
-void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf *perf,
-				    struct sk_buff *skb,
-				    struct rmnet_perf_pkt_info *pkt_info,
+struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(u32 len);
+void rmnet_perf_core_free_held_skbs(void);
+void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep);
+void rmnet_perf_core_send_desc(struct rmnet_frag_descriptor *frag_desc);
+void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf_pkt_info *pkt_info,
 				    u16 packet_len, bool flush_shs,
 				    bool skip_hash);
 void rmnet_perf_core_deaggregate(struct sk_buff *skb,
+				 struct rmnet_port *port);
+void rmnet_perf_core_desc_entry(struct rmnet_frag_descriptor *frag_desc,
 				struct rmnet_port *port);
 u32 rmnet_perf_core_compute_flow_hash(struct rmnet_perf_pkt_info *pkt_info);
 void rmnet_perf_core_flush_single_gro_flow(u32 hash_key);
-void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl);
+void
+rmnet_perf_core_handle_map_control_end_v2(struct rmnet_map_dl_ind_trl *dltrl,
+				struct rmnet_map_control_command_header *qcmd);
+void
+rmnet_perf_core_handle_map_control_start_v2(struct rmnet_map_dl_ind_hdr *dlhdr,
+				struct rmnet_map_control_command_header *qcmd);
+void
+rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl);
 void
 rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr);
 
diff --git a/drivers/rmnet/perf/rmnet_perf_opt.c b/drivers/rmnet/perf/rmnet_perf_opt.c
index 991800d..d6b21f7 100644
--- a/drivers/rmnet/perf/rmnet_perf_opt.c
+++ b/drivers/rmnet/perf/rmnet_perf_opt.c
@@ -20,6 +20,7 @@
 #include <linux/spinlock.h>
 #include <net/ip.h>
 #include <net/checksum.h>
+#include <net/ip6_checksum.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h>
 #include "rmnet_perf_opt.h"
@@ -56,29 +57,24 @@
 /* What protocols we optimize */
 static int rmnet_perf_opt_mode = RMNET_PERF_OPT_MODE_ALL;
 
-/* Lock around flow nodes for syncornization with rmnet_perf_opt_mode changes */
-static DEFINE_SPINLOCK(rmnet_perf_opt_lock);
-
 /* flow hash table */
 DEFINE_HASHTABLE(rmnet_perf_opt_fht, RMNET_PERF_FLOW_HASH_TABLE_BITS);
 
-static void flush_flow_nodes_by_protocol(struct rmnet_perf *perf, u8 protocol)
+static void rmnet_perf_opt_flush_flow_nodes_by_protocol(u8 protocol)
 {
 	struct rmnet_perf_opt_flow_node *flow_node;
 	int bkt_cursor;
 
 	hash_for_each(rmnet_perf_opt_fht, bkt_cursor, flow_node, list) {
 		if (flow_node->num_pkts_held > 0 &&
-		    flow_node->protocol == protocol)
-			rmnet_perf_opt_flush_single_flow_node(perf, flow_node);
+		    flow_node->trans_proto == protocol)
+			rmnet_perf_opt_flush_single_flow_node(flow_node);
 	}
 }
 
 static int rmnet_perf_set_opt_mode(const char *val,
 				   const struct kernel_param *kp)
 {
-	struct rmnet_perf *perf;
-	unsigned long ht_flags;
 	int old_mode = rmnet_perf_opt_mode;
 	int rc = -EINVAL;
 	char value[4];
@@ -86,8 +82,7 @@
 	strlcpy(value, val, 4);
 	value[3] = '\0';
 
-	local_bh_disable();
-	spin_lock_irqsave(&rmnet_perf_opt_lock, ht_flags);
+	rmnet_perf_core_grab_lock();
 
 	if (!strcmp(value, "tcp"))
 		rmnet_perf_opt_mode = RMNET_PERF_OPT_MODE_TCP;
@@ -110,23 +105,20 @@
 		goto out;
 
 	/* Flush out any nodes of the protocol we are no longer optimizing */
-	perf = rmnet_perf_config_get_perf();
 	switch (rmnet_perf_opt_mode) {
 	case RMNET_PERF_OPT_MODE_TCP:
-		flush_flow_nodes_by_protocol(perf, IPPROTO_UDP);
+		rmnet_perf_opt_flush_flow_nodes_by_protocol(IPPROTO_UDP);
 		break;
 	case RMNET_PERF_OPT_MODE_UDP:
-		flush_flow_nodes_by_protocol(perf, IPPROTO_TCP);
+		rmnet_perf_opt_flush_flow_nodes_by_protocol(IPPROTO_TCP);
 		break;
 	case RMNET_PERF_OPT_MODE_NON:
-		flush_flow_nodes_by_protocol(perf, IPPROTO_TCP);
-		flush_flow_nodes_by_protocol(perf, IPPROTO_UDP);
+		rmnet_perf_opt_flush_all_flow_nodes();
 		break;
 	}
 
 out:
-	spin_unlock_irqrestore(&rmnet_perf_opt_lock, ht_flags);
-	local_bh_enable();
+	rmnet_perf_core_release_lock();
 
 	return rc;
 }
@@ -159,14 +151,15 @@
 
 module_param_cb(rmnet_perf_opt_mode, &rmnet_perf_opt_mode_ops, NULL, 0644);
 
-/* optimize_protocol() - Check if we should optimize the given protocol
+/* rmnet_perf_optimize_protocol() - Check if we should optimize the given
+ * protocol
  * @protocol: The IP protocol number to check
  *
  * Return:
  *    - true if protocol should use the flow node infrastructure
 *    - false if packets of the given protocol should be flushed
  **/
-static bool optimize_protocol(u8 protocol)
+static bool rmnet_perf_optimize_protocol(u8 protocol)
 {
 	if (rmnet_perf_opt_mode == RMNET_PERF_OPT_MODE_ALL)
 		return true;
@@ -178,7 +171,7 @@
 	return false;
 }
 
-/* ip_flag_flush() - Check IP header flags to decide if
+/* rmnet_perf_opt_ip_flag_flush() - Check IP header flags to decide if
  *		immediate flush required
  * @pkt_info: characteristics of the current packet
  *
@@ -189,8 +182,9 @@
  *    - true if need flush
  *    - false if immediate flush may not be needed
  **/
-static bool ip_flag_flush(struct rmnet_perf_opt_flow_node *flow_node,
-			  struct rmnet_perf_pkt_info *pkt_info)
+static bool
+rmnet_perf_opt_ip_flag_flush(struct rmnet_perf_opt_flow_node *flow_node,
+			     struct rmnet_perf_pkt_info *pkt_info)
 {
 	struct iphdr *ip4h;
 	struct ipv6hdr *ip6h;
@@ -198,7 +192,7 @@
 
 	switch (pkt_info->ip_proto) {
 	case 0x04:
-		ip4h = pkt_info->iphdr.v4hdr;
+		ip4h = pkt_info->ip_hdr.v4hdr;
 
 		if ((ip4h->ttl ^ flow_node->ip_flags.ip4_flags.ip_ttl) ||
 		    (ip4h->tos ^ flow_node->ip_flags.ip4_flags.ip_tos) ||
@@ -209,7 +203,7 @@
 
 		break;
 	case 0x06:
-		ip6h = (struct ipv6hdr *) pkt_info->iphdr.v6hdr;
+		ip6h = (struct ipv6hdr *) pkt_info->ip_hdr.v6hdr;
 		first_word = *(__be32 *)ip6h ^ flow_node->ip_flags.first_word;
 
 		if (!!(first_word & htonl(0x0FF00000)))
@@ -224,7 +218,7 @@
 	return false;
 }
 
-/* identify_flow() - Tell whether packet corresponds to
+/* rmnet_perf_opt_identify_flow() - Tell whether packet corresponds to
  *		given flow
  * @flow_node: Node we are checking against
  * @pkt_info: characteristics of the current packet
@@ -235,15 +229,16 @@
  *		- true: it is a match
  *		- false: not a match
  **/
-static bool identify_flow(struct rmnet_perf_opt_flow_node *flow_node,
-			  struct rmnet_perf_pkt_info *pkt_info)
+static bool
+rmnet_perf_opt_identify_flow(struct rmnet_perf_opt_flow_node *flow_node,
+			     struct rmnet_perf_pkt_info *pkt_info)
 {
 	struct iphdr *ip4h;
 	struct ipv6hdr *ip6h;
 	/* Actually protocol generic. UDP and TCP headers have the source
 	 * and dest ports in the same location. ;)
 	 */
-	struct udphdr *up = pkt_info->trns_hdr.up;
+	struct udphdr *up = pkt_info->trans_hdr.up;
 
 	/* if pkt count == 0 and hash is the same, then we give this one a
 	 * pass as good enough since at this point there is no address stuff
@@ -254,13 +249,13 @@
 		return true;
 
 	/* protocol must match */
-	if (flow_node->protocol != pkt_info->trans_proto)
+	if (flow_node->trans_proto != pkt_info->trans_proto)
 		return false;
 
 	/* cast iph to right ip header struct for ip_version */
 	switch (pkt_info->ip_proto) {
 	case 0x04:
-		ip4h = pkt_info->iphdr.v4hdr;
+		ip4h = pkt_info->ip_hdr.v4hdr;
 		if (((__force u32)flow_node->saddr.saddr4 ^
 		     (__force u32)ip4h->saddr) |
 		    ((__force u32)flow_node->daddr.daddr4 ^
@@ -272,7 +267,7 @@
 			return false;
 		break;
 	case 0x06:
-		ip6h = pkt_info->iphdr.v6hdr;
+		ip6h = pkt_info->ip_hdr.v6hdr;
 		if ((ipv6_addr_cmp(&(flow_node->saddr.saddr6), &ip6h->saddr)) |
 			(ipv6_addr_cmp(&(flow_node->daddr.daddr6),
 				       &ip6h->daddr)) |
@@ -291,139 +286,196 @@
 	return true;
 }
 
-/* make_flow_skb() - Allocate and populate SKB for
- *		flow node that is being pushed up the stack
- * @perf: allows access to our required global structures
+/* rmnet_perf_opt_add_flow_subfrags() - Associates the frag descriptor held by
+ *		the flow_node to the main descriptor
  * @flow_node: opt structure containing packet we are allocating for
  *
- * Allocate skb of proper size for opt'd packet, and memcpy data
- * into the buffer
- *
  * Return:
- *		- skbn: sk_buff to then push up the NW stack
- *		- NULL: if memory allocation failed
+ *		- void
  **/
-static struct sk_buff *make_flow_skb(struct rmnet_perf *perf,
-				     struct rmnet_perf_opt_flow_node *flow_node)
+
+static void
+rmnet_perf_opt_add_flow_subfrags(struct rmnet_perf_opt_flow_node *flow_node)
 {
-	struct sk_buff *skbn;
+	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 	struct rmnet_perf_opt_pkt_node *pkt_list;
-	int i;
-	u32 pkt_size;
-	u32 total_pkt_size = 0;
+	struct rmnet_frag_descriptor *head_frag;
+	u8 i;
 
-	if (rmnet_perf_opt_skb_recycle_off) {
-		skbn = alloc_skb(flow_node->len + RMNET_MAP_DEAGGR_SPACING,
-				 GFP_ATOMIC);
-		if (!skbn)
-			return NULL;
-	} else {
-		skbn = rmnet_perf_core_elligible_for_cache_skb(perf,
-							       flow_node->len);
-		if (!skbn) {
-			skbn = alloc_skb(flow_node->len + RMNET_MAP_DEAGGR_SPACING,
-				 GFP_ATOMIC);
-			if (!skbn)
-				return NULL;
-		}
-	}
-
-	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
 	pkt_list = flow_node->pkt_list;
+	head_frag = pkt_list[0].frag_desc;
 
-	for (i = 0; i < flow_node->num_pkts_held; i++) {
-		pkt_size = pkt_list[i].data_end - pkt_list[i].data_start;
-		memcpy(skbn->data + skbn->len, pkt_list[i].data_start,
-		       pkt_size);
-		skb_put(skbn, pkt_size);
-		total_pkt_size += pkt_size;
+	/* GSO segs might not be initialized yet (e.g. csum offload,
+	 * RSB/RSC frames with only 1 packet, etc)
+	 */
+	if (!head_frag->gso_segs)
+		head_frag->gso_segs = 1;
+
+	head_frag->gso_size = flow_node->gso_len;
+
+	for (i = 1; i < flow_node->num_pkts_held; i++) {
+		struct rmnet_frag_descriptor *new_frag;
+
+		new_frag = pkt_list[i].frag_desc;
+		/* Pull headers if they're there */
+		if (new_frag->hdr_ptr == rmnet_frag_data_ptr(new_frag)) {
+			if (!rmnet_frag_pull(new_frag, perf->rmnet_port,
+					     flow_node->ip_len +
+					     flow_node->trans_len))
+				continue;
+		}
+
+		/* Move the fragment onto the subfrags list */
+		list_move_tail(&new_frag->list, &head_frag->sub_frags);
+		head_frag->gso_segs += (new_frag->gso_segs) ?: 1;
 	}
-	if (flow_node->len != total_pkt_size)
-		pr_err("%s(): skbn = %pK, flow_node->len = %u, pkt_size = %u\n",
-		       __func__, skbn, flow_node->len, total_pkt_size);
-
-	return skbn;
 }
 
-static void flow_skb_fixup(struct sk_buff *skb,
-			   struct rmnet_perf_opt_flow_node *flow_node)
+/* rmnet_perf_opt_alloc_flow_skb() - Allocate a new SKB for holding flow node
+ *		data
+ * @headlen: The amount of space to allocate for linear data. Does not include
+ *		extra deaggregation headroom.
+ *
+ * Allocates a new SKB large enough to hold the amount of data provided, or
+ * returns a preallocated SKB if recycling is enabled and there are cached
+ * buffers available.
+ *
+ * Return:
+ *		- skb: the new SKB to use
+ *		- NULL: memory failure
+ **/
+static struct sk_buff *rmnet_perf_opt_alloc_flow_skb(u32 headlen)
 {
-	struct skb_shared_info *shinfo;
+	struct sk_buff *skb;
+
+	/* Grab a preallocated SKB if possible */
+	if (!rmnet_perf_opt_skb_recycle_off) {
+		skb = rmnet_perf_core_elligible_for_cache_skb(headlen);
+		if (skb)
+			return skb;
+	}
+
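+	/* Otherwise allocate a fresh SKB, leaving deaggregation headroom */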
+	skb = alloc_skb(headlen + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, RMNET_MAP_DEAGGR_HEADROOM);
+	return skb;
+}
+
+/* rmnet_perf_opt_make_flow_skb() - Allocate and populate SKBs for flow node
+ *		that is being pushed up the stack
+ * @flow_node: opt structure containing packet we are allocating for
+ *
+ * Return:
+ *		- skb: The new SKB to use
+ *		- NULL: memory failure
+ **/
+static struct sk_buff *
+rmnet_perf_opt_make_flow_skb(struct rmnet_perf_opt_flow_node *flow_node)
+{
+	struct sk_buff *skb;
+	struct rmnet_perf_opt_pkt_node *pkt_list;
+	int i;
+	u32 alloc_len;
+	u32 total_pkt_size = 0;
+
+	pkt_list = flow_node->pkt_list;
+	alloc_len = flow_node->len + flow_node->ip_len + flow_node->trans_len;
+	skb = rmnet_perf_opt_alloc_flow_skb(alloc_len);
+	if (!skb)
+		return NULL;
+
+	/* Copy the headers over */
+	skb_put_data(skb, pkt_list[0].header_start,
+		     flow_node->ip_len + flow_node->trans_len);
+
+	for (i = 0; i < flow_node->num_pkts_held; i++) {
+		skb_put_data(skb, pkt_list[i].data_start, pkt_list[i].data_len);
+		total_pkt_size += pkt_list[i].data_len;
+	}
+
+	if (flow_node->len != total_pkt_size)
+		pr_err("%s(): flow_node->len = %u, pkt_size = %u\n", __func__,
+		       flow_node->len, total_pkt_size);
+
+	return skb;
+}
+
+static void
+rmnet_perf_opt_flow_skb_fixup(struct sk_buff *skb,
+			      struct rmnet_perf_opt_flow_node *flow_node)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	struct iphdr *iph = (struct iphdr *)skb->data;
 	struct tcphdr *tp;
 	struct udphdr *up;
-	__wsum pseudo;
-	u16 datagram_len, ip_len;
-	u16 proto;
+	__sum16 pseudo;
+	u16 datagram_len;
 	bool ipv4 = (iph->version == 4);
+
+	/* Avoid recalculating the hash later on */
 	skb->hash = flow_node->hash_value;
 	skb->sw_hash = 1;
-	/* We've already validated all data */
+	/* We've already validated all data in the flow nodes */
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	/* Aggregated flows can be segmented by the stack
-	 * during forwarding/tethering scenarios, so pretend
-	 * we ran through the GRO logic to coalesce the packets
-	 */
-
+	/* GSO information only needs to be added/updated if we actually
+	 * coalesced any packets.
+	 */
 	if (flow_node->num_pkts_held <= 1)
 		return;
 
-	datagram_len = flow_node->gso_len * flow_node->num_pkts_held;
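+	/* Transport header plus all coalesced payload bytes */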
+	datagram_len = skb->len - flow_node->ip_len;
 
-	/* Update transport header fields to reflect new length.
-	 * Checksum is set to the pseudoheader checksum value
-	 * since we'll need to mark the SKB as CHECKSUM_PARTIAL.
+	/* Update headers to reflect the new packet length.
+	 * Transport checksum needs to be set to the pseudo header checksum
+	 * since we need to mark the SKB as CHECKSUM_PARTIAL so the stack can
+	 * segment properly.
 	 */
 	if (ipv4) {
-		ip_len = iph->ihl * 4;
-		pseudo = csum_partial(&iph->saddr,
-				      sizeof(iph->saddr) * 2, 0);
-		proto = iph->protocol;
+		iph->tot_len = htons(datagram_len + flow_node->ip_len);
+		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					    datagram_len,
+					    flow_node->trans_proto, 0);
+		iph->check = 0;
+		iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
 	} else {
 		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
 
-		ip_len = sizeof(*ip6h);
-		pseudo = csum_partial(&ip6h->saddr,
-				      sizeof(ip6h->saddr) * 2, 0);
-		proto = ip6h->nexthdr;
+		/* Payload len includes any extension headers */
+		ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
+		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					  datagram_len, flow_node->trans_proto,
+					  0);
 	}
 
-	pseudo = csum16_add(pseudo, htons(proto));
-	switch (proto) {
+	switch (flow_node->trans_proto) {
 	case IPPROTO_TCP:
-		tp = (struct tcphdr *)((char *)iph + ip_len);
-		datagram_len += tp->doff * 4;
-		pseudo = csum16_add(pseudo, htons(datagram_len));
-		tp->check = ~csum_fold(pseudo);
-		skb->csum_start = (unsigned char *) tp - skb->head;
+		tp = (struct tcphdr *)((u8 *)iph + flow_node->ip_len);
+		tp->check = pseudo;
+		skb->csum_start = (u8 *)tp - skb->head;
 		skb->csum_offset = offsetof(struct tcphdr, check);
-		skb_shinfo(skb)->gso_type = (ipv4) ? SKB_GSO_TCPV4:
-					     SKB_GSO_TCPV6;
+		shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
 		break;
 	case IPPROTO_UDP:
-		up = (struct udphdr *)((char *)iph + ip_len);
-		datagram_len += sizeof(*up);
+		up = (struct udphdr *)((u8 *)iph + flow_node->ip_len);
 		up->len = htons(datagram_len);
-		pseudo = csum16_add(pseudo, up->len);
-		up->check = ~csum_fold(pseudo);
-		skb->csum_start = (unsigned char *)up - skb->head;
+		up->check = pseudo;
+		skb->csum_start = (u8 *)up - skb->head;
 		skb->csum_offset = offsetof(struct udphdr, check);
-		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+		shinfo->gso_type = SKB_GSO_UDP_L4;
 		break;
 	default:
 		return;
 	}
 
 	/* Update GSO metadata */
-	shinfo = skb_shinfo(skb);
 	shinfo->gso_size = flow_node->gso_len;
-	shinfo->gso_segs = flow_node->num_pkts_held;
 	skb->ip_summed = CHECKSUM_PARTIAL;
 }
 
-/* get_new_flow_index() - Pull flow node from node pool
- * @perf: allows access to our required global structures
+/* rmnet_perf_opt_get_new_flow_index() - Pull flow node from node pool
  *
  * Fetch the flow node from the node pool. If we have already given
  * out all the flow nodes then we will always hit the else case and
@@ -433,9 +485,9 @@
  * Return:
  *		- flow_node: node to be used by caller function
  **/
-static struct rmnet_perf_opt_flow_node *
-get_new_flow_index(struct rmnet_perf *perf)
+static struct rmnet_perf_opt_flow_node *rmnet_perf_opt_get_new_flow_index(void)
 {
+	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 	struct rmnet_perf_opt_flow_node_pool *node_pool;
 	struct rmnet_perf_opt_flow_node *flow_node_ejected;
 
@@ -449,7 +501,7 @@
 
 	flow_node_ejected = node_pool->node_list[
 		node_pool->flow_recycle_counter++ % RMNET_PERF_NUM_FLOW_NODES];
-	rmnet_perf_opt_flush_single_flow_node(perf, flow_node_ejected);
+	rmnet_perf_opt_flush_single_flow_node(flow_node_ejected);
 	hash_del(&flow_node_ejected->list);
 	return flow_node_ejected;
 }
@@ -469,7 +521,7 @@
 			   struct rmnet_perf_pkt_info *pkt_info)
 {
 	if (pkt_info->ip_proto == 0x04) {
-		struct iphdr *iph = pkt_info->iphdr.v4hdr;
+		struct iphdr *iph = pkt_info->ip_hdr.v4hdr;
 		/* Frags don't make it this far, so this is all we care about */
 		__be16 flags = iph->frag_off & htons(IP_CE | IP_DF);
 
@@ -477,7 +529,7 @@
 		flow_node->ip_flags.ip4_flags.ip_tos = iph->tos;
 		flow_node->ip_flags.ip4_flags.ip_frag_off = flags;
 	} else if (pkt_info->ip_proto == 0x06) {
-		__be32 *word = (__be32 *)pkt_info->iphdr.v6hdr;
+		__be32 *word = (__be32 *)pkt_info->ip_hdr.v6hdr;
 
 		flow_node->ip_flags.first_word = *word;
 	}
@@ -485,50 +537,68 @@
 
 /* rmnet_perf_opt_flush_single_flow_node() - Send a given flow node up
  *		NW stack.
- * @perf: allows access to our required global structures
  * @flow_node: opt structure containing packet we are allocating for
  *
  * Send a given flow up NW stack via specific VND
  *
  * Return:
- *    - skbn: sk_buff to then push up the NW stack
+ *    - void
  **/
-void rmnet_perf_opt_flush_single_flow_node(struct rmnet_perf *perf,
+void rmnet_perf_opt_flush_single_flow_node(
 				struct rmnet_perf_opt_flow_node *flow_node)
 {
-	struct sk_buff *skbn;
-	struct rmnet_endpoint *ep;
+	if (flow_node->num_pkts_held) {
+		if (!rmnet_perf_core_is_deag_mode()) {
+			struct rmnet_frag_descriptor *frag_desc;
 
-	/* future change: when inserting the first packet in a flow,
-	 * save away the ep value so we dont have to look it up every flush
-	 */
-	hlist_for_each_entry_rcu(ep,
-				 &perf->rmnet_port->muxed_ep[flow_node->mux_id],
-				 hlnode) {
-		if (ep->mux_id == flow_node->mux_id &&
-		    flow_node->num_pkts_held) {
-			skbn = make_flow_skb(perf, flow_node);
-			if (skbn) {
-				flow_skb_fixup(skbn, flow_node);
-				rmnet_perf_core_send_skb(skbn, ep, perf, NULL);
+			rmnet_perf_opt_add_flow_subfrags(flow_node);
+			frag_desc = flow_node->pkt_list[0].frag_desc;
+			frag_desc->hash = flow_node->hash_value;
+			rmnet_perf_core_send_desc(frag_desc);
+		} else {
+			struct sk_buff *skb;
+
+			skb = rmnet_perf_opt_make_flow_skb(flow_node);
+			if (skb) {
+				rmnet_perf_opt_flow_skb_fixup(skb, flow_node);
+				rmnet_perf_core_send_skb(skb, flow_node->ep);
 			} else {
 				rmnet_perf_opt_oom_drops +=
 					flow_node->num_pkts_held;
 			}
-			/* equivalent to memsetting the flow node */
-			flow_node->num_pkts_held = 0;
 		}
+
+		/* equivalent to memsetting the flow node */
+		flow_node->num_pkts_held = 0;
+		flow_node->len = 0;
+	}
+}
+
+/* rmnet_perf_opt_flush_flow_by_hash() - Iterate through all flow nodes
+ *	that match a certain hash and flush the match
+ * @hash_val: hash value we are looking to match and hence flush
+ *
+ * Return:
+ *    - void
+ **/
+void rmnet_perf_opt_flush_flow_by_hash(u32 hash_val)
+{
+	struct rmnet_perf_opt_flow_node *flow_node;
+
+	hash_for_each_possible(rmnet_perf_opt_fht, flow_node, list, hash_val) {
+		if (hash_val == flow_node->hash_value &&
+		    flow_node->num_pkts_held > 0)
+			rmnet_perf_opt_flush_single_flow_node(flow_node);
 	}
 }
 
 /* rmnet_perf_opt_flush_all_flow_nodes() - Iterate through all flow nodes
  *		and flush them individually
- * @perf: allows access to our required global structures
  *
  * Return:
  *    - void
  **/
-void rmnet_perf_opt_flush_all_flow_nodes(struct rmnet_perf *perf)
+void rmnet_perf_opt_flush_all_flow_nodes(void)
 {
 	struct rmnet_perf_opt_flow_node *flow_node;
 	int bkt_cursor;
@@ -539,82 +609,122 @@
 		hash_val = flow_node->hash_value;
 		num_pkts_held = flow_node->num_pkts_held;
 		if (num_pkts_held > 0) {
-			rmnet_perf_opt_flush_single_flow_node(perf, flow_node);
-			//rmnet_perf_core_flush_single_gro_flow(hash_val);
+			rmnet_perf_opt_flush_single_flow_node(flow_node);
 		}
 	}
 }
 
+/* rmnet_perf_opt_chain_end() - Handle end of SKB chain notification
+ *
+ * Return:
+ *    - void
+ **/
+void rmnet_perf_opt_chain_end(void)
+{
+	rmnet_perf_core_grab_lock();
+	rmnet_perf_opt_flush_reason_cnt[RMNET_PERF_OPT_CHAIN_END]++;
+	rmnet_perf_opt_flush_all_flow_nodes();
+	rmnet_perf_core_release_lock();
+}
+
 /* rmnet_perf_opt_insert_pkt_in_flow() - Inserts single IP packet into
  *		opt meta structure
- * @skb: pointer to packet given to us by physical device
  * @flow_node: flow node we are going to insert the ip packet into
  * @pkt_info: characteristics of the current packet
  *
  * Return:
  *    - void
  **/
-void rmnet_perf_opt_insert_pkt_in_flow(struct sk_buff *skb,
+void rmnet_perf_opt_insert_pkt_in_flow(
 			struct rmnet_perf_opt_flow_node *flow_node,
 			struct rmnet_perf_pkt_info *pkt_info)
 {
 	struct rmnet_perf_opt_pkt_node *pkt_node;
-	struct tcphdr *tp = pkt_info->trns_hdr.tp;
-	void *iph = (void *) pkt_info->iphdr.v4hdr;
-	u16 header_len = pkt_info->header_len;
+	struct tcphdr *tp = pkt_info->trans_hdr.tp;
+	void *iph = (void *)pkt_info->ip_hdr.v4hdr;
+	u16 header_len = pkt_info->ip_len + pkt_info->trans_len;
 	u16 payload_len = pkt_info->payload_len;
 	unsigned char ip_version = pkt_info->ip_proto;
 
 	pkt_node = &flow_node->pkt_list[flow_node->num_pkts_held];
-	pkt_node->data_end = (unsigned char *) iph + header_len + payload_len;
-	if (pkt_info->trans_proto == IPPROTO_TCP)
-		flow_node->next_seq = ntohl(tp->seq) +
-				      (__force u32) payload_len;
+	pkt_node->header_start = (unsigned char *)iph;
+	pkt_node->data_len = payload_len;
+	flow_node->len += payload_len;
+	flow_node->num_pkts_held++;
+
+	/* Set appropriate data pointers based on mode */
+	if (!rmnet_perf_core_is_deag_mode()) {
+		pkt_node->frag_desc = pkt_info->frag_desc;
+		pkt_node->data_start = rmnet_frag_data_ptr(pkt_info->frag_desc);
+		pkt_node->data_start += header_len;
+	} else {
+		pkt_node->data_start = (unsigned char *)iph + header_len;
+	}
 
 	if (pkt_info->first_packet) {
-		pkt_node->ip_start = (unsigned char *) iph;
-		pkt_node->data_start = (unsigned char *) iph;
-		flow_node->len = header_len + payload_len;
-		flow_node->mux_id = RMNET_MAP_GET_MUX_ID(skb);
+		/* Copy over flow information */
+		flow_node->ep = pkt_info->ep;
+		flow_node->ip_proto = ip_version;
+		flow_node->trans_proto = pkt_info->trans_proto;
 		flow_node->src_port = tp->source;
 		flow_node->dest_port = tp->dest;
+		flow_node->ip_len = pkt_info->ip_len;
+		flow_node->trans_len = pkt_info->trans_len;
 		flow_node->hash_value = pkt_info->hash_key;
-		flow_node->gso_len = payload_len;
-
-		if (pkt_info->trans_proto == IPPROTO_TCP)
-			flow_node->timestamp = pkt_info->curr_timestamp;
+		/* Use already stamped gso_size if available */
+		if (!rmnet_perf_core_is_deag_mode() &&
+		    pkt_info->frag_desc->gso_size)
+			flow_node->gso_len = pkt_info->frag_desc->gso_size;
+		else
+			flow_node->gso_len = payload_len;
 
 		if (ip_version == 0x04) {
 			flow_node->saddr.saddr4 =
-				(__be32) ((struct iphdr *) iph)->saddr;
+				(__be32)((struct iphdr *)iph)->saddr;
 			flow_node->daddr.daddr4 =
-				(__be32) ((struct iphdr *) iph)->daddr;
-			flow_node->protocol = ((struct iphdr *) iph)->protocol;
-		} else if (ip_version == 0x06) {
-			flow_node->saddr.saddr6 =
-				((struct ipv6hdr *) iph)->saddr;
-			flow_node->daddr.daddr6 =
-				((struct ipv6hdr *) iph)->daddr;
-			flow_node->protocol = ((struct ipv6hdr *) iph)->nexthdr;
+				(__be32)((struct iphdr *)iph)->daddr;
+			flow_node->trans_proto =
+				((struct iphdr *)iph)->protocol;
 		} else {
-			pr_err("%s(): Encountered invalid ip version\n",
-			       __func__);
-			/* TODO as Vamsi mentioned get a way to handle
-			 * this case... still want to send packet up NW stack
-			 */
+			flow_node->saddr.saddr6 =
+				((struct ipv6hdr *)iph)->saddr;
+			flow_node->daddr.daddr6 =
+				((struct ipv6hdr *)iph)->daddr;
+			flow_node->trans_proto =
+				((struct ipv6hdr *)iph)->nexthdr;
 		}
-		flow_node->num_pkts_held = 1;
-	} else {
-		pkt_node->ip_start = (unsigned char *) iph;
-		pkt_node->data_start = (unsigned char *) iph + header_len;
-		flow_node->len += payload_len;
-		flow_node->num_pkts_held++;
+
+		/* Set initial TCP SEQ number */
+		if (pkt_info->trans_proto == IPPROTO_TCP) {
+			if (pkt_info->frag_desc &&
+			    pkt_info->frag_desc->tcp_seq_set) {
+				__be32 seq = pkt_info->frag_desc->tcp_seq;
+
+				flow_node->next_seq = ntohl(seq);
+			} else {
+				flow_node->next_seq = ntohl(tp->seq);
+			}
+		}
+
 	}
+
+	if (pkt_info->trans_proto == IPPROTO_TCP)
+		flow_node->next_seq += payload_len;
+}
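+
+/* rmnet_perf_free_hash_table() - Remove every flow node from the flow hash
+ *		table
+ *
+ * Return:
+ *    - void
+ **/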
+void rmnet_perf_free_hash_table(void)
+{
+	int i;
+	struct rmnet_perf_opt_flow_node *flow_node;
+	struct hlist_node *tmp;
+
+	hash_for_each_safe(rmnet_perf_opt_fht, i, tmp, flow_node, list) {
+		hash_del(&flow_node->list);
+	}
+}
 
 /* rmnet_perf_opt_ingress() - Core business logic of optimization framework
- * @perf: allows access to our required global structures
- * @skb: the incoming ip packet
  * @pkt_info: characteristics of the current packet
  *
  * Makes determination of what to do with a given incoming
@@ -626,8 +736,7 @@
  *		- true if packet has been handled
  *		- false if caller needs to flush packet
  **/
-bool rmnet_perf_opt_ingress(struct rmnet_perf *perf, struct sk_buff *skb,
-			    struct rmnet_perf_pkt_info *pkt_info)
+bool rmnet_perf_opt_ingress(struct rmnet_perf_pkt_info *pkt_info)
 {
 	struct rmnet_perf_opt_flow_node *flow_node;
 	struct rmnet_perf_opt_flow_node *flow_node_recycled;
@@ -635,17 +744,16 @@
 	bool handled = false;
 	bool flow_node_exists = false;
 
-	spin_lock(&rmnet_perf_opt_lock);
-	if (!optimize_protocol(pkt_info->trans_proto))
+	if (!rmnet_perf_optimize_protocol(pkt_info->trans_proto))
 		goto out;
 
 handle_pkt:
 	hash_for_each_possible(rmnet_perf_opt_fht, flow_node, list,
 			       pkt_info->hash_key) {
-		if (!identify_flow(flow_node, pkt_info))
+		if (!rmnet_perf_opt_identify_flow(flow_node, pkt_info))
 			continue;
 
-		flush = ip_flag_flush(flow_node, pkt_info);
+		flush = rmnet_perf_opt_ip_flag_flush(flow_node, pkt_info);
 
 		/* set this to true by default. Let the protocol helpers
 		 * change this if it is needed.
@@ -655,13 +763,11 @@
 
 		switch (pkt_info->trans_proto) {
 		case IPPROTO_TCP:
-			rmnet_perf_tcp_opt_ingress(perf, skb, flow_node,
-						   pkt_info, flush);
+			rmnet_perf_tcp_opt_ingress(flow_node, pkt_info, flush);
 			handled = true;
 			goto out;
 		case IPPROTO_UDP:
-			rmnet_perf_udp_opt_ingress(perf, skb, flow_node,
-						   pkt_info, flush);
+			rmnet_perf_udp_opt_ingress(flow_node, pkt_info, flush);
 			handled = true;
 			goto out;
 		default:
@@ -673,7 +779,7 @@
 
 	/* If we didn't find the flow, we need to add it and try again */
 	if (!flow_node_exists) {
-		flow_node_recycled = get_new_flow_index(perf);
+		flow_node_recycled = rmnet_perf_opt_get_new_flow_index();
 		flow_node_recycled->hash_value = pkt_info->hash_key;
 		rmnet_perf_opt_update_flow(flow_node_recycled, pkt_info);
 		hash_add(rmnet_perf_opt_fht, &flow_node_recycled->list,
@@ -682,6 +788,5 @@
 	}
 
 out:
-	spin_unlock(&rmnet_perf_opt_lock);
 	return handled;
 }
diff --git a/drivers/rmnet/perf/rmnet_perf_opt.h b/drivers/rmnet/perf/rmnet_perf_opt.h
index 4a785cb..b98261d 100644
--- a/drivers/rmnet/perf/rmnet_perf_opt.h
+++ b/drivers/rmnet/perf/rmnet_perf_opt.h
@@ -20,9 +20,10 @@
 #define RMNET_PERF_NUM_FLOW_NODES              8
 
 struct rmnet_perf_opt_pkt_node {
-	unsigned char *ip_start; /* This is simply used for debug purposes */
+	unsigned char *header_start;
 	unsigned char *data_start;
-	unsigned char *data_end;
+	struct rmnet_frag_descriptor *frag_desc;
+	u16 data_len;
 };
 
 struct rmnet_perf_opt_ip_flags {
@@ -32,21 +33,19 @@
 };
 
 struct rmnet_perf_opt_flow_node {
-	u8 mux_id;
-	u8 protocol;
-	u8 num_pkts_held;
-	union {
-		struct rmnet_perf_opt_ip_flags ip4_flags;
-		__be32 first_word;
-	} ip_flags;
-	u32 timestamp;
-	__be32 next_seq;
-	u32 gso_len;
-	u32 len;
-	u32 hash_value;
+	/* Header lengths */
+	u8 ip_len;
+	u8 trans_len;
 
+	/* Header protocols */
+	u8 ip_proto;
+	u8 trans_proto;
+
+	/* Ports */
 	__be16	src_port;
 	__be16	dest_port;
+
+	/* IP addresses */
 	union {
 		__be32	saddr4;
 		struct in6_addr saddr6;
@@ -56,7 +55,26 @@
 		struct in6_addr daddr6;
 	} daddr;
 
+	/* IP flags */
+	union {
+		struct rmnet_perf_opt_ip_flags ip4_flags;
+		__be32 first_word;
+	} ip_flags;
+
+	/* TCP metadata */
+	__be32 next_seq;
+
+	/* GSO metadata */
+	u32 gso_len;
+
+	/* Perf metadata */
+	u8 num_pkts_held;
+	u32 len;
+	u32 hash_value;
+	struct rmnet_endpoint *ep;
 	struct hlist_node list;
+
+	/* The packets we're holding */
 	struct rmnet_perf_opt_pkt_node pkt_list[50];
 };
 
@@ -73,19 +91,22 @@
 
 enum rmnet_perf_opt_flush_reasons {
 	RMNET_PERF_OPT_PACKET_CORRUPT_ERROR,
+	RMNET_PERF_OPT_CHAIN_END,
 	RMNET_PERF_OPT_NUM_CONDITIONS
 };
 
 void
 rmnet_perf_opt_update_flow(struct rmnet_perf_opt_flow_node *flow_node,
 			   struct rmnet_perf_pkt_info *pkt_info);
-void rmnet_perf_opt_flush_single_flow_node(struct rmnet_perf *perf,
+void rmnet_perf_opt_flush_single_flow_node(
 				struct rmnet_perf_opt_flow_node *flow_node);
-void rmnet_perf_opt_flush_all_flow_nodes(struct rmnet_perf *perf);
-void rmnet_perf_opt_insert_pkt_in_flow(struct sk_buff *skb,
+void rmnet_perf_opt_flush_flow_by_hash(u32 hash_val);
+void rmnet_perf_opt_flush_all_flow_nodes(void);
+void rmnet_perf_opt_chain_end(void);
+void rmnet_perf_opt_insert_pkt_in_flow(
 			struct rmnet_perf_opt_flow_node *flow_node,
 			struct rmnet_perf_pkt_info *pkt_info);
-bool rmnet_perf_opt_ingress(struct rmnet_perf *perf, struct sk_buff *skb,
-			    struct rmnet_perf_pkt_info *pkt_info);
+bool rmnet_perf_opt_ingress(struct rmnet_perf_pkt_info *pkt_info);
+void rmnet_perf_free_hash_table(void);
 
 #endif /* _RMNET_PERF_OPT_H_ */
diff --git a/drivers/rmnet/perf/rmnet_perf_tcp_opt.c b/drivers/rmnet/perf/rmnet_perf_tcp_opt.c
index 5dc2224..7c73aa1 100644
--- a/drivers/rmnet/perf/rmnet_perf_tcp_opt.c
+++ b/drivers/rmnet/perf/rmnet_perf_tcp_opt.c
@@ -27,7 +27,7 @@
 #include "rmnet_perf_config.h"
 
 /* Max number of bytes we allow tcp_opt to aggregate per flow */
-unsigned int rmnet_perf_tcp_opt_flush_limit __read_mostly = 65536;
+unsigned int rmnet_perf_tcp_opt_flush_limit __read_mostly = 65000;
 module_param(rmnet_perf_tcp_opt_flush_limit, uint, 0644);
 MODULE_PARM_DESC(rmnet_perf_tcp_opt_flush_limit,
 		 "Max flush limiit for tcp_opt");
@@ -67,7 +67,7 @@
 static bool
 rmnet_perf_tcp_opt_tcp_flag_flush(struct rmnet_perf_pkt_info *pkt_info)
 {
-	struct tcphdr *tp = pkt_info->trns_hdr.tp;
+	struct tcphdr *tp = pkt_info->trans_hdr.tp;
 
 	if ((pkt_info->payload_len == 0 && tp->ack) || tp->cwr || tp->syn ||
 	    tp->fin || tp->rst || tp->urg || tp->psh)
@@ -77,7 +77,6 @@
 }
 
 /* rmnet_perf_tcp_opt_pkt_can_be_merged() - Check if packet can be merged
- * @skb:        Source socket buffer containing current MAP frames
  * @flow_node:  flow node meta data for checking condition
  * @pkt_info: characteristics of the current packet
  *
@@ -89,56 +88,44 @@
  *    - false if not
  **/
 static enum rmnet_perf_tcp_opt_merge_check_rc
-rmnet_perf_tcp_opt_pkt_can_be_merged(struct sk_buff *skb,
+rmnet_perf_tcp_opt_pkt_can_be_merged(
 				struct rmnet_perf_opt_flow_node *flow_node,
 				struct rmnet_perf_pkt_info *pkt_info)
 {
-	struct iphdr *ip4h;
-	struct ipv6hdr *ip6h;
-	u16 payload_len = pkt_info->payload_len;
-	struct tcphdr *tp = pkt_info->trns_hdr.tp;
+	struct tcphdr *tp = pkt_info->trans_hdr.tp;
+	u32 tcp_seq = ntohl(tp->seq);
+	u16 gso_len;
 
-	/* cast iph to right ip header struct for ip_version */
-	switch (pkt_info->ip_proto) {
-	case 0x04:
-		ip4h = pkt_info->iphdr.v4hdr;
-		if (((__force u32)flow_node->next_seq ^
-		    (__force u32) ntohl(tp->seq))) {
-			rmnet_perf_tcp_opt_fn_seq = flow_node->next_seq;
-			rmnet_perf_tcp_opt_pkt_seq = ntohl(tp->seq);
-			rmnet_perf_tcp_opt_flush_reason_cnt[
-				RMNET_PERF_TCP_OPT_OUT_OF_ORDER_SEQ]++;
-			return RMNET_PERF_TCP_OPT_FLUSH_ALL;
-		}
-		break;
-	case 0x06:
-		ip6h = (struct ipv6hdr *) pkt_info->iphdr.v6hdr;
-		if (((__force u32)flow_node->next_seq ^
-		    (__force u32) ntohl(tp->seq))) {
-			rmnet_perf_tcp_opt_fn_seq = flow_node->next_seq;
-			rmnet_perf_tcp_opt_pkt_seq = ntohl(tp->seq);
-			rmnet_perf_tcp_opt_flush_reason_cnt[
-				RMNET_PERF_TCP_OPT_OUT_OF_ORDER_SEQ]++;
-			return RMNET_PERF_TCP_OPT_FLUSH_ALL;
-		}
-		break;
-	default:
-		pr_err("Unsupported ip version %d", pkt_info->ip_proto);
+	/* Use any previous GRO information, if present */
+	if (pkt_info->frag_desc && pkt_info->frag_desc->gso_size)
+		gso_len = pkt_info->frag_desc->gso_size;
+	else
+		gso_len = pkt_info->payload_len;
+
+	/* Use stamped TCP SEQ number if we have it */
+	if (pkt_info->frag_desc && pkt_info->frag_desc->tcp_seq_set)
+		tcp_seq = ntohl(pkt_info->frag_desc->tcp_seq);
+
+	/* 1. check ordering */
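+	/* (a non-zero XOR means the sequence numbers differ) */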
+	if (flow_node->next_seq ^ tcp_seq) {
+		rmnet_perf_tcp_opt_fn_seq = flow_node->next_seq;
+		rmnet_perf_tcp_opt_pkt_seq = ntohl(tp->seq);
 		rmnet_perf_tcp_opt_flush_reason_cnt[
-				RMNET_PERF_TCP_OPT_PACKET_CORRUPT_ERROR]++;
-		return RMNET_PERF_TCP_OPT_FLUSH_SOME;
+				RMNET_PERF_TCP_OPT_OUT_OF_ORDER_SEQ]++;
+		return RMNET_PERF_TCP_OPT_FLUSH_ALL;
 	}
 
 	/* 2. check if size overflow */
-	if ((payload_len + flow_node->len >= rmnet_perf_tcp_opt_flush_limit)) {
+	if (pkt_info->payload_len + flow_node->len >=
+	    rmnet_perf_tcp_opt_flush_limit) {
 		rmnet_perf_tcp_opt_flush_reason_cnt[
 						RMNET_PERF_TCP_OPT_64K_LIMIT]++;
 		return RMNET_PERF_TCP_OPT_FLUSH_SOME;
-	} else if ((flow_node->num_pkts_held >= 50)) {
+	} else if (flow_node->num_pkts_held >= 50) {
 		rmnet_perf_tcp_opt_flush_reason_cnt[
 					RMNET_PERF_TCP_OPT_NO_SPACE_IN_NODE]++;
 		return RMNET_PERF_TCP_OPT_FLUSH_SOME;
-	} else if (flow_node->gso_len != payload_len) {
+	} else if (flow_node->gso_len != gso_len) {
 		rmnet_perf_tcp_opt_flush_reason_cnt[
 					RMNET_PERF_TCP_OPT_LENGTH_MISMATCH]++;
 		return RMNET_PERF_TCP_OPT_FLUSH_SOME;
@@ -146,57 +133,42 @@
 	return RMNET_PERF_TCP_OPT_MERGE_SUCCESS;
 }
 
-/* rmnet_perf_tcp_opt_check_timestamp() -Check timestamp of incoming packet
- * @skb: incoming packet to check
- * @tp: pointer to tcp header of incoming packet
- *
- * If the tcp segment has extended headers then parse them to check to see
- * if timestamps are included. If so, return the value
+/* rmnet_perf_tcp_opt_cmp_options() - Compare the TCP options of the packets
+ *		in a given flow node with an incoming packet in the flow
+ * @flow_node: The flow node representing the current flow
+ * @pkt_info: The characteristics of the incoming packet
  *
  * Return:
- *		- timestamp: if a timestamp is valid
- *		- 0: if there is no timestamp extended header
+ *    - true: The TCP headers have differing option fields
+ *    - false: The TCP headers have the same options
  **/
-static u32 rmnet_perf_tcp_opt_check_timestamp(struct sk_buff *skb,
-					      struct tcphdr *tp,
-					      struct net_device *dev)
+static bool
+rmnet_perf_tcp_opt_cmp_options(struct rmnet_perf_opt_flow_node *flow_node,
+			       struct rmnet_perf_pkt_info *pkt_info)
 {
-	int length = tp->doff * 4 - sizeof(*tp);
-	unsigned char *ptr = (unsigned char *)(tp + 1);
+	struct tcphdr *flow_header;
+	struct tcphdr *new_header;
+	u32 optlen, i;
 
-	while (length > 0) {
-		int code = *ptr++;
-		int size = *ptr++;
+	flow_header = (struct tcphdr *)
+		      (flow_node->pkt_list[0].header_start +
+		       flow_node->ip_len);
+	new_header = pkt_info->trans_hdr.tp;
+	optlen = flow_header->doff * 4;
+	if (new_header->doff * 4 != optlen)
+		return true;
 
-		/* Partial or malformed options */
-		if (size < 2 || size > length)
-			return 0;
-
-		switch (code) {
-		case TCPOPT_EOL:
-			/* No more options */
-			return 0;
-		case TCPOPT_NOP:
-			/* Empty option */
-			length--;
-			continue;
-		case TCPOPT_TIMESTAMP:
-			if (size == TCPOLEN_TIMESTAMP &&
-			    dev_net(dev)->ipv4.sysctl_tcp_timestamps)
-				return get_unaligned_be32(ptr);
-		}
-
-		ptr += size - 2;
-		length -= size;
+	/* Compare the bytes of the options */
+	for (i = sizeof(*flow_header); i < optlen; i += 4) {
+		if (*(u32 *)((u8 *)flow_header + i) ^
+		    *(u32 *)((u8 *)new_header + i))
+			return true;
 	}
 
-	/* No timestamp in the options */
-	return 0;
+	return false;
 }
 
 /* rmnet_perf_tcp_opt_ingress() - Core business logic of tcp_opt
- * @perf: allows access to our required global structures
- * @skb: the incoming ip packet
  * @pkt_info: characteristics of the current packet
  * @flush: IP flag mismatch detected
  *
@@ -208,24 +180,24 @@
  * Return:
  *		- void
  **/
-void rmnet_perf_tcp_opt_ingress(struct rmnet_perf *perf, struct sk_buff *skb,
-				struct rmnet_perf_opt_flow_node *flow_node,
+void rmnet_perf_tcp_opt_ingress(struct rmnet_perf_opt_flow_node *flow_node,
 				struct rmnet_perf_pkt_info *pkt_info,
 				bool flush)
 {
-	bool timestamp_mismatch;
+	struct napi_struct *napi;
+	bool option_mismatch;
 	enum rmnet_perf_tcp_opt_merge_check_rc rc;
-	struct napi_struct *napi = NULL;
+	u16 pkt_len;
+
+	pkt_len = pkt_info->ip_len + pkt_info->trans_len +
+		  pkt_info->payload_len;
 
 	if (flush || rmnet_perf_tcp_opt_tcp_flag_flush(pkt_info)) {
 		rmnet_perf_opt_update_flow(flow_node, pkt_info);
-		rmnet_perf_opt_flush_single_flow_node(perf, flow_node);
+		rmnet_perf_opt_flush_single_flow_node(flow_node);
 		napi = get_current_napi_context();
 		napi_gro_flush(napi, false);
-		rmnet_perf_core_flush_curr_pkt(perf, skb, pkt_info,
-					       pkt_info->header_len +
-					       pkt_info->payload_len, true,
-					       false);
+		rmnet_perf_core_flush_curr_pkt(pkt_info, pkt_len, true, false);
 		napi_gro_flush(napi, false);
 		rmnet_perf_tcp_opt_flush_reason_cnt[
 			RMNET_PERF_TCP_OPT_TCP_FLUSH_FORCE]++;
@@ -236,33 +208,27 @@
 	 * We know at this point that it's a normal packet in the flow
 	 */
 	if (!flow_node->num_pkts_held) {
-		rmnet_perf_opt_insert_pkt_in_flow(skb, flow_node, pkt_info);
+		rmnet_perf_opt_insert_pkt_in_flow(flow_node, pkt_info);
 		return;
 	}
 
-	pkt_info->curr_timestamp =
-		rmnet_perf_tcp_opt_check_timestamp(skb,
-						   pkt_info->trns_hdr.tp,
-						   perf->core_meta->dev);
-	timestamp_mismatch = flow_node->timestamp != pkt_info->curr_timestamp;
+	option_mismatch = rmnet_perf_tcp_opt_cmp_options(flow_node, pkt_info);
 
-	rc = rmnet_perf_tcp_opt_pkt_can_be_merged(skb, flow_node, pkt_info);
+	rc = rmnet_perf_tcp_opt_pkt_can_be_merged(flow_node, pkt_info);
 	if (rc == RMNET_PERF_TCP_OPT_FLUSH_ALL) {
-		rmnet_perf_opt_flush_single_flow_node(perf, flow_node);
-		rmnet_perf_core_flush_curr_pkt(perf, skb, pkt_info,
-					       pkt_info->header_len +
-					       pkt_info->payload_len, false,
+		rmnet_perf_opt_flush_single_flow_node(flow_node);
+		rmnet_perf_core_flush_curr_pkt(pkt_info, pkt_len, false,
 					       false);
 	} else if (rc == RMNET_PERF_TCP_OPT_FLUSH_SOME) {
-		rmnet_perf_opt_flush_single_flow_node(perf, flow_node);
-		rmnet_perf_opt_insert_pkt_in_flow(skb, flow_node, pkt_info);
-	} else if (timestamp_mismatch) {
-		rmnet_perf_opt_flush_single_flow_node(perf, flow_node);
-		rmnet_perf_opt_insert_pkt_in_flow(skb, flow_node, pkt_info);
+		rmnet_perf_opt_flush_single_flow_node(flow_node);
+		rmnet_perf_opt_insert_pkt_in_flow(flow_node, pkt_info);
+	} else if (option_mismatch) {
+		rmnet_perf_opt_flush_single_flow_node(flow_node);
+		rmnet_perf_opt_insert_pkt_in_flow(flow_node, pkt_info);
 		rmnet_perf_tcp_opt_flush_reason_cnt[
-			RMNET_PERF_TCP_OPT_TIMESTAMP_MISMATCH]++;
+			RMNET_PERF_TCP_OPT_OPTION_MISMATCH]++;
 	} else if (rc == RMNET_PERF_TCP_OPT_MERGE_SUCCESS) {
 		pkt_info->first_packet = false;
-		rmnet_perf_opt_insert_pkt_in_flow(skb, flow_node, pkt_info);
+		rmnet_perf_opt_insert_pkt_in_flow(flow_node, pkt_info);
 	}
 }
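
Note on the hunk above: the old timestamp-only check is replaced by a whole-options comparison, so two TCP headers are treated as mergeable only when their data offsets match and every 32-bit word of the option area is identical. The stand-alone user-space sketch below mirrors that approach; tcp_options_differ() and the fixed 20-byte base header length are illustrative assumptions, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Word-wise option comparison, mirroring rmnet_perf_tcp_opt_cmp_options():
 * differing data offsets or any differing 32-bit word in the option area
 * means "do not merge".
 */
static bool tcp_options_differ(const uint8_t *hdr_a, const uint8_t *hdr_b)
{
	unsigned int doff_a = (hdr_a[12] >> 4) * 4;	/* header length in bytes */
	unsigned int doff_b = (hdr_b[12] >> 4) * 4;
	unsigned int i;

	if (doff_a != doff_b)
		return true;

	for (i = 20; i < doff_a; i += 4) {	/* options start after byte 20 */
		uint32_t a, b;

		memcpy(&a, hdr_a + i, sizeof(a));
		memcpy(&b, hdr_b + i, sizeof(b));
		if (a != b)
			return true;
	}

	return false;
}
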
diff --git a/drivers/rmnet/perf/rmnet_perf_tcp_opt.h b/drivers/rmnet/perf/rmnet_perf_tcp_opt.h
index 2a6f2c6..4b957dd 100644
--- a/drivers/rmnet/perf/rmnet_perf_tcp_opt.h
+++ b/drivers/rmnet/perf/rmnet_perf_tcp_opt.h
@@ -28,7 +28,7 @@
 
 enum rmnet_perf_tcp_opt_flush_reasons {
 	RMNET_PERF_TCP_OPT_TCP_FLUSH_FORCE,
-	RMNET_PERF_TCP_OPT_TIMESTAMP_MISMATCH,
+	RMNET_PERF_TCP_OPT_OPTION_MISMATCH,
 	RMNET_PERF_TCP_OPT_64K_LIMIT,
 	RMNET_PERF_TCP_OPT_NO_SPACE_IN_NODE,
 	RMNET_PERF_TCP_OPT_FLOW_NODE_SHORTAGE,
@@ -38,8 +38,7 @@
 	RMNET_PERF_TCP_OPT_NUM_CONDITIONS
 };
 
-void rmnet_perf_tcp_opt_ingress(struct rmnet_perf *perf, struct sk_buff *skb,
-				struct rmnet_perf_opt_flow_node *flow_node,
+void rmnet_perf_tcp_opt_ingress(struct rmnet_perf_opt_flow_node *flow_node,
 				struct rmnet_perf_pkt_info *pkt_info,
 				bool flush);
 #endif /* _RMNET_PERF_TCP_OPT_H_ */
diff --git a/drivers/rmnet/perf/rmnet_perf_udp_opt.c b/drivers/rmnet/perf/rmnet_perf_udp_opt.c
index d730820..0f4399c 100644
--- a/drivers/rmnet/perf/rmnet_perf_udp_opt.c
+++ b/drivers/rmnet/perf/rmnet_perf_udp_opt.c
@@ -26,7 +26,7 @@
 #include "rmnet_perf_config.h"
 
 /* Max number of bytes we allow udp_opt to aggregate per flow */
-unsigned int rmnet_perf_udp_opt_flush_limit __read_mostly = 65536;
+unsigned int rmnet_perf_udp_opt_flush_limit __read_mostly = 65000;
 module_param(rmnet_perf_udp_opt_flush_limit, uint, 0644);
 MODULE_PARM_DESC(rmnet_perf_udp_opt_flush_limit,
 		 "Max flush limiit for udp_opt");
@@ -52,7 +52,6 @@
 }
 
 /* udp_pkt_can_be_merged() - Check if packet can be merged
- * @skb:        Source socket buffer containing current MAP frames
  * @flow_node:  flow node meta data for checking condition
  * @pkt_info: characteristics of the current packet
  *
@@ -64,23 +63,29 @@
  *      merge status
  **/
 static enum rmnet_perf_udp_opt_merge_check_rc
-udp_pkt_can_be_merged(struct sk_buff *skb,
-		      struct rmnet_perf_opt_flow_node *flow_node,
+udp_pkt_can_be_merged(struct rmnet_perf_opt_flow_node *flow_node,
 		      struct rmnet_perf_pkt_info *pkt_info)
 {
-	u16 payload_len = pkt_info->payload_len;
+	u16 gso_len;
+
+	/* Use any previous GRO information, if present */
+	if (pkt_info->frag_desc && pkt_info->frag_desc->gso_size)
+		gso_len = pkt_info->frag_desc->gso_size;
+	else
+		gso_len = pkt_info->payload_len;
 
 	/* 1. validate length */
-	if (flow_node->gso_len != payload_len) {
+	if (flow_node->gso_len != gso_len) {
 		update_udp_flush_stat(RMNET_PERF_UDP_OPT_LENGTH_MISMATCH);
 		return RMNET_PERF_UDP_OPT_FLUSH_SOME;
 	}
 
 	/* 2. check for size/count overflow */
-	if ((payload_len + flow_node->len >= rmnet_perf_udp_opt_flush_limit)) {
+	if (pkt_info->payload_len + flow_node->len >=
+	    rmnet_perf_udp_opt_flush_limit) {
 		update_udp_flush_stat(RMNET_PERF_UDP_OPT_64K_LIMIT);
 		return RMNET_PERF_UDP_OPT_FLUSH_SOME;
-	} else if ((flow_node->num_pkts_held >= 50)) {
+	} else if (flow_node->num_pkts_held >= 50) {
 		update_udp_flush_stat(RMNET_PERF_UDP_OPT_NO_SPACE_IN_NODE);
 		return RMNET_PERF_UDP_OPT_FLUSH_SOME;
 	}
@@ -88,8 +93,6 @@
 }
 
 /* rmnet_perf_udp_opt_ingress() - Core business logic of udp_opt
- * @perf: allows access to our required global structures
- * @skb: the incoming ip packet
  * @pkt_info: characteristics of the current packet
  * @flush: IP flag mismatch detected
  *
@@ -101,8 +104,7 @@
  * Return:
  *		- void
  **/
-void rmnet_perf_udp_opt_ingress(struct rmnet_perf *perf, struct sk_buff *skb,
-				struct rmnet_perf_opt_flow_node *flow_node,
+void rmnet_perf_udp_opt_ingress(struct rmnet_perf_opt_flow_node *flow_node,
 				struct rmnet_perf_pkt_info *pkt_info,
 				bool flush)
 {
@@ -110,9 +112,10 @@
 
 	if (flush) {
 		rmnet_perf_opt_update_flow(flow_node, pkt_info);
-		rmnet_perf_opt_flush_single_flow_node(perf, flow_node);
-		rmnet_perf_core_flush_curr_pkt(perf, skb, pkt_info,
-					       pkt_info->header_len +
+		rmnet_perf_opt_flush_single_flow_node(flow_node);
+		rmnet_perf_core_flush_curr_pkt(pkt_info,
+					       pkt_info->ip_len +
+					       pkt_info->trans_len +
 					       pkt_info->payload_len, false,
 					       false);
 		update_udp_flush_stat(RMNET_PERF_UDP_OPT_FLAG_MISMATCH);
@@ -122,17 +125,15 @@
 	/* Go ahead and insert the packet now if we're not holding anything.
 	 * We know at this point that it's a normal packet in the flow
 	 */
-	if (!flow_node->num_pkts_held) {
-		rmnet_perf_opt_insert_pkt_in_flow(skb, flow_node, pkt_info);
-		return;
-	}
+	if (!flow_node->num_pkts_held)
+		goto insert;
 
-	rc = udp_pkt_can_be_merged(skb, flow_node, pkt_info);
-	if (rc == RMNET_PERF_UDP_OPT_FLUSH_SOME) {
-		rmnet_perf_opt_flush_single_flow_node(perf, flow_node);
-		rmnet_perf_opt_insert_pkt_in_flow(skb, flow_node, pkt_info);
-	} else if (rc == RMNET_PERF_UDP_OPT_MERGE_SUCCESS) {
+	rc = udp_pkt_can_be_merged(flow_node, pkt_info);
+	if (rc == RMNET_PERF_UDP_OPT_FLUSH_SOME)
+		rmnet_perf_opt_flush_single_flow_node(flow_node);
+	else if (rc == RMNET_PERF_UDP_OPT_MERGE_SUCCESS)
 		pkt_info->first_packet = false;
-		rmnet_perf_opt_insert_pkt_in_flow(skb, flow_node, pkt_info);
-	}
+
+insert:
+	rmnet_perf_opt_insert_pkt_in_flow(flow_node, pkt_info);
 }
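
For clarity, the gso_len selection added in both the tcp_opt and udp_opt hunks above prefers the coalescing segment size carried in the frag descriptor and falls back to the raw payload length only when no such metadata exists. A minimal sketch of that decision follows; struct example_frag_desc is a stand-in for the driver's descriptor, not its real definition.

#include <stdint.h>

/* Stand-in for the driver's frag descriptor: gso_size is zero when the
 * lower layer supplied no coalescing information for this packet.
 */
struct example_frag_desc {
	uint16_t gso_size;
};

/* Prefer the per-segment size reported by the descriptor; otherwise the
 * packet's own payload length is the best available estimate.
 */
static uint16_t pick_gso_len(const struct example_frag_desc *desc,
			     uint16_t payload_len)
{
	if (desc && desc->gso_size)
		return desc->gso_size;
	return payload_len;
}
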
diff --git a/drivers/rmnet/perf/rmnet_perf_udp_opt.h b/drivers/rmnet/perf/rmnet_perf_udp_opt.h
index b48d79a..6be35dd 100644
--- a/drivers/rmnet/perf/rmnet_perf_udp_opt.h
+++ b/drivers/rmnet/perf/rmnet_perf_udp_opt.h
@@ -33,8 +33,7 @@
 	RMNET_PERF_UDP_OPT_NUM_CONDITIONS
 };
 
-void rmnet_perf_udp_opt_ingress(struct rmnet_perf *perf, struct sk_buff *skb,
-				struct rmnet_perf_opt_flow_node *flow_node,
+void rmnet_perf_udp_opt_ingress(struct rmnet_perf_opt_flow_node *flow_node,
 				struct rmnet_perf_pkt_info *pkt_info,
 				bool flush);
 #endif /* _RMNET_PERF_UDP_OPT_H_ */
diff --git a/drivers/rmnet/shs/Android.mk b/drivers/rmnet/shs/Android.mk
index c7511f9..b150417 100644
--- a/drivers/rmnet/shs/Android.mk
+++ b/drivers/rmnet/shs/Android.mk
@@ -1,6 +1,7 @@
 ifneq ($(TARGET_PRODUCT),qssi)
 RMNET_SHS_DLKM_PLATFORMS_LIST := msmnile
 RMNET_SHS_DLKM_PLATFORMS_LIST += kona
+RMNET_SHS_DLKM_PLATFORMS_LIST += lito
 
 ifeq ($(call is-board-platform-in-list, $(RMNET_SHS_DLKM_PLATFORMS_LIST)),true)
 #Make file to create RMNET_SHS DLKM
@@ -13,13 +14,12 @@
 LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
 LOCAL_MODULE := rmnet_shs.ko
 
-LOCAL_SRC_FILES := rmnet_shs_main.c rmnet_shs_config.c rmnet_shs_wq.c
+LOCAL_SRC_FILES := rmnet_shs_main.c rmnet_shs_config.c rmnet_shs_wq.c rmnet_shs_freq.c rmnet_shs_wq_mem.c rmnet_shs_wq_genl.c
 
 RMNET_SHS_BLD_DIR := ../../vendor/qcom/opensource/data-kernel/drivers/rmnet/shs
 DLKM_DIR := ./device/qcom/common/dlkm
 
 KBUILD_OPTIONS := $(RMNET_SHS_BLD_DIR)
-LOCAL_MODULE_TAGS := debug
 
 $(warning $(DLKM_DIR))
 include $(DLKM_DIR)/AndroidKernelModule.mk
diff --git a/drivers/rmnet/shs/Kbuild b/drivers/rmnet/shs/Kbuild
index 055d856..196d128 100644
--- a/drivers/rmnet/shs/Kbuild
+++ b/drivers/rmnet/shs/Kbuild
@@ -1,2 +1,2 @@
 obj-m += rmnet_shs.o
-rmnet_shs-y := rmnet_shs_config.o rmnet_shs_main.o rmnet_shs_wq.o
+rmnet_shs-y := rmnet_shs_config.o rmnet_shs_main.o rmnet_shs_wq.o rmnet_shs_freq.o rmnet_shs_wq_mem.o rmnet_shs_wq_genl.o
diff --git a/drivers/rmnet/shs/rmnet_shs.h b/drivers/rmnet/shs/rmnet_shs.h
index 3632b3c..f6ce09e 100644
--- a/drivers/rmnet/shs/rmnet_shs.h
+++ b/drivers/rmnet/shs/rmnet_shs.h
@@ -19,6 +19,8 @@
 #ifndef _RMNET_SHS_H_
 #define _RMNET_SHS_H_
 
+#include "rmnet_shs_freq.h"
+
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h>
@@ -32,6 +34,7 @@
 #define RMNET_SHS_MAX_SKB_INACTIVE_TSEC 30
 #define MAX_SILVER_CORES 4
 #define MAX_CPUS  8
+#define PERF_MASK 0xF0
 
 /* RPS mask change's Default core for orphaned CPU flows */
 #define MAIN_CORE 0
@@ -51,14 +54,14 @@
 //#define RMNET_SHS_UDP_PPS_SILVER_CORE_UPPER_THRESH 90000
 //#define RMNET_SHS_TCP_PPS_SILVER_CORE_UPPER_THRESH 90000
 
-#define SHS_TRACE_ERR(...) if (rmnet_shs_debug) \
-	trace_rmnet_shs_err(__VA_ARGS__)
+#define SHS_TRACE_ERR(...) \
+  do { if (rmnet_shs_debug) trace_rmnet_shs_err(__VA_ARGS__); } while (0)
 
-#define SHS_TRACE_HIGH(...) if (rmnet_shs_debug) \
-	trace_rmnet_shs_high(__VA_ARGS__)
+#define SHS_TRACE_HIGH(...) \
+  do { if (rmnet_shs_debug) trace_rmnet_shs_high(__VA_ARGS__); } while (0)
 
-#define SHS_TRACE_LOW(...) if (rmnet_shs_debug) \
-	trace_rmnet_shs_low(__VA_ARGS__)
+#define SHS_TRACE_LOW(...) \
+  do { if (rmnet_shs_debug) trace_rmnet_shs_low(__VA_ARGS__); } while (0)
 
 #define RMNET_SHS_MAX_SILVER_CORE_BURST_CAPACITY  204800
 
@@ -74,6 +77,9 @@
 #define RMNET_SHS_UDP_PPS_PERF_CPU_LTHRESH 40000
 #define RMNET_SHS_TCP_PPS_PERF_CPU_LTHRESH (40000*RMNET_SHS_TCP_COALESCING_RATIO)
 
+#define RMNET_SHS_UDP_PPS_HEADROOM 20000
+#define RMNET_SHS_GOLD_BALANCING_THRESH (RMNET_SHS_UDP_PPS_PERF_CPU_UTHRESH / 2)
+
 struct core_flush_s {
 	struct  hrtimer core_timer;
 	struct work_struct work;
@@ -89,8 +95,8 @@
 	struct rmnet_port *port;
 	struct  core_flush_s core_flush[MAX_CPUS];
 	u64 core_skbs[MAX_CPUS];
-	long int num_bytes_parked;
-	long int num_pkts_parked;
+	long num_bytes_parked;
+	long num_pkts_parked;
 	u32 is_reg_dl_mrk_ind;
 	u16 num_flows;
 	u8 is_pkt_parked;
@@ -298,6 +304,10 @@
 
 int rmnet_shs_chk_and_flush_node(struct rmnet_shs_skbn_s *node,
 				 u8 force_flush, u8 ctxt);
+void rmnet_shs_dl_hdr_handler_v2(struct rmnet_map_dl_ind_hdr *dlhdr,
+			      struct rmnet_map_control_command_header *qcmd);
+void rmnet_shs_dl_trl_handler_v2(struct rmnet_map_dl_ind_trl *dltrl,
+			      struct rmnet_map_control_command_header *qcmd);
 void rmnet_shs_dl_hdr_handler(struct rmnet_map_dl_ind_hdr *dlhdr);
 void rmnet_shs_dl_trl_handler(struct rmnet_map_dl_ind_trl *dltrl);
 void rmnet_shs_assign(struct sk_buff *skb, struct rmnet_port *port);
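
The SHS_TRACE_* macros above are rewrapped in do { ... } while (0); a minimal sketch of the dangling-else hazard the old single-if form allows is shown below (TRACE_OLD/TRACE_NEW and the surrounding code are illustrative only).

#include <stdio.h>

static int debug_enabled;

/* Old form: a bare if-statement; a following 'else' binds to it. */
#define TRACE_OLD(msg) if (debug_enabled) printf("%s\n", msg)

/* New form: expands to a single statement in every context. */
#define TRACE_NEW(msg) do { if (debug_enabled) printf("%s\n", msg); } while (0)

static void example(int pkt_ok)
{
	if (pkt_ok)
		TRACE_OLD("ok");	/* the 'else' below attaches to the macro's if */
	else
		printf("dropped\n");	/* wrongly runs when pkt_ok && !debug_enabled */

	if (pkt_ok)
		TRACE_NEW("ok");	/* behaves as intended */
	else
		printf("dropped\n");	/* runs only when !pkt_ok */
}
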
diff --git a/drivers/rmnet/shs/rmnet_shs_config.c b/drivers/rmnet/shs/rmnet_shs_config.c
index 1bb731f..e6b4002 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.c
+++ b/drivers/rmnet/shs/rmnet_shs_config.c
@@ -16,9 +16,11 @@
 #include <linux/netdevice.h>
 #include <linux/module.h>
 #include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h>
+#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h>
 #include "rmnet_shs_config.h"
 #include "rmnet_shs.h"
 #include "rmnet_shs_wq.h"
+#include "rmnet_shs_wq_genl.h"
 
 MODULE_LICENSE("GPL v2");
 
@@ -31,7 +33,7 @@
 module_param(rmnet_shs_stats_enabled, uint, 0644);
 MODULE_PARM_DESC(rmnet_shs_stats_enabled, "Enable Disable stats collection");
 
-unsigned long int rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
+unsigned long rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
 module_param_array(rmnet_shs_crit_err, ulong, 0, 0444);
 MODULE_PARM_DESC(rmnet_shs_crit_err, "rmnet shs critical error type");
 
@@ -40,43 +42,34 @@
 
 static struct notifier_block rmnet_shs_dev_notifier __read_mostly = {
 	.notifier_call = rmnet_shs_dev_notify_cb,
+	.priority = 2,
 };
 
-static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
-				    unsigned long event, void *data);
-
 static int rmnet_vnd_total;
 /* Enable smart hashing capability upon call to initialize module*/
 int __init rmnet_shs_module_init(void)
 {
-
-	if (unlikely(rmnet_shs_debug))
-		pr_info("%s(): Initializing rmnet SHS module\n", __func__);
-
-	if (!rmnet_shs_skb_entry)
-		RCU_INIT_POINTER(rmnet_shs_skb_entry, rmnet_shs_assign);
-
+	pr_info("%s(): Starting rmnet SHS module\n", __func__);
 	trace_rmnet_shs_high(RMNET_SHS_MODULE, RMNET_SHS_MODULE_INIT,
 			    0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+
+	if (rmnet_shs_wq_genl_init()) {
+		rm_err("%s", "SHS_GNL: Failed to init generic netlink");
+	}
+
 	return register_netdevice_notifier(&rmnet_shs_dev_notifier);
 }
 
 /* Remove smart hashing capability upon call to initialize module */
 void __exit rmnet_shs_module_exit(void)
 {
-	RCU_INIT_POINTER(rmnet_shs_skb_entry, NULL);
-
-	if (rmnet_shs_cfg.rmnet_shs_init_complete) {
-		rmnet_shs_cancel_table();
-		rmnet_shs_rx_wq_exit();
-		rmnet_shs_wq_exit();
-		rmnet_shs_exit();
-	}
-	unregister_netdevice_notifier(&rmnet_shs_dev_notifier);
-	if (unlikely(rmnet_shs_debug))
-		pr_info("Exiting rmnet_shs module");
 	trace_rmnet_shs_high(RMNET_SHS_MODULE, RMNET_SHS_MODULE_EXIT,
 			    0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+	unregister_netdevice_notifier(&rmnet_shs_dev_notifier);
+
+	rmnet_shs_wq_genl_deinit();
+
+	pr_info("%s(): Exiting rmnet SHS module\n", __func__);
 }
 
 static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
@@ -84,28 +77,32 @@
 {
 
 	struct net_device *dev = netdev_notifier_info_to_dev(data);
-	static struct net_device *phy_dev;
+	struct rmnet_priv *priv;
+	struct rmnet_port *port;
+	int ret = 0;
 
 	if (!dev) {
 		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
 		return NOTIFY_DONE;
 	}
 
-	switch (event) {
-	case NETDEV_GOING_DOWN:
-		rmnet_shs_wq_reset_ep_active(dev);
+	if (!(strncmp(dev->name, "rmnet_data", 10) == 0 ||
+	      strncmp(dev->name, "r_rmnet_data", 12) == 0))
+		return NOTIFY_DONE;
 
-		if (strncmp(dev->name, "rmnet_data", 10) == 0)
-			rmnet_vnd_total--;
+	switch (event) {
+	case NETDEV_UNREGISTER:
+		rmnet_shs_wq_reset_ep_active(dev);
+		rmnet_vnd_total--;
 
 		/* Deinitialize if last vnd is going down or if
 		 * phy_dev is going down.
 		 */
-		if ((rmnet_is_real_dev_registered(dev) &&
-		    (!strcmp(dev->name, "rmnet_ipa0") ||
-		    !strcmp(dev->name, "rmnet_mhi0"))) &&
-		    rmnet_shs_cfg.rmnet_shs_init_complete) {
+		if (!rmnet_vnd_total && rmnet_shs_cfg.rmnet_shs_init_complete) {
+			pr_info("rmnet_shs deinit %s going down ", dev->name);
 			RCU_INIT_POINTER(rmnet_shs_skb_entry, NULL);
+			qmi_rmnet_ps_ind_deregister(rmnet_shs_cfg.port,
+					    &rmnet_shs_cfg.rmnet_idl_ind_cb);
 			rmnet_shs_cancel_table();
 			rmnet_shs_rx_wq_exit();
 			rmnet_shs_wq_exit();
@@ -117,47 +114,74 @@
 		}
 		break;
 
-	case NETDEV_UP:
-		if (strncmp(dev->name, "rmnet_ipa0", 10) == 0 ||
-		    strncmp(dev->name, "rmnet_mhi0", 10) == 0)
-			phy_dev = dev;
+	case NETDEV_REGISTER:
+		rmnet_vnd_total++;
 
-
-		if (strncmp(dev->name, "rmnet_data", 10) == 0){
-			rmnet_vnd_total++;
-		}
-
-		if (strncmp(dev->name, "rmnet_data", 10) == 0) {
-			/* Need separate if check to avoid
-			 * NULL dereferencing
-			 */
-
-			if (phy_dev && !rmnet_shs_cfg.rmnet_shs_init_complete) {
-				rmnet_shs_init(phy_dev, dev);
-				rmnet_shs_wq_init(phy_dev);
-				rmnet_shs_rx_wq_init();
-				rmnet_shs_cfg.is_timer_init = 1;
-				rmnet_shs_cfg.dl_mrk_ind_cb.priority =
-				   RMNET_SHS;
-				rmnet_shs_cfg.dl_mrk_ind_cb.dl_hdr_handler =
-				   &rmnet_shs_dl_hdr_handler;
-				rmnet_shs_cfg.dl_mrk_ind_cb.dl_trl_handler =
-				   &rmnet_shs_dl_trl_handler;
-				trace_rmnet_shs_high(RMNET_SHS_MODULE,
-						     RMNET_SHS_MODULE_INIT_WQ,
-						     0xDEF, 0xDEF, 0xDEF,
-						     0xDEF, NULL, NULL);
-				rmnet_shs_cfg.rmnet_idl_ind_cb.ps_on_handler =
-						&rmnet_shs_ps_on_hdlr;
-				rmnet_shs_cfg.rmnet_idl_ind_cb.ps_off_handler =
-						&rmnet_shs_ps_off_hdlr;
-				RCU_INIT_POINTER(rmnet_shs_skb_entry,
-						 rmnet_shs_assign);
-
-
+		if (rmnet_vnd_total && !rmnet_shs_cfg.rmnet_shs_init_complete) {
+			pr_info("rmnet_shs initializing %s", dev->name);
+			priv = netdev_priv(dev);
+			port = rmnet_get_port(priv->real_dev);
+			if (!port) {
+				pr_err("rmnet_shs: invalid rmnet_port");
+				break;
 			}
-			rmnet_shs_wq_set_ep_active(dev);
+			rmnet_shs_init(priv->real_dev, dev);
+			rmnet_shs_wq_init(priv->real_dev);
+			rmnet_shs_rx_wq_init();
 
+			rmnet_shs_cfg.is_timer_init = 1;
+		}
+		rmnet_shs_wq_set_ep_active(dev);
+
+		break;
+	case NETDEV_UP:
+		if (!rmnet_shs_cfg.is_reg_dl_mrk_ind &&
+		    rmnet_shs_cfg.rmnet_shs_init_complete) {
+
+			port = rmnet_shs_cfg.port;
+			if (!port) {
+				pr_err("rmnet_shs: invalid rmnet_cfg_port");
+				break;
+			}
+
+			rmnet_shs_cfg.dl_mrk_ind_cb.priority =
+				RMNET_SHS;
+			if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2) {
+				rmnet_shs_cfg.dl_mrk_ind_cb.dl_hdr_handler_v2 =
+					&rmnet_shs_dl_hdr_handler_v2;
+				rmnet_shs_cfg.dl_mrk_ind_cb.dl_trl_handler_v2 =
+					&rmnet_shs_dl_trl_handler_v2;
+			} else {
+				rmnet_shs_cfg.dl_mrk_ind_cb.dl_hdr_handler =
+					&rmnet_shs_dl_hdr_handler;
+				rmnet_shs_cfg.dl_mrk_ind_cb.dl_trl_handler =
+					&rmnet_shs_dl_trl_handler;
+			}
+			rmnet_shs_cfg.rmnet_idl_ind_cb.ps_on_handler =
+					&rmnet_shs_ps_on_hdlr;
+			rmnet_shs_cfg.rmnet_idl_ind_cb.ps_off_handler =
+					&rmnet_shs_ps_off_hdlr;
+
+			ret = rmnet_map_dl_ind_register(port,
+						        &rmnet_shs_cfg.dl_mrk_ind_cb);
+			if (ret)
+				pr_err("%s(): rmnet dl_ind registration fail\n",
+				       __func__);
+
+			ret = qmi_rmnet_ps_ind_register(port,
+						        &rmnet_shs_cfg.rmnet_idl_ind_cb);
+			if (ret)
+				pr_err("%s(): rmnet ps_ind registration fail\n",
+				       __func__);
+			rmnet_shs_update_cfg_mask();
+			rmnet_shs_wq_refresh_new_flow_list();
+			rmnet_shs_cfg.is_reg_dl_mrk_ind = 1;
+			trace_rmnet_shs_high(RMNET_SHS_MODULE,
+					     RMNET_SHS_MODULE_INIT_WQ,
+					     0xDEF, 0xDEF, 0xDEF,
+					     0xDEF, NULL, NULL);
+			RCU_INIT_POINTER(rmnet_shs_skb_entry,
+					 rmnet_shs_assign);
 		}
 
 		break;
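
The notifier rework above keys SHS bring-up and tear-down off NETDEV_REGISTER/NETDEV_UNREGISTER events for rmnet data devices instead of NETDEV_UP/NETDEV_GOING_DOWN. A minimal, self-contained sketch of that pattern is below; the example_* identifiers are illustrative and not part of the driver.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static int example_vnd_count;

static int example_dev_notify_cb(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	struct net_device *dev = netdev_notifier_info_to_dev(data);

	/* React only to rmnet virtual devices, as the driver does */
	if (strncmp(dev->name, "rmnet_data", 10) != 0)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		/* First VND registered: initialize shared state here */
		example_vnd_count++;
		break;
	case NETDEV_UNREGISTER:
		/* Last VND gone: tear shared state down here */
		example_vnd_count--;
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block example_dev_notifier = {
	.notifier_call = example_dev_notify_cb,
	.priority = 2,	/* run ahead of default-priority notifiers */
};

static int __init example_init(void)
{
	return register_netdevice_notifier(&example_dev_notifier);
}

static void __exit example_exit(void)
{
	unregister_netdevice_notifier(&example_dev_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL v2");
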
diff --git a/drivers/rmnet/shs/rmnet_shs_config.h b/drivers/rmnet/shs/rmnet_shs_config.h
index d033723..dc385e4 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.h
+++ b/drivers/rmnet/shs/rmnet_shs_config.h
@@ -42,12 +42,16 @@
 	RMNET_SHS_CPU_PKTLEN_ERR,
 	RMNET_SHS_NULL_SKB_HEAD,
 	RMNET_SHS_RPS_MASK_CHANGE,
+	RMNET_SHS_WQ_INVALID_CPU_ERR,
+	RMNET_SHS_WQ_INVALID_PTR_ERR,
+	RMNET_SHS_WQ_NODE_MALLOC_ERR,
+	RMNET_SHS_WQ_NL_SOCKET_ERR,
 	RMNET_SHS_CRIT_ERR_MAX
 };
 
 extern unsigned int rmnet_shs_debug;
 extern unsigned int rmnet_shs_stats_enabled;
-extern unsigned long int rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
+extern unsigned long rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
 extern struct rmnet_shs_cfg_s rmnet_shs_cfg;
 extern int rmnet_is_real_dev_registered(const struct net_device *real_dev);
 
diff --git a/drivers/rmnet/shs/rmnet_shs_freq.c b/drivers/rmnet/shs/rmnet_shs_freq.c
new file mode 100644
index 0000000..c6123c6
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_freq.c
@@ -0,0 +1,165 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash stamping solution
+ *
+ */
+#include <linux/module.h>
+#include "rmnet_shs.h"
+#include "rmnet_shs_freq.h"
+
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+
+#define MAX_FREQ INT_MAX
+#define MIN_FREQ 0
+#define BOOST_FREQ MAX_FREQ
+
+struct cpu_freq {
+	unsigned int freq_floor;
+	unsigned int freq_ceil;
+
+};
+
+unsigned int rmnet_shs_freq_enable __read_mostly = 1;
+module_param(rmnet_shs_freq_enable, uint, 0644);
+MODULE_PARM_DESC(rmnet_shs_freq_enable, "Enable/disable freq boost feature");
+
+struct workqueue_struct *shs_boost_wq;
+static DEFINE_PER_CPU(struct cpu_freq, cpu_boosts);
+static struct work_struct boost_cpu;
+
+static int rmnet_shs_freq_notify(struct notifier_block *nb,
+				 unsigned long val,
+				 void *data)
+{
+	struct cpufreq_policy *policy = data;
+	unsigned int cpu = policy->cpu;
+	struct cpu_freq *boost = &per_cpu(cpu_boosts, cpu);
+
+	switch (val) {
+	case CPUFREQ_ADJUST:
+		if (rmnet_shs_freq_enable) {
+			cpufreq_verify_within_limits(policy,
+						     boost->freq_floor,
+						     MAX_FREQ);
+			trace_rmnet_freq_update(cpu, policy->min,
+						policy->max);
+		}
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block freq_boost_nb = {
+	.notifier_call = rmnet_shs_freq_notify,
+};
+
+static void update_cpu_policy(struct work_struct *work)
+{
+	unsigned int i;
+
+	get_online_cpus();
+	for_each_online_cpu(i) {
+		cpufreq_update_policy(i);
+	}
+
+	put_online_cpus();
+}
+
+void rmnet_shs_reset_freq(void)
+{
+	struct cpu_freq *boost;
+	int i;
+
+	for_each_possible_cpu(i) {
+		boost = &per_cpu(cpu_boosts, i);
+		boost->freq_floor = MIN_FREQ;
+		boost->freq_ceil = MAX_FREQ;
+	}
+}
+
+void rmnet_shs_boost_cpus(void)
+{
+	struct cpu_freq *boost;
+	int i;
+
+	for_each_possible_cpu(i) {
+
+		if ((1 << i) & PERF_MASK)
+			continue;
+		boost = &per_cpu(cpu_boosts, i);
+		boost->freq_floor = BOOST_FREQ;
+		boost->freq_ceil = MAX_FREQ;
+		trace_rmnet_freq_boost(i, boost->freq_floor);
+	}
+
+	if (work_pending(&boost_cpu))
+		return;
+
+	if (shs_boost_wq)
+		queue_work(shs_boost_wq, &boost_cpu);
+}
+
+void rmnet_shs_reset_cpus(void)
+{
+	struct cpu_freq *boost;
+	int i;
+
+	for_each_possible_cpu(i) {
+
+		if ((1 << i) & PERF_MASK)
+			continue;
+		boost = &per_cpu(cpu_boosts, i);
+		boost->freq_floor = MIN_FREQ;
+		boost->freq_ceil = MAX_FREQ;
+		trace_rmnet_freq_reset(i, boost->freq_floor);
+	}
+	if (work_pending(&boost_cpu))
+		return;
+
+	if (shs_boost_wq)
+		queue_work(shs_boost_wq, &boost_cpu);
+}
+
+int rmnet_shs_freq_init(void)
+{
+
+	if (!shs_boost_wq)
+		shs_boost_wq = alloc_workqueue("shs_boost_wq", WQ_HIGHPRI, 0);
+
+	if (!shs_boost_wq)
+		return -EFAULT;
+	INIT_WORK(&boost_cpu, update_cpu_policy);
+
+	if (rmnet_shs_freq_enable)
+		cpufreq_register_notifier(&freq_boost_nb,
+					  CPUFREQ_POLICY_NOTIFIER);
+	rmnet_shs_reset_freq();
+	return 0;
+}
+
+int rmnet_shs_freq_exit(void)
+{
+	rmnet_shs_reset_freq();
+	cancel_work_sync(&boost_cpu);
+
+	if (shs_boost_wq) {
+		destroy_workqueue(shs_boost_wq);
+		shs_boost_wq = NULL;
+	}
+
+	if (rmnet_shs_freq_enable)
+		cpufreq_unregister_notifier(&freq_boost_nb,
+					    CPUFREQ_POLICY_NOTIFIER);
+	return 0;
+}
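
rmnet_shs_freq.c above raises the frequency floor of the non-perf CPUs by intercepting cpufreq policy updates. The sketch below isolates that mechanism for a single floor value; the example_* names are illustrative, and the CPUFREQ_POLICY_NOTIFIER/CPUFREQ_ADJUST interface assumed here is the one available on 4.14-era kernels.

#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static unsigned int example_freq_floor;	/* 0 means no boost requested */

static int example_freq_notify(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;

	/* CPUFREQ_ADJUST fires whenever a policy is re-evaluated; clamping
	 * here raises policy->min while leaving the ceiling untouched.
	 */
	if (val == CPUFREQ_ADJUST && example_freq_floor)
		cpufreq_verify_within_limits(policy, example_freq_floor,
					     policy->cpuinfo.max_freq);

	return NOTIFY_OK;
}

static struct notifier_block example_freq_nb = {
	.notifier_call = example_freq_notify,
};

/* Apply a new floor: store it, then ask cpufreq to re-evaluate each
 * online policy so the notifier above gets a chance to clamp it.
 */
static void example_set_floor(unsigned int floor_khz)
{
	int cpu;

	example_freq_floor = floor_khz;
	get_online_cpus();
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);
	put_online_cpus();
}

static int example_freq_setup(void)
{
	return cpufreq_register_notifier(&example_freq_nb,
					 CPUFREQ_POLICY_NOTIFIER);
}
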
diff --git a/drivers/rmnet/shs/rmnet_shs_freq.h b/drivers/rmnet/shs/rmnet_shs_freq.h
new file mode 100644
index 0000000..7be4538
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_freq.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash solution
+ *
+ */
+
+#ifndef _RMNET_SHS_FREQ_H_
+#define _RMNET_SHS_FREQ_H_
+
+int rmnet_shs_freq_init(void);
+int rmnet_shs_freq_exit(void);
+void rmnet_shs_boost_cpus(void);
+void rmnet_shs_reset_cpus(void);
+
+#endif
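
A rough sketch of how this interface ends up being used by rmnet_shs_main.c later in this patch: init once during SHS bring-up, boost while packets are parked on a prioritized core, reset from the core-flush timer, and exit during teardown. The wrapper names below are illustrative only.

#include "rmnet_shs_freq.h"

/* Lifecycle as wired up in rmnet_shs_main.c by this patch */
static int example_shs_bringup(void)
{
	return rmnet_shs_freq_init();	/* registers the cpufreq notifier */
}

static void example_on_core_prioritized(void)
{
	rmnet_shs_boost_cpus();		/* raise the floor on non-perf CPUs */
}

static void example_on_core_flush_timer(void)
{
	rmnet_shs_reset_cpus();		/* restore the default floor */
}

static void example_shs_teardown(void)
{
	rmnet_shs_freq_exit();		/* unregisters the notifier */
}
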
diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c
index 7d6fb92..ae66460 100755
--- a/drivers/rmnet/shs/rmnet_shs_main.c
+++ b/drivers/rmnet/shs/rmnet_shs_main.c
@@ -30,6 +30,8 @@
 #define NS_IN_MS 1000000
 #define LPWR_CLUSTER 0
 #define PERF_CLUSTER 4
+#define PERF_CORES 4
+
 #define INVALID_CPU -1
 
 #define WQ_DELAY 2000000
@@ -44,6 +46,8 @@
 DEFINE_SPINLOCK(rmnet_shs_ht_splock);
 DEFINE_HASHTABLE(RMNET_SHS_HT, RMNET_SHS_HT_SIZE);
 struct rmnet_shs_cpu_node_s rmnet_shs_cpu_node_tbl[MAX_CPUS];
+int cpu_num_flows[MAX_CPUS];
+
 /* Maintains a list of flows associated with a core
  * Also keeps track of number of packets processed on that core
  */
@@ -53,11 +57,11 @@
 
 struct rmnet_shs_flush_work shs_rx_work;
 /* Delayed workqueue that will be used to flush parked packets*/
-unsigned long int rmnet_shs_switch_reason[RMNET_SHS_SWITCH_MAX_REASON];
+unsigned long rmnet_shs_switch_reason[RMNET_SHS_SWITCH_MAX_REASON];
 module_param_array(rmnet_shs_switch_reason, ulong, 0, 0444);
 MODULE_PARM_DESC(rmnet_shs_switch_reason, "rmnet shs skb core switch type");
 
-unsigned long int rmnet_shs_flush_reason[RMNET_SHS_FLUSH_MAX_REASON];
+unsigned long rmnet_shs_flush_reason[RMNET_SHS_FLUSH_MAX_REASON];
 module_param_array(rmnet_shs_flush_reason, ulong, 0, 0444);
 MODULE_PARM_DESC(rmnet_shs_flush_reason, "rmnet shs skb flush trigger type");
 
@@ -116,6 +120,8 @@
 			    0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
 	list_del_init(&node->node_id);
+	cpu_num_flows[node->map_cpu]--;
+
 }
 
 void rmnet_shs_cpu_node_add(struct rmnet_shs_skbn_s *node,
@@ -125,15 +131,18 @@
 			    0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
 	list_add(&node->node_id, hd);
+	cpu_num_flows[node->map_cpu]++;
 }
 
 void rmnet_shs_cpu_node_move(struct rmnet_shs_skbn_s *node,
-			     struct list_head *hd)
+			     struct list_head *hd, int oldcpu)
 {
 	SHS_TRACE_LOW(RMNET_SHS_CPU_NODE, RMNET_SHS_CPU_NODE_FUNC_MOVE,
 			    0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
 	list_move(&node->node_id, hd);
+	cpu_num_flows[node->map_cpu]++;
+	cpu_num_flows[oldcpu]--;
 }
 
 /* Evaluates the incoming transport protocol of the incoming skb. Determines
@@ -142,29 +151,101 @@
 int rmnet_shs_is_skb_stamping_reqd(struct sk_buff *skb)
 {
 	int ret_val = 0;
+	struct iphdr *ip4h;
+	struct ipv6hdr *ip6h;
 
-	/* SHS will ignore ICMP and frag pkts completely */
-	switch (skb->protocol) {
-	case htons(ETH_P_IP):
-		if (!ip_is_fragment(ip_hdr(skb)) &&
-		    ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
-		     (ip_hdr(skb)->protocol == IPPROTO_UDP)))
-			ret_val =  1;
+	/* This only applies to linear SKBs */
+	if (!skb_is_nonlinear(skb)) {
+		/* SHS will ignore ICMP and frag pkts completely */
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			if (!ip_is_fragment(ip_hdr(skb)) &&
+			((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+			((ip_hdr(skb)->protocol == IPPROTO_UDP))) {
+				ret_val =  1;
+				break;
+			}
+			/* RPS logic is skipped if RPS hash is 0 while sw_hash
+			 * is set as active and packet is processed on the same
+			 * CPU as the initial caller.
+			 */
+			if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
+			    skb->hash = 0;
+			    skb->sw_hash = 1;
+			}
+			break;
 
-		break;
+		case htons(ETH_P_IPV6):
+			if (!(ipv6_hdr(skb)->nexthdr == NEXTHDR_FRAGMENT) &&
+			((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) ||
+			(ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))) {
+				ret_val =  1;
+				break;
+			}
 
-	case htons(ETH_P_IPV6):
-		if (!(ipv6_hdr(skb)->nexthdr == NEXTHDR_FRAGMENT) &&
-		    ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) ||
-		     (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)))
-			ret_val =  1;
+			/* RPS logic is skipped if RPS hash is 0 while sw_hash
+			 * is set as active and packet is processed on the same
+			 * CPU as the initial caller.
+			 */
+			if (ipv6_hdr(skb)->nexthdr == IPPROTO_ICMP) {
+			    skb->hash = 0;
+			    skb->sw_hash = 1;
+			}
 
-		break;
+			break;
 
-	default:
-		break;
+		default:
+			break;
+		}
+	} else {
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			ip4h = (struct iphdr *)rmnet_map_data_ptr(skb);
+
+			if (!(ntohs(ip4h->frag_off) & IP_MF) &&
+			    ((ntohs(ip4h->frag_off) & IP_OFFSET) == 0) &&
+			    (ip4h->protocol == IPPROTO_TCP ||
+			     ip4h->protocol == IPPROTO_UDP)) {
+				ret_val =  1;
+				break;
+			}
+			/* RPS logic is skipped if RPS hash is 0 while sw_hash
+			 * is set as active and packet is processed on the same
+			 * CPU as the initial caller.
+			 */
+			if (ip4h->protocol == IPPROTO_ICMP) {
+			    skb->hash = 0;
+			    skb->sw_hash = 1;
+			}
+
+			break;
+
+		case htons(ETH_P_IPV6):
+			ip6h = (struct ipv6hdr *)rmnet_map_data_ptr(skb);
+
+			if (!(ip6h->nexthdr == NEXTHDR_FRAGMENT) &&
+			((ip6h->nexthdr == IPPROTO_TCP) ||
+			(ip6h->nexthdr == IPPROTO_UDP))) {
+				ret_val =  1;
+				break;
+			}
+			/* RPS logic is skipped if RPS hash is 0 while sw_hash
+			 * is set as active and packet is processed on the same
+			 * CPU as the initial caller.
+			 */
+			if (ip6h->nexthdr == IPPROTO_ICMP) {
+			    skb->hash = 0;
+			    skb->sw_hash = 1;
+			}
+
+			break;
+
+		default:
+			break;
+		}
+
+
 	}
-
 	SHS_TRACE_LOW(RMNET_SHS_SKB_STAMPING, RMNET_SHS_SKB_STAMPING_END,
 			    ret_val, 0xDEF, 0xDEF, 0xDEF, skb, NULL);
 
@@ -176,7 +257,7 @@
 
 	struct  timespec time1;
 	struct  timespec *time2;
-	long int curinterval;
+	long curinterval;
 	int maxinterval = (rmnet_shs_inst_rate_interval < MIN_MS) ? MIN_MS :
 			   rmnet_shs_inst_rate_interval;
 
@@ -212,21 +293,39 @@
 /* We deliver packets to GRO module only for TCP traffic*/
 static int rmnet_shs_check_skb_can_gro(struct sk_buff *skb)
 {
-	int ret_val = -EPROTONOSUPPORT;
+	int ret_val = 0;
+	struct iphdr *ip4h;
+	struct ipv6hdr *ip6h;
 
-	switch (skb->protocol) {
-	case htons(ETH_P_IP):
-		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-			ret_val =  0;
-		break;
+	if (!skb_is_nonlinear(skb)) {
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+				ret_val = 1;
+			break;
 
-	case htons(ETH_P_IPV6):
-		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
-			ret_val =  0;
-		break;
-	default:
-		ret_val =  -EPROTONOSUPPORT;
-		break;
+		case htons(ETH_P_IPV6):
+			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+				ret_val = 1;
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			ip4h = (struct iphdr *)rmnet_map_data_ptr(skb);
+			if (ip4h->protocol == IPPROTO_TCP)
+				ret_val = 1;
+			break;
+		case htons(ETH_P_IPV6):
+			ip6h = (struct ipv6hdr *)rmnet_map_data_ptr(skb);
+			if (ip6h->nexthdr == IPPROTO_TCP)
+				ret_val = 1;
+			break;
+		default:
+			break;
+		}
 	}
 
 	SHS_TRACE_LOW(RMNET_SHS_SKB_CAN_GRO, RMNET_SHS_SKB_CAN_GRO_END,
@@ -244,8 +343,9 @@
 	SHS_TRACE_LOW(RMNET_SHS_DELIVER_SKB, RMNET_SHS_DELIVER_SKB_START,
 			    0xDEF, 0xDEF, 0xDEF, 0xDEF, skb, NULL);
 
-	if (!rmnet_shs_check_skb_can_gro(skb)) {
-		if ((napi = get_current_napi_context())) {
+	if (rmnet_shs_check_skb_can_gro(skb)) {
+		napi = get_current_napi_context();
+		if (napi) {
 			napi_gro_receive(napi, skb);
 		} else {
 			priv = netdev_priv(skb->dev);
@@ -267,6 +367,48 @@
 	gro_cells_receive(&priv->gro_cells, skb);
 }
 
+/* Delivers skbs after segmenting, directly to network stack */
+static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
+{
+	struct sk_buff *skb = NULL;
+	struct sk_buff *nxt_skb = NULL;
+	struct sk_buff *segs = NULL;
+	int count = 0;
+
+	SHS_TRACE_LOW(RMNET_SHS_DELIVER_SKB, RMNET_SHS_DELIVER_SKB_START,
+			    0x1, 0xDEF, 0xDEF, 0xDEF, in_skb, NULL);
+
+	segs = __skb_gso_segment(in_skb, NETIF_F_SG, false);
+	if (unlikely(IS_ERR_OR_NULL(segs))) {
+		if (ctext == RMNET_RX_CTXT)
+			netif_receive_skb(in_skb);
+		else
+			netif_rx(in_skb);
+
+		return;
+	}
+
+	/* Send segmented skbs */
+	for ((skb = segs); skb != NULL; skb = nxt_skb) {
+		nxt_skb = skb->next;
+
+		skb->hash = in_skb->hash;
+		skb->dev = in_skb->dev;
+		skb->next = NULL;
+
+		if (ctext == RMNET_RX_CTXT)
+			netif_receive_skb(skb);
+		else
+			netif_rx(skb);
+
+		count += 1;
+	}
+
+	consume_skb(in_skb);
+
+	return;
+}
+
 int rmnet_shs_flow_num_perf_cores(struct rmnet_shs_skbn_s *node_p)
 {
 	int ret = 0;
@@ -328,9 +470,9 @@
 	u8 mask = 0;
 	u8 i;
 
-	for (i = 0; i < map->len; i++) {
+	for (i = 0; i < map->len; i++)
 		mask |= 1 << map->cpus[i];
-	}
+
 	return mask;
 }
 
@@ -346,6 +488,37 @@
 	return sum;
 }
 
+int rmnet_shs_get_core_prio_flow(u8 mask)
+{
+	int ret = INVALID_CPU;
+	int least_flows = INVALID_CPU;
+	u8 curr_idx = 0;
+	u8 i;
+
+	/* Return 1st free core or the core with least # flows
+	 */
+	for (i = 0; i < MAX_CPUS; i++) {
+
+		if (!(mask & (1 << i)))
+			continue;
+
+		if (mask & (1 << i))
+			curr_idx++;
+
+		if (list_empty(&rmnet_shs_cpu_node_tbl[i].node_list_id))
+			return i;
+
+		if (cpu_num_flows[i] <= least_flows ||
+		    least_flows == INVALID_CPU) {
+			ret = i;
+			least_flows = cpu_num_flows[i];
+		}
+
+	}
+
+	return ret;
+}
+
 /* Take a index and a mask and returns what active CPU is
  * in that index.
  */
@@ -387,7 +560,7 @@
 			ret = idx;
 			break;
 		}
-		if(mask & (1 << i))
+		if (mask & (1 << i))
 			idx++;
 	}
 	return ret;
@@ -427,7 +600,8 @@
 	/* Return same perf core unless moving to gold from silver*/
 	if (rmnet_shs_cpu_node_tbl[node->map_cpu].prio &&
 	    rmnet_shs_is_lpwr_cpu(node->map_cpu)) {
-		cpu = rmnet_shs_wq_get_least_utilized_core(0xF0);
+		cpu = rmnet_shs_get_core_prio_flow(PERF_MASK &
+						   rmnet_shs_cfg.map_mask);
 		if (cpu < 0 && node->hstats != NULL)
 			cpu = node->hstats->suggested_cpu;
 	} else if (node->hstats != NULL)
@@ -439,14 +613,14 @@
 int rmnet_shs_get_hash_map_idx_to_stamp(struct rmnet_shs_skbn_s *node)
 {
 	int cpu, idx = INVALID_CPU;
-	cpu = rmnet_shs_get_suggested_cpu(node);
 
+	cpu = rmnet_shs_get_suggested_cpu(node);
 	idx = rmnet_shs_idx_from_cpu(cpu, rmnet_shs_cfg.map_mask);
 
-        /* If suggested CPU is no longer in mask. Try using current.*/
-        if (unlikely(idx < 0))
-                idx = rmnet_shs_idx_from_cpu(node->map_cpu,
-                                             rmnet_shs_cfg.map_mask);
+	/* If suggested CPU is no longer in mask. Try using current.*/
+	if (unlikely(idx < 0))
+		idx = rmnet_shs_idx_from_cpu(node->map_cpu,
+					     rmnet_shs_cfg.map_mask);
 
 	SHS_TRACE_LOW(RMNET_SHS_HASH_MAP,
 			    RMNET_SHS_HASH_MAP_IDX_TO_STAMP,
@@ -568,7 +742,7 @@
 			break;
 		}
 		node->is_shs_enabled = 1;
-		if (!map){
+		if (!map) {
 			node->is_shs_enabled = 0;
 			ret = 1;
 			break;
@@ -589,12 +763,12 @@
 		    (force_flush)) {
 			if (rmnet_shs_switch_cores) {
 
-			/* Move the amount parked to other core's count
-			 * Update old core's parked to not include diverted
-			 * packets and update new core's packets
-			 */
-			new_cpu = rmnet_shs_cpu_from_idx(cpu_map_index,
-							 rmnet_shs_cfg.map_mask);
+				/* Move the amount parked to other core's count
+				 * Update old core's parked to not include diverted
+				 * packets and update new core's packets
+				 */
+				new_cpu = rmnet_shs_cpu_from_idx(cpu_map_index,
+								 rmnet_shs_cfg.map_mask);
 				if (new_cpu < 0) {
 					ret = 1;
 					break;
@@ -607,7 +781,7 @@
 
 				if (cur_cpu_qhead < node_qhead) {
 					rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_SWITCH]++;
-					rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_TOTAL]+=
+					rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_TOTAL] +=
 							(node_qhead -
 							cur_cpu_qhead);
 				}
@@ -628,7 +802,8 @@
 				rmnet_shs_update_cpu_proc_q_all_cpus();
 				node->queue_head = cpun->qhead;
 				rmnet_shs_cpu_node_move(node,
-							&cpun->node_list_id);
+							&cpun->node_list_id,
+							cpu_num);
 				SHS_TRACE_HIGH(RMNET_SHS_FLUSH,
 					RMNET_SHS_FLUSH_NODE_CORE_SWITCH,
 					node->map_cpu, prev_cpu,
@@ -714,12 +889,13 @@
 /* Flushes all the packets parked in order for this flow */
 void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	struct sk_buff *nxt_skb = NULL;
 	u32 skbs_delivered = 0;
 	u32 skb_bytes_delivered = 0;
-	u32 hash2stamp;
-	u8 map, maplen;
+	u32 hash2stamp = 0; /* the default value of skb->hash */
+	u8 map = 0, maplen = 0;
+	u8 segment_enable = 0;
 
 	if (!node->skb_list.head)
 		return;
@@ -741,6 +917,8 @@
 			     node->skb_list.num_parked_bytes,
 			     node, node->skb_list.head);
 
+	segment_enable = node->hstats->segment_enable;
+
 	for ((skb = node->skb_list.head); skb != NULL; skb = nxt_skb) {
 
 		nxt_skb = skb->next;
@@ -750,11 +928,15 @@
 		skb->next = NULL;
 		skbs_delivered += 1;
 		skb_bytes_delivered += skb->len;
-		if (ctext == RMNET_RX_CTXT)
-			rmnet_shs_deliver_skb(skb);
-		else
-			rmnet_shs_deliver_skb_wq(skb);
 
+		if (segment_enable) {
+			rmnet_shs_deliver_skb_segmented(skb, ctext);
+		} else {
+			if (ctext == RMNET_RX_CTXT)
+				rmnet_shs_deliver_skb(skb);
+			else
+				rmnet_shs_deliver_skb_wq(skb);
+		}
 	}
 
 	node->skb_list.num_parked_skbs = 0;
@@ -822,14 +1004,14 @@
 
 	SHS_TRACE_HIGH(RMNET_SHS_FLUSH,
 			     RMNET_SHS_FLUSH_CHK_AND_FLUSH_NODE_START,
-			     force_flush, 0xDEF, 0xDEF, 0xDEF,
+			     force_flush, ctxt, 0xDEF, 0xDEF,
 			     node, NULL);
 	/* Return saved cpu assignment if an entry found*/
 	if (rmnet_shs_cpu_from_idx(node->map_index, map) != node->map_cpu) {
 
 		/* Keep flow on the same core if possible
-		* or put Orphaned flow on the default 1st core
-		*/
+		 * or put Orphaned flow on the default 1st core
+		 */
 		map_idx = rmnet_shs_idx_from_cpu(node->map_cpu,
 							map);
 		if (map_idx >= 0) {
@@ -875,8 +1057,8 @@
 
 void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
 {
-	struct rmnet_shs_skbn_s *n;
-	struct list_head *ptr, *next;
+	struct rmnet_shs_skbn_s *n = NULL;
+	struct list_head *ptr = NULL, *next = NULL;
 	int cpu_num;
 	u32 cpu_tail;
 	u32 num_pkts_flush = 0;
@@ -923,8 +1105,8 @@
 					rmnet_shs_cpu_node_tbl[n->map_cpu].parkedlen -= num_pkts_flush;
 					n->skb_list.skb_load = 0;
 					if (n->map_cpu == cpu_num) {
-					       cpu_tail += num_pkts_flush;
-					       n->queue_head = cpu_tail;
+						cpu_tail += num_pkts_flush;
+						n->queue_head = cpu_tail;
 
 					}
 				}
@@ -945,6 +1127,7 @@
 			    !rmnet_shs_cpu_node_tbl[cpu_num].prio) {
 
 				rmnet_shs_cpu_node_tbl[cpu_num].prio = 1;
+				rmnet_shs_boost_cpus();
 				if (hrtimer_active(&GET_CTIMER(cpu_num)))
 					hrtimer_cancel(&GET_CTIMER(cpu_num));
 
@@ -980,9 +1163,8 @@
 		rmnet_shs_cfg.is_pkt_parked = 0;
 		rmnet_shs_cfg.force_flush_state = RMNET_SHS_FLUSH_DONE;
 		if (rmnet_shs_fall_back_timer) {
-			if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs)) {
+			if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
 				hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
-			}
 		}
 
 	}
@@ -1009,59 +1191,30 @@
 {
 	u8 pushflush = 0;
 	struct napi_struct *napi = get_current_napi_context();
-	/* UDP GRO should tell us how many packets make up a
-	 * coalesced packet. Use that instead for stats for wq
-	 * Node stats only used by WQ
-	 * Parkedlen useful for cpu stats used by old IB
-	 * skb_load used by IB + UDP coals
+
+	/* Early flush for TCP if PSH packet.
+	 * Flush before parking PSH packet.
 	 */
+	if (skb->cb[SKB_FLUSH]) {
+		rmnet_shs_flush_lock_table(0, RMNET_RX_CTXT);
+		rmnet_shs_flush_reason[RMNET_SHS_FLUSH_PSH_PKT_FLUSH]++;
+		napi_gro_flush(napi, false);
+		pushflush = 1;
+	}
 
-	if ((skb->protocol == htons(ETH_P_IP) &&
-	     ip_hdr(skb)->protocol == IPPROTO_UDP) ||
-	    (skb->protocol == htons(ETH_P_IPV6) &&
-	     ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
-
-		if (skb_shinfo(skb)->gso_segs) {
-			node->num_skb += skb_shinfo(skb)->gso_segs;
-			rmnet_shs_cpu_node_tbl[node->map_cpu].parkedlen++;
-			node->skb_list.skb_load += skb_shinfo(skb)->gso_segs;
-		} else {
-			node->num_skb += 1;
-			rmnet_shs_cpu_node_tbl[node->map_cpu].parkedlen++;
-			node->skb_list.skb_load++;
-
-		}
+	/* Support for gso marked packets */
+	if (skb_shinfo(skb)->gso_segs) {
+		node->num_skb += skb_shinfo(skb)->gso_segs;
+		rmnet_shs_cpu_node_tbl[node->map_cpu].parkedlen++;
+		node->skb_list.skb_load += skb_shinfo(skb)->gso_segs;
 	} else {
-		/* This should only have TCP based on current
-		 * rmnet_shs_is_skb_stamping_reqd logic. Unoptimal
-		 * if non UDP/TCP protos are supported
-		 */
-
-		/* Early flush for TCP if PSH packet.
-		 * Flush before parking PSH packet.
-		 */
-		if (skb->cb[SKB_FLUSH]){
-			rmnet_shs_flush_lock_table(0, RMNET_RX_CTXT);
-			rmnet_shs_flush_reason[RMNET_SHS_FLUSH_PSH_PKT_FLUSH]++;
-			napi_gro_flush(napi, false);
-			pushflush = 1;
-		}
-
-		/* TCP support for gso marked packets */
-		if (skb_shinfo(skb)->gso_segs) {
-			node->num_skb += skb_shinfo(skb)->gso_segs;
-			rmnet_shs_cpu_node_tbl[node->map_cpu].parkedlen++;
-			node->skb_list.skb_load += skb_shinfo(skb)->gso_segs;
-		} else {
-			node->num_skb += 1;
-			rmnet_shs_cpu_node_tbl[node->map_cpu].parkedlen++;
-			node->skb_list.skb_load++;
-
-		}
+		node->num_skb += 1;
+		rmnet_shs_cpu_node_tbl[node->map_cpu].parkedlen++;
+		node->skb_list.skb_load++;
 
 	}
-	node->num_skb_bytes += skb->len;
 
+	node->num_skb_bytes += skb->len;
 	node->skb_list.num_parked_bytes += skb->len;
 	rmnet_shs_cfg.num_bytes_parked  += skb->len;
 
@@ -1116,9 +1269,8 @@
 		if (rmnet_shs_fall_back_timer &&
 		    rmnet_shs_cfg.num_bytes_parked &&
 		    rmnet_shs_cfg.num_pkts_parked){
-			if(hrtimer_active(&rmnet_shs_cfg.hrtimer_shs)) {
+			if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
 				hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
-			}
 
 			hrtimer_start(&rmnet_shs_cfg.hrtimer_shs,
 				      ns_to_ktime(rmnet_shs_timeout * NS_IN_MS),
@@ -1179,8 +1331,9 @@
 	struct core_flush_s *core_work = container_of(t,
 				 struct core_flush_s, core_timer);
 
-	schedule_work(&core_work->work);
+	rmnet_shs_reset_cpus();
 
+	schedule_work(&core_work->work);
 	return ret;
 }
 
@@ -1226,6 +1379,12 @@
 	rmnet_shs_wq_restart();
 }
 
+void rmnet_shs_dl_hdr_handler_v2(struct rmnet_map_dl_ind_hdr *dlhdr,
+			      struct rmnet_map_control_command_header *qcmd)
+{
+	rmnet_shs_dl_hdr_handler(dlhdr);
+}
+
 void rmnet_shs_dl_hdr_handler(struct rmnet_map_dl_ind_hdr *dlhdr)
 {
 
@@ -1244,6 +1403,12 @@
 /* Triggers flushing of all packets upon DL trailer
  * receiving a DL trailer marker
  */
+void rmnet_shs_dl_trl_handler_v2(struct rmnet_map_dl_ind_trl *dltrl,
+			      struct rmnet_map_control_command_header *qcmd)
+{
+	rmnet_shs_dl_trl_handler(dltrl);
+}
+
 void rmnet_shs_dl_trl_handler(struct rmnet_map_dl_ind_trl *dltrl)
 {
 
@@ -1263,20 +1428,29 @@
 {
 	struct rps_map *map;
 	u8 num_cpu;
+	u8 map_mask;
+	u8 map_len;
 
 	if (rmnet_shs_cfg.rmnet_shs_init_complete)
 		return;
 	map = rcu_dereference(vnd->_rx->rps_map);
 
-	if (!map)
-		return;
+	if (!map) {
+		map_mask = 0;
+		map_len = 0;
+	} else {
+		map_mask = rmnet_shs_mask_from_map(map);
+		map_len = rmnet_shs_get_mask_len(map_mask);
+	}
 
 	rmnet_shs_cfg.port = rmnet_get_port(dev);
-	rmnet_shs_cfg.map_mask = rmnet_shs_mask_from_map(map);
-	rmnet_shs_cfg.map_len = rmnet_shs_get_mask_len(rmnet_shs_cfg.map_mask);
+	rmnet_shs_cfg.map_mask = map_mask;
+	rmnet_shs_cfg.map_len = map_len;
 	for (num_cpu = 0; num_cpu < MAX_CPUS; num_cpu++)
 		INIT_LIST_HEAD(&rmnet_shs_cpu_node_tbl[num_cpu].node_list_id);
 
+	rmnet_shs_freq_init();
+
 	rmnet_shs_cfg.rmnet_shs_init_complete = 1;
 }
 
@@ -1318,16 +1492,33 @@
 void rmnet_shs_get_update_skb_proto(struct sk_buff *skb,
 				    struct rmnet_shs_skbn_s *node_p)
 {
-	switch (skb->protocol) {
-	case htons(ETH_P_IP):
-		node_p->skb_tport_proto = ip_hdr(skb)->protocol;
-		break;
-	case htons(ETH_P_IPV6):
-		node_p->skb_tport_proto = ipv6_hdr(skb)->nexthdr;
-		break;
-	default:
-		node_p->skb_tport_proto = IPPROTO_RAW;
-		break;
+	struct iphdr *ip4h;
+	struct ipv6hdr *ip6h;
+
+	if (!skb_is_nonlinear(skb)) {
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			node_p->skb_tport_proto = ip_hdr(skb)->protocol;
+			break;
+		case htons(ETH_P_IPV6):
+			node_p->skb_tport_proto = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			ip4h = (struct iphdr *)rmnet_map_data_ptr(skb);
+			node_p->skb_tport_proto = ip4h->protocol;
+			break;
+		case htons(ETH_P_IPV6):
+			ip6h = (struct ipv6hdr *)rmnet_map_data_ptr(skb);
+			node_p->skb_tport_proto = ip6h->nexthdr;
+			break;
+		default:
+			break;
+		}
 	}
 }
 
@@ -1359,7 +1550,7 @@
 		return;
 	}
 
-	if ((unlikely(!map))|| !rmnet_shs_cfg.rmnet_shs_init_complete) {
+	if ((unlikely(!map)) || !rmnet_shs_cfg.rmnet_shs_init_complete) {
 		rmnet_shs_deliver_skb(skb);
 		SHS_TRACE_ERR(RMNET_SHS_ASSIGN,
 				    RMNET_SHS_ASSIGN_CRIT_ERROR_NO_SHS_REQD,
@@ -1377,8 +1568,8 @@
 	spin_lock_irqsave(&rmnet_shs_ht_splock, ht_flags);
 	do {
 		hash_for_each_possible_safe(RMNET_SHS_HT, node_p, tmp, list,
-					    skb->hash) {
-			if (skb->hash != node_p->hash)
+					    hash) {
+			if (hash != node_p->hash)
 				continue;
 
 
@@ -1391,6 +1582,7 @@
 			rmnet_shs_chain_to_skb_list(skb, node_p);
 			is_match_found = 1;
 			is_shs_reqd = 1;
+			break;
 
 		}
 		if (is_match_found)
@@ -1457,15 +1649,6 @@
 		return;
 	}
 
-	if (!rmnet_shs_cfg.is_reg_dl_mrk_ind) {
-		rmnet_map_dl_ind_register(port, &rmnet_shs_cfg.dl_mrk_ind_cb);
-		qmi_rmnet_ps_ind_register(port,
-					  &rmnet_shs_cfg.rmnet_idl_ind_cb);
-
-		rmnet_shs_cfg.is_reg_dl_mrk_ind = 1;
-		shs_rx_work.port = port;
-
-	}
 	/* We got the first packet after a previous successful flush. Arm the
 	 * flushing timer.
 	 */
@@ -1543,9 +1726,7 @@
  */
 void rmnet_shs_exit(void)
 {
-	qmi_rmnet_ps_ind_deregister(rmnet_shs_cfg.port,
-				    &rmnet_shs_cfg.rmnet_idl_ind_cb);
-
+	rmnet_shs_freq_exit();
 	rmnet_shs_cfg.dl_mrk_ind_cb.dl_hdr_handler = NULL;
 	rmnet_shs_cfg.dl_mrk_ind_cb.dl_trl_handler = NULL;
 	rmnet_map_dl_ind_deregister(rmnet_shs_cfg.port,
@@ -1555,6 +1736,7 @@
 		hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
 
 	memset(&rmnet_shs_cfg, 0, sizeof(rmnet_shs_cfg));
+	rmnet_shs_cfg.port = NULL;
 	rmnet_shs_cfg.rmnet_shs_init_complete = 0;
 
 }
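
One detail worth calling out from the rmnet_shs_main.c changes above: ICMP packets are stamped with skb->hash = 0 and skb->sw_hash = 1 so that RPS leaves them on the receiving CPU. The sketch below isolates that trick; example_skip_rps() is illustrative, and the behaviour relies on skb_get_hash() not recomputing a software-stamped hash and on a zero hash meaning "no steering" in the RPS path.

#include <linux/skbuff.h>

/* Mark an skb so that RPS does not steer it to another CPU: sw_hash
 * prevents skb_get_hash() from recomputing the hash, and a hash of 0
 * is treated as "no steering decision" by the RPS code.
 */
static void example_skip_rps(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 1;
}
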
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 9265289..298058c 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -14,8 +14,12 @@
  */
 
 #include "rmnet_shs.h"
-#include <linux/module.h>
+#include "rmnet_shs_wq_genl.h"
+#include "rmnet_shs_wq_mem.h"
 #include <linux/workqueue.h>
+#include <linux/list_sort.h>
+#include <net/sock.h>
+#include <linux/skbuff.h>
 
 MODULE_LICENSE("GPL v2");
 /* Local Macros */
@@ -149,8 +153,21 @@
 module_param_array(rmnet_shs_flow_rx_pps, ullong, 0, 0444);
 MODULE_PARM_DESC(rmnet_shs_flow_rx_pps, "SHS stamp pkt enq rate per flow");
 
-static spinlock_t rmnet_shs_wq_splock;
+/* Counters for suggestions made by wq */
+unsigned long long rmnet_shs_flow_silver_to_gold[MAX_SUPPORTED_FLOWS_DEBUG];
+module_param_array(rmnet_shs_flow_silver_to_gold, ullong, 0, 0444);
+MODULE_PARM_DESC(rmnet_shs_flow_silver_to_gold, "SHS Suggest Silver to Gold");
+
+unsigned long long rmnet_shs_flow_gold_to_silver[MAX_SUPPORTED_FLOWS_DEBUG];
+module_param_array(rmnet_shs_flow_gold_to_silver, ullong, 0, 0444);
+MODULE_PARM_DESC(rmnet_shs_flow_gold_to_silver, "SHS Suggest Gold to Silver");
+
+unsigned long long rmnet_shs_flow_gold_balance[MAX_SUPPORTED_FLOWS_DEBUG];
+module_param_array(rmnet_shs_flow_gold_balance, ullong, 0, 0444);
+MODULE_PARM_DESC(rmnet_shs_flow_gold_balance, "SHS Suggest Gold Balance");
+
 static DEFINE_SPINLOCK(rmnet_shs_hstat_tbl_lock);
+static DEFINE_SPINLOCK(rmnet_shs_ep_lock);
 
 static time_t rmnet_shs_wq_tnsec;
 static struct workqueue_struct *rmnet_shs_wq;
@@ -168,24 +185,16 @@
  */
 void rmnet_shs_wq_ep_tbl_add(struct rmnet_shs_wq_ep_s *ep)
 {
-	unsigned long flags;
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_EP_TBL, RMNET_SHS_WQ_EP_TBL_ADD,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, ep, NULL);
-	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_add(&ep->ep_list_id, &rmnet_shs_wq_ep_tbl);
-	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
 }
 
 void rmnet_shs_wq_ep_tbl_remove(struct rmnet_shs_wq_ep_s *ep)
 {
-	unsigned long flags;
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_EP_TBL, RMNET_SHS_WQ_EP_TBL_DEL,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, ep, NULL);
-
-	spin_lock_irqsave(&rmnet_shs_hstat_tbl_lock, flags);
 	list_del_init(&ep->ep_list_id);
-	spin_unlock_irqrestore(&rmnet_shs_hstat_tbl_lock, flags);
-
 }
 
 /* Helper functions to add and remove entries to the table
@@ -325,7 +334,7 @@
  */
 struct rmnet_shs_wq_hstat_s *rmnet_shs_wq_get_new_hstat_node(void)
 {
-	struct rmnet_shs_wq_hstat_s *hnode;
+	struct rmnet_shs_wq_hstat_s *hnode = NULL;
 	struct rmnet_shs_wq_hstat_s *ret_node = NULL;
 	unsigned long flags;
 
@@ -379,10 +388,16 @@
 
 	return ret_node;
 }
+
 void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p)
 {
 	struct timespec time;
 
+	if (!node_p) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
 	node_p->hstats = rmnet_shs_wq_get_new_hstat_node();
 	if (node_p->hstats != NULL) {
 		(void)getnstimeofday(&time);
@@ -391,6 +406,12 @@
 		node_p->hstats->skb_tport_proto = node_p->skb_tport_proto;
 		node_p->hstats->current_cpu = node_p->map_cpu;
 		node_p->hstats->suggested_cpu = node_p->map_cpu;
+
+		/* Start TCP flows with segmentation if userspace connected */
+		if (rmnet_shs_userspace_connected &&
+		    node_p->hstats->skb_tport_proto == IPPROTO_TCP)
+			node_p->hstats->segment_enable = 1;
+
 		node_p->hstats->node = node_p;
 		node_p->hstats->c_epoch = RMNET_SHS_SEC_TO_NSEC(time.tv_sec) +
 		   time.tv_nsec;
@@ -404,18 +425,113 @@
 				node_p, node_p->hstats);
 }
 
+
+/* Compute the average pps for a flow based on the tuning parameter.
+ * When we decide to move a flow off a small cluster core, it is
+ * usually because of heavy traffic on that core. In that case we want
+ * to switch to a big cluster core as soon as possible, so the most
+ * recent sample is given a greater weight than the previous samples.
+ *
+ * On the other hand, when a flow on a big cluster cpu suddenly starts
+ * to receive low traffic, we move it to a small cluster core only
+ * after observing low traffic for a few more samples. This avoids
+ * switching back and forth between clusters due to a momentary
+ * decrease in data traffic.
+ */
+static u64 rmnet_shs_wq_get_flow_avg_pps(struct rmnet_shs_wq_hstat_s *hnode)
+{
+	u64 avg_pps, mov_avg_pps;
+	u16 new_weight, old_weight;
+
+	if (!hnode) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return 0;
+	}
+
+	if (rmnet_shs_is_lpwr_cpu(hnode->current_cpu)) {
+		/* More weight to current value */
+		new_weight = rmnet_shs_wq_tuning;
+		old_weight = 100 - rmnet_shs_wq_tuning;
+	} else {
+		old_weight = rmnet_shs_wq_tuning;
+		new_weight = 100 - rmnet_shs_wq_tuning;
+	}
+
+	/* Compute the weighted average per flow; if the flow has just
+	 * started, there are no past values, so use the current pps as
+	 * the average.
+	 */
+	if (hnode->last_pps == 0) {
+		avg_pps = hnode->rx_pps;
+	} else {
+		mov_avg_pps = (hnode->last_pps + hnode->avg_pps) / 2;
+		avg_pps = (((new_weight * hnode->rx_pps) +
+			    (old_weight * mov_avg_pps)) /
+			    (new_weight + old_weight));
+	}
+
+	return avg_pps;
+}
+
+static u64 rmnet_shs_wq_get_cpu_avg_pps(u16 cpu_num)
+{
+	u64 avg_pps, mov_avg_pps;
+	u16 new_weight, old_weight;
+	struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node;
+	struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p = &rmnet_shs_rx_flow_tbl;
+
+	if (cpu_num >= MAX_CPUS) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+		return 0;
+	}
+
+	cpu_node = &rx_flow_tbl_p->cpu_list[cpu_num];
+
+	if (rmnet_shs_is_lpwr_cpu(cpu_num)) {
+		/* More weight to current value */
+		new_weight = rmnet_shs_wq_tuning;
+		old_weight = 100 - rmnet_shs_wq_tuning;
+	} else {
+		old_weight = rmnet_shs_wq_tuning;
+		new_weight = 100 - rmnet_shs_wq_tuning;
+	}
+
+	/* Compute the weighted average per cpu; if the cpu has no past
+	 * pps values, use the current value as the average.
+	 */
+	if (cpu_node->last_rx_pps == 0) {
+		avg_pps = cpu_node->avg_pps;
+	} else {
+		mov_avg_pps = (cpu_node->last_rx_pps + cpu_node->avg_pps) / 2;
+		avg_pps = (((new_weight * cpu_node->rx_pps) +
+			    (old_weight * mov_avg_pps)) /
+			    (new_weight + old_weight));
+	}
+
+	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_CPU_STATS,
+			   RMNET_SHS_WQ_CPU_STATS_CORE2SWITCH_EVAL_CPU,
+			   cpu_num, cpu_node->rx_pps, cpu_node->last_rx_pps,
+			   avg_pps, NULL, NULL);
+
+	return avg_pps;
+}
+
 /* Refresh the RPS mask associated with this flow */
 void rmnet_shs_wq_update_hstat_rps_msk(struct rmnet_shs_wq_hstat_s *hstat_p)
 {
-	struct rmnet_shs_skbn_s *node_p;
-	struct rmnet_shs_wq_ep_s *ep;
+	struct rmnet_shs_skbn_s *node_p = NULL;
+	struct rmnet_shs_wq_ep_s *ep = NULL;
+
+	if (!hstat_p) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
 
 	node_p = hstat_p->node;
 
 	/*Map RPS mask from the endpoint associated with this flow*/
 	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 
-		if (ep && (node_p->dev == ep->ep->egress_dev)) {
+		if (ep && (node_p->dev == ep->ep)) {
 			hstat_p->rps_config_msk = ep->rps_config_msk;
 			hstat_p->def_core_msk = ep->default_core_msk;
 			hstat_p->pri_core_msk = ep->pri_core_msk;
@@ -438,6 +554,11 @@
 	if (!rmnet_shs_stats_enabled)
 		return;
 
+	if (!hstats_p || !node_p) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
 	if (hstats_p->stat_idx < 0) {
 		idx = idx % MAX_SUPPORTED_FLOWS_DEBUG;
 		hstats_p->stat_idx = idx;
@@ -455,6 +576,12 @@
 	rmnet_shs_flow_cpu[hstats_p->stat_idx] = hstats_p->current_cpu;
 	rmnet_shs_flow_cpu_recommended[hstats_p->stat_idx] =
 						hstats_p->suggested_cpu;
+	rmnet_shs_flow_silver_to_gold[hstats_p->stat_idx] =
+		hstats_p->rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_SILVER_TO_GOLD];
+	rmnet_shs_flow_gold_to_silver[hstats_p->stat_idx] =
+		hstats_p->rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_GOLD_TO_SILVER];
+	rmnet_shs_flow_gold_balance[hstats_p->stat_idx] =
+		hstats_p->rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_GOLD_BALANCE];
 
 }
 
@@ -464,6 +591,11 @@
 u8 rmnet_shs_wq_is_hash_rx_new_pkt(struct rmnet_shs_wq_hstat_s *hstats_p,
 				   struct rmnet_shs_skbn_s *node_p)
 {
+	if (!hstats_p || !node_p) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return 0;
+	}
+
 	if (node_p->num_skb == hstats_p->rx_skb)
 		return 0;
 
@@ -475,6 +607,11 @@
 {
 	time_t tdiff;
 
+	if (!hstats_p || !node_p) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
 	tdiff = rmnet_shs_wq_tnsec - hstats_p->c_epoch;
 	hstats_p->inactive_duration = tdiff;
 
@@ -490,10 +627,16 @@
 	u64 skb_diff, bytes_diff;
 	struct rmnet_shs_skbn_s *node_p;
 
+	if (!hstats_p) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
 	node_p = hstats_p->node;
 
 	if (!rmnet_shs_wq_is_hash_rx_new_pkt(hstats_p, node_p)) {
 		hstats_p->rx_pps = 0;
+		hstats_p->avg_pps = 0;
 		hstats_p->rx_bps = 0;
 		rmnet_shs_wq_update_hash_tinactive(hstats_p, node_p);
 		rmnet_shs_wq_update_hash_stats_debug(hstats_p, node_p);
@@ -522,6 +665,8 @@
 	hstats_p->rx_pps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(skb_diff)/(tdiff);
 	hstats_p->rx_bps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(bytes_diff)/(tdiff);
 	hstats_p->rx_bps = RMNET_SHS_BYTE_TO_BIT(hstats_p->rx_bps);
+	hstats_p->avg_pps = rmnet_shs_wq_get_flow_avg_pps(hstats_p);
+	hstats_p->last_pps = hstats_p->rx_pps;
 	rmnet_shs_wq_update_hash_stats_debug(hstats_p, node_p);
 
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
@@ -537,6 +682,16 @@
 	if (!rmnet_shs_stats_enabled)
 		return;
 
+	if (cpu >= MAX_CPUS) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+		return;
+	}
+
+	if (!cpu_p) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
 	rmnet_shs_cpu_rx_bps[cpu] = cpu_p->rx_bps;
 	rmnet_shs_cpu_rx_pps[cpu] = cpu_p->rx_pps;
 	rmnet_shs_cpu_rx_flows[cpu] = cpu_p->flows;
@@ -556,7 +711,7 @@
 	tbl_p->dl_mrk_last_rx_bytes = tbl_p->dl_mrk_rx_bytes;
 	tbl_p->dl_mrk_last_rx_pkts = tbl_p->dl_mrk_rx_pkts;
 
-	port = rmnet_get_port(rmnet_shs_delayed_wq->netdev);
+	port = rmnet_shs_cfg.port;
 	if (!port) {
 		rmnet_shs_crit_err[RMNET_SHS_WQ_GET_RMNET_PORT_ERR]++;
 		return;
@@ -605,15 +760,20 @@
 	struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_p;
 	time_t tdiff;
 	u64 new_skbs, new_bytes;
+	u64 last_rx_bps, last_rx_pps;
 	u32 new_qhead;
 
+	if (cpu >= MAX_CPUS) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+		return;
+	}
+
 	cpu_p = &rmnet_shs_rx_flow_tbl.cpu_list[cpu];
 	new_skbs = cpu_p->rx_skbs - cpu_p->last_rx_skbs;
 
 	new_qhead = rmnet_shs_get_cpu_qhead(cpu);
-	if (cpu_p->qhead_start == 0) {
+	if (cpu_p->qhead_start == 0)
 		cpu_p->qhead_start = new_qhead;
-	}
 
 	cpu_p->last_qhead = cpu_p->qhead;
 	cpu_p->qhead = new_qhead;
@@ -627,23 +787,37 @@
 		cpu_p->l_epoch =  rmnet_shs_wq_tnsec;
 		cpu_p->rx_bps = 0;
 		cpu_p->rx_pps = 0;
+		cpu_p->avg_pps = 0;
+		if (rmnet_shs_userspace_connected) {
+			rmnet_shs_wq_cpu_caps_list_add(&rmnet_shs_rx_flow_tbl,
+						       cpu_p, &cpu_caps);
+		}
 		rmnet_shs_wq_refresh_cpu_rates_debug(cpu, cpu_p);
 		return;
 	}
 
 	tdiff = rmnet_shs_wq_tnsec - cpu_p->l_epoch;
 	new_bytes = cpu_p->rx_bytes - cpu_p->last_rx_bytes;
-	cpu_p->last_rx_bps = cpu_p->rx_bps;
-	cpu_p->last_rx_pps = cpu_p->rx_pps;
+
+	last_rx_bps = cpu_p->rx_bps;
+	last_rx_pps = cpu_p->rx_pps;
 	cpu_p->rx_pps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(new_skbs)/tdiff;
 	cpu_p->rx_bps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(new_bytes)/tdiff;
 	cpu_p->rx_bps = RMNET_SHS_BYTE_TO_BIT(cpu_p->rx_bps);
+	cpu_p->avg_pps = rmnet_shs_wq_get_cpu_avg_pps(cpu);
+	cpu_p->last_rx_bps = last_rx_bps;
+	cpu_p->last_rx_pps = last_rx_pps;
 
 	cpu_p->l_epoch =  rmnet_shs_wq_tnsec;
 	cpu_p->last_rx_skbs = cpu_p->rx_skbs;
 	cpu_p->last_rx_bytes = cpu_p->rx_bytes;
 	cpu_p->rx_bps_est = cpu_p->rx_bps;
 
+	if (rmnet_shs_userspace_connected) {
+		rmnet_shs_wq_cpu_caps_list_add(&rmnet_shs_rx_flow_tbl,
+					       cpu_p, &cpu_caps);
+	}
+
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_CPU_STATS,
 				RMNET_SHS_WQ_CPU_STATS_UPDATE, cpu,
 				cpu_p->flows, cpu_p->rx_pps,
@@ -651,6 +825,7 @@
 	rmnet_shs_wq_refresh_cpu_rates_debug(cpu, cpu_p);
 
 }
+
 static void rmnet_shs_wq_refresh_all_cpu_stats(void)
 {
 	u16 cpu;
@@ -669,40 +844,45 @@
 
 void rmnet_shs_wq_update_cpu_rx_tbl(struct rmnet_shs_wq_hstat_s *hstat_p)
 {
-	struct rps_map *map;
-	struct rmnet_shs_skbn_s *node_p;
-	int cpu_num;
-	u16 map_idx;
-	u64 skb_diff, byte_diff;
 	struct rmnet_shs_wq_rx_flow_s *tbl_p = &rmnet_shs_rx_flow_tbl;
+	struct rmnet_shs_skbn_s *node_p;
+	u64 skb_diff, byte_diff;
+	u16 cpu_num;
+
+	if (!hstat_p) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
 
 	node_p = hstat_p->node;
 
 	if (hstat_p->inactive_duration > 0)
 		return;
 
-	rcu_read_lock();
-	map = rcu_dereference(node_p->dev->_rx->rps_map);
+	cpu_num = node_p->map_cpu;
 
-	if (!map || node_p->map_index > map->len || !map->len) {
-		rcu_read_unlock();
+	if (cpu_num >= MAX_CPUS) {
+		rmnet_shs_crit_err[RMNET_SHS_INVALID_CPU_ERR]++;
 		return;
 	}
-
-	map_idx = node_p->map_index;
-	cpu_num = map->cpus[map_idx];
-
 	skb_diff = hstat_p->rx_skb - hstat_p->last_rx_skb;
 	byte_diff = hstat_p->rx_bytes - hstat_p->last_rx_bytes;
-	rcu_read_unlock();
 
 	if (hstat_p->is_new_flow) {
 		rmnet_shs_wq_cpu_list_add(hstat_p,
 				       &tbl_p->cpu_list[cpu_num].hstat_id);
+		rm_err("SHS_FLOW: adding flow 0x%x on cpu[%d] "
+		       "pps: %llu | avg_pps %llu",
+		       hstat_p->hash, hstat_p->current_cpu,
+		       hstat_p->rx_pps, hstat_p->avg_pps);
 		hstat_p->is_new_flow = 0;
 	}
 	/* check if the flow has switched to another CPU*/
 	if (cpu_num != hstat_p->current_cpu) {
+		rm_err("SHS_FLOW: moving flow 0x%x on cpu[%d] to cpu[%d] "
+		       "pps: %llu | avg_pps %llu",
+		       hstat_p->hash, hstat_p->current_cpu, cpu_num,
+		       hstat_p->rx_pps, hstat_p->avg_pps);
 		trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
 					RMNET_SHS_WQ_FLOW_STATS_UPDATE_NEW_CPU,
 					hstat_p->hash, hstat_p->current_cpu,
@@ -726,7 +906,7 @@
 
 }
 
-static void rmnet_shs_wq_chng_suggested_cpu(u16 old_cpu, u16 new_cpu,
+void rmnet_shs_wq_chng_suggested_cpu(u16 old_cpu, u16 new_cpu,
 					      struct rmnet_shs_wq_ep_s *ep)
 {
 	struct rmnet_shs_skbn_s *node_p;
@@ -743,7 +923,7 @@
 		hstat_p = node_p->hstats;
 
 		if ((hstat_p->suggested_cpu == old_cpu) &&
-		    (node_p->dev == ep->ep->egress_dev)) {
+		    (node_p->dev == ep->ep)) {
 
 			trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
 				RMNET_SHS_WQ_FLOW_STATS_SUGGEST_NEW_CPU,
@@ -755,6 +935,85 @@
 	}
 }
 
+/* Increment the per-flow counter for suggestion type */
+static void rmnet_shs_wq_inc_sugg_type(u32 sugg_type,
+				       struct rmnet_shs_wq_hstat_s *hstat_p)
+{
+	if (sugg_type >= RMNET_SHS_WQ_SUGG_MAX || hstat_p == NULL)
+		return;
+
+	hstat_p->rmnet_shs_wq_suggs[sugg_type] += 1;
+}
+
+/* Change suggested cpu, return 1 if suggestion was made, 0 otherwise */
+static int rmnet_shs_wq_chng_flow_cpu(u16 old_cpu, u16 new_cpu,
+				      struct rmnet_shs_wq_ep_s *ep,
+				      u32 hash_to_move, u32 sugg_type)
+{
+	struct rmnet_shs_skbn_s *node_p;
+	struct rmnet_shs_wq_hstat_s *hstat_p;
+	int rc = 0;
+	u16 bkt;
+
+	if (!ep) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+		return 0;
+	}
+
+	if (old_cpu >= MAX_CPUS || new_cpu >= MAX_CPUS) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+		return 0;
+	}
+
+	hash_for_each(RMNET_SHS_HT, bkt, node_p, list) {
+		if (!node_p)
+			continue;
+
+		if (!node_p->hstats)
+			continue;
+
+		hstat_p = node_p->hstats;
+
+		if (hash_to_move != 0) {
+			/* If hash_to_move is given, only move that flow,
+			 * otherwise move all the flows on that cpu
+			 */
+			if (hstat_p->hash != hash_to_move)
+				continue;
+		}
+
+		rm_err("SHS_HT: >>  sugg cpu %d | old cpu %d | new_cpu %d | "
+		       "map_cpu = %d | flow 0x%x",
+		       hstat_p->suggested_cpu, old_cpu, new_cpu,
+		       node_p->map_cpu, hash_to_move);
+
+		if ((hstat_p->suggested_cpu == old_cpu) &&
+		    (node_p->dev == ep->ep)) {
+
+			trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
+				RMNET_SHS_WQ_FLOW_STATS_SUGGEST_NEW_CPU,
+				hstat_p->hash, hstat_p->suggested_cpu,
+				new_cpu, 0xDEF, hstat_p, NULL);
+
+			node_p->hstats->suggested_cpu = new_cpu;
+			rmnet_shs_wq_inc_sugg_type(sugg_type, hstat_p);
+			if (hash_to_move) { /* Stop after moving one flow */
+				rm_err("SHS_CHNG: moving single flow: flow 0x%x "
+				       "sugg_cpu changed from %d to %d",
+				       hstat_p->hash, old_cpu,
+				       node_p->hstats->suggested_cpu);
+				return 1;
+			}
+			rm_err("SHS_CHNG: moving all flows: flow 0x%x "
+			       "sugg_cpu changed from %d to %d",
+			       hstat_p->hash, old_cpu,
+			       node_p->hstats->suggested_cpu);
+			rc |= 1;
+		}
+	}
+	return rc;
+}
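+
+/* Illustrative call patterns for the helper above (values are examples only):
+ *   rmnet_shs_wq_chng_flow_cpu(1, 4, ep, 0x12345678, sugg_type);
+ *       moves only the flow whose hash is 0x12345678 and returns after it
+ *   rmnet_shs_wq_chng_flow_cpu(1, 4, ep, 0, sugg_type);
+ *       hash_to_move == 0 moves every flow currently suggested on cpu 1
+ */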
+
 u64 rmnet_shs_wq_get_max_pps_among_cores(u32 core_msk)
 {
 	int cpu_num;
@@ -770,37 +1029,23 @@
 	return max_pps;
 }
 
-u32 rmnet_shs_wq_get_dev_rps_msk(struct net_device *dev)
-{
-	u32 dev_rps_msk = 0;
-	struct rmnet_shs_wq_ep_s *ep;
-
-	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
-		if (!ep)
-			continue;
-
-		if (!ep->is_ep_active)
-			continue;
-
-		if (ep->ep->egress_dev == dev)
-			dev_rps_msk = ep->rps_config_msk;
-	}
-
-	return dev_rps_msk;
-}
-
-/* Return the least utilized core from the list of cores available
- * If all the cores are fully utilized return no specific core
+/* Returns the least utilized core from a core mask.
+ * In order of priority:
+ *    1) Returns the leftmost core with no flows (Fully Idle)
+ *    2) Returns the core with the fewest flows among those with no pps (Semi Idle)
+ *    3) Returns the core with the lowest pps (Non-Idle)
  */
 int rmnet_shs_wq_get_least_utilized_core(u16 core_msk)
 {
-	int cpu_num;
 	struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p = &rmnet_shs_rx_flow_tbl;
 	struct rmnet_shs_wq_cpu_rx_pkt_q_s *list_p;
-	u64 min_pps = rmnet_shs_wq_get_max_pps_among_cores(core_msk);
-	u64 max_pps = 0;
+	u64 min_pps = U64_MAX;
+	u32 min_flows = U32_MAX;
 	int ret_val = -1;
-	u8 is_cpu_in_msk;
+	int semi_idle_ret = -1;
+	int full_idle_ret = -1;
+	int cpu_num = 0;
+	u16 is_cpu_in_msk;
 
 	for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
 
@@ -809,33 +1054,38 @@
 			continue;
 
 		list_p = &rx_flow_tbl_p->cpu_list[cpu_num];
-		max_pps = rmnet_shs_wq_get_max_allowed_pps(cpu_num);
-
 		trace_rmnet_shs_wq_low(RMNET_SHS_WQ_CPU_STATS,
 				       RMNET_SHS_WQ_CPU_STATS_CURRENT_UTIL,
 				       cpu_num, list_p->rx_pps, min_pps,
-				       max_pps, NULL, NULL);
-
-		/* lets not use a core that is already kinda loaded */
-		if (list_p->rx_pps > max_pps)
-			continue;
+				       0, NULL, NULL);
 
 		/* When there are multiple free CPUs the first free CPU will
 		 * be returned
 		 */
-		if (list_p->rx_pps == 0) {
-			ret_val = cpu_num;
+		if (list_p->flows == 0) {
+			full_idle_ret = cpu_num;
 			break;
 		}
+		/* When there are semi-idle CPUs the CPU w/ least flows will
+		 * be returned
+		 */
+		if (list_p->rx_pps == 0 && list_p->flows < min_flows) {
+			min_flows = list_p->flows;
+			semi_idle_ret = cpu_num;
+		}
 
 		/* Found a core that is processing even lower packets */
 		if (list_p->rx_pps <= min_pps) {
 			min_pps = list_p->rx_pps;
 			ret_val = cpu_num;
 		}
-
 	}
 
+	if (full_idle_ret >= 0)
+		ret_val = full_idle_ret;
+	else if (semi_idle_ret >= 0)
+		ret_val = semi_idle_ret;
+
 	return ret_val;
 }
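+
+/* Illustrative selection for the helper above (example numbers only):
+ * given core_msk = 0x0F and per-cpu state
+ *   cpu0: flows = 2, rx_pps = 5000
+ *   cpu1: flows = 1, rx_pps = 0
+ *   cpu2: flows = 0, rx_pps = 0
+ *   cpu3: flows = 3, rx_pps = 9000
+ * cpu2 is returned (fully idle). Without cpu2, cpu1 wins (semi idle);
+ * with neither, cpu0 wins as the core with the lowest pps.
+ */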
 
@@ -874,9 +1124,8 @@
 	 * for a few ticks and reset it afterwards
 	 */
 
-	if (rmnet_shs_cpu_node_tbl[current_cpu].wqprio) {
+	if (rmnet_shs_cpu_node_tbl[current_cpu].wqprio)
 		return current_cpu;
-	}
 
 	for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
 
@@ -919,7 +1168,7 @@
 
 void rmnet_shs_wq_find_cpu_and_move_flows(u16 cur_cpu)
 {
-	struct rmnet_shs_wq_ep_s *ep;
+	struct rmnet_shs_wq_ep_s *ep = NULL;
 	u16 new_cpu;
 
 	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
@@ -935,6 +1184,273 @@
 			rmnet_shs_wq_chng_suggested_cpu(cur_cpu, new_cpu, ep);
 	}
 }
+
+/* Return 1 if we can move a flow to dest_cpu for this endpoint,
+ * otherwise return 0. Checks that dest_cpu is in the endpoint's rps mask,
+ * that it is online, and that it is not isolated.
+ */
+int rmnet_shs_wq_check_cpu_move_for_ep(u16 current_cpu, u16 dest_cpu,
+				       struct rmnet_shs_wq_ep_s *ep)
+{
+	u16 cpu_in_rps_mask = 0;
+
+	if (!ep) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+		return 0;
+	}
+
+	if (current_cpu >= MAX_CPUS || dest_cpu >= MAX_CPUS) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+		return 0;
+	}
+
+	cpu_in_rps_mask = (1 << dest_cpu) & ep->rps_config_msk;
+
+	rm_err("SHS_MASK:  cur cpu [%d] | dest_cpu [%d] | "
+	       "cpu isolation_mask = 0x%x | ep_rps_mask = 0x%x | "
+	       "cpu_online(dest) = %d cpu_in_rps_mask = %d | "
+	       "cpu isolated(dest) = %d",
+	       current_cpu, dest_cpu, __cpu_isolated_mask, ep->rps_config_msk,
+	       cpu_online(dest_cpu), cpu_in_rps_mask, cpu_isolated(dest_cpu));
+
+	/* We cannot move to dest cpu if the cur cpu is the same,
+	 * the dest cpu is offline, dest cpu is not in the rps mask,
+	 * or if the dest cpu is isolated
+	 */
+	if (current_cpu == dest_cpu || !cpu_online(dest_cpu) ||
+	    !cpu_in_rps_mask || cpu_isolated(dest_cpu)) {
+		return 0;
+	}
+
+	return 1;
+}
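+
+/* Example of the mask check above (illustrative values): with
+ * ep->rps_config_msk = 0xF0, dest_cpu = 4 gives
+ * (1 << 4) & 0xF0 = 0x10, so cpu 4 is eligible provided it is online,
+ * not isolated, and different from current_cpu; dest_cpu = 2 gives
+ * (1 << 2) & 0xF0 = 0 and is rejected.
+ */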
+
+/* rmnet_shs_wq_try_to_move_flow - try to make a flow suggestion
+ * return 1 if flow move was suggested, otherwise return 0
+ */
+int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
+				  u32 sugg_type)
+{
+	struct rmnet_shs_wq_ep_s *ep;
+
+	if (cur_cpu >= MAX_CPUS || dest_cpu >= MAX_CPUS) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+		return 0;
+	}
+
+	/* Traverse the endpoint list and check whether the cpu can be used,
+	 * based on whether it is online, the rps mask, isolation, etc., then
+	 * suggest changing the cpu for the flow identified by its hash
+	 */
+	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
+		if (!ep)
+			continue;
+
+		if (!ep->is_ep_active)
+			continue;
+
+		if (!rmnet_shs_wq_check_cpu_move_for_ep(cur_cpu,
+							dest_cpu,
+							ep)) {
+			rm_err("SHS_FDESC: >> Cannot move flow 0x%x on ep"
+			       " from cpu[%d] to cpu[%d]",
+			       hash_to_move, cur_cpu, dest_cpu);
+			continue;
+		}
+
+		if (rmnet_shs_wq_chng_flow_cpu(cur_cpu, dest_cpu, ep,
+					       hash_to_move, sugg_type)) {
+			rm_err("SHS_FDESC: >> flow 0x%x was suggested to"
+			       " move from cpu[%d] to cpu[%d] sugg_type [%d]",
+			       hash_to_move, cur_cpu, dest_cpu, sugg_type);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/* Change flow segmentation, return 1 if set, 0 otherwise */
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
+{
+	struct rmnet_shs_skbn_s *node_p;
+	struct rmnet_shs_wq_hstat_s *hstat_p;
+	u16 bkt;
+
+	hash_for_each(RMNET_SHS_HT, bkt, node_p, list) {
+		if (!node_p)
+			continue;
+
+		if (!node_p->hstats)
+			continue;
+
+		hstat_p = node_p->hstats;
+
+		if (hstat_p->hash != hash_to_set)
+			continue;
+
+		rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u",
+		       hash_to_set, seg_enable);
+
+		trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
+				RMNET_SHS_WQ_FLOW_STATS_SET_FLOW_SEGMENTATION,
+				hstat_p->hash, seg_enable,
+				0xDEF, 0xDEF, hstat_p, NULL);
+
+		node_p->hstats->segment_enable = seg_enable;
+		return 1;
+	}
+
+	rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u not set - hash not found",
+	       hash_to_set, seg_enable);
+	return 0;
+}
+
+
+/* Comparison function to sort gold flow loads - based on flow avg_pps
+ * return -1 if a is before b, 1 if a is after b, 0 if equal
+ */
+int cmp_fn_flow_pps(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct rmnet_shs_wq_gold_flow_s *flow_a;
+	struct rmnet_shs_wq_gold_flow_s *flow_b;
+
+	if (!a || !b)
+		return 0;
+
+	flow_a = list_entry(a, struct rmnet_shs_wq_gold_flow_s, gflow_list);
+	flow_b = list_entry(b, struct rmnet_shs_wq_gold_flow_s, gflow_list);
+
+	if (flow_a->avg_pps > flow_b->avg_pps)
+		return -1;
+	else if (flow_a->avg_pps < flow_b->avg_pps)
+		return 1;
+
+	return 0;
+}
+
+/* Comparison function to sort cpu capacities - based on cpu avg_pps capacity
+ * return -1 if a is before b, 1 if a is after b, 0 if equal
+ */
+int cmp_fn_cpu_pps(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct rmnet_shs_wq_cpu_cap_s *cpu_a;
+	struct rmnet_shs_wq_cpu_cap_s *cpu_b;
+
+	if (!a || !b)
+		return 0;
+
+	cpu_a = list_entry(a, struct rmnet_shs_wq_cpu_cap_s, cpu_cap_list);
+	cpu_b = list_entry(b, struct rmnet_shs_wq_cpu_cap_s, cpu_cap_list);
+
+	if (cpu_a->avg_pps_capacity > cpu_b->avg_pps_capacity)
+		return -1;
+	else if (cpu_a->avg_pps_capacity < cpu_b->avg_pps_capacity)
+		return 1;
+
+	return 0;
+}
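+
+/* The two comparators above are meant for list_sort(); the workqueue calls
+ * list_sort(NULL, cpu_caps, &cmp_fn_cpu_pps) so the highest-capacity cpu
+ * ends up at the head of the list (descending order), and likewise sorts
+ * gold flows by avg_pps with cmp_fn_flow_pps.
+ */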
+
+
+/* Prints cpu stats and flows to dmesg for debugging */
+void rmnet_shs_wq_debug_print_flows(void)
+{
+	struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p = &rmnet_shs_rx_flow_tbl;
+	struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node;
+	struct rmnet_shs_wq_hstat_s *hnode;
+	int flows, i;
+	u16 cpu_num = 0;
+
+	if (!RMNET_SHS_DEBUG)
+		return;
+
+	for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
+		cpu_node = &rx_flow_tbl_p->cpu_list[cpu_num];
+		flows = rx_flow_tbl_p->cpu_list[cpu_num].flows;
+
+		rm_err("SHS_CPU: cpu[%d]: flows=%d pps=%llu bps=%llu "
+		       "qhead_diff %u qhead_total = %u qhead_start = %u "
+		       "qhead = %u qhead_last = %u isolated = %d ",
+		       cpu_num, flows, cpu_node->rx_pps, cpu_node->rx_bps,
+		       cpu_node->qhead_diff, cpu_node->qhead_total,
+		       cpu_node->qhead_start,
+		       cpu_node->qhead, cpu_node->last_qhead,
+		       cpu_isolated(cpu_num));
+
+		list_for_each_entry(hnode,
+				    &rmnet_shs_wq_hstat_tbl,
+				    hstat_node_id) {
+			if (!hnode)
+				continue;
+
+			if (hnode->in_use == 0)
+				continue;
+
+			if (hnode->node) {
+				if (hnode->current_cpu == cpu_num)
+					rm_err("SHS_CPU:         > flow 0x%x "
+					       "with pps %llu avg_pps %llu rx_bps %llu ",
+					       hnode->hash, hnode->rx_pps,
+					       hnode->avg_pps, hnode->rx_bps);
+			}
+		} /* loop per flow */
+
+		for (i = 0; i < 3 - flows; i++) {
+			rm_err("%s", "SHS_CPU:         > ");
+		}
+	} /* loop per cpu */
+}
+
+/* Prints the sorted gold flow list to dmesg */
+void rmnet_shs_wq_debug_print_sorted_gold_flows(struct list_head *gold_flows)
+{
+	struct rmnet_shs_wq_gold_flow_s *gflow_node;
+
+	if (!RMNET_SHS_DEBUG)
+		return;
+
+	if (!gold_flows) {
+		rm_err("%s", "SHS_GDMA: Gold Flows List is NULL");
+		return;
+	}
+
+	rm_err("%s", "SHS_GDMA: List of sorted gold flows:");
+	list_for_each_entry(gflow_node, gold_flows, gflow_list) {
+		if (!gflow_node)
+			continue;
+
+		rm_err("SHS_GDMA: > flow 0x%x with pps %llu on cpu[%d]",
+		       gflow_node->hash, gflow_node->rx_pps,
+		       gflow_node->cpu_num);
+	}
+}
+
+/* Userspace evaluation: send userspace the response to the sync message
+ * after we update shared memory. shsusr will then send a netlink message
+ * if flows should be moved around.
+ */
+void rmnet_shs_wq_eval_cpus_caps_and_flows(struct list_head *cpu_caps,
+					   struct list_head *gold_flows,
+					   struct list_head *ss_flows)
+{
+	if (!cpu_caps || !gold_flows || !ss_flows) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
+	list_sort(NULL, cpu_caps, &cmp_fn_cpu_pps);
+	list_sort(NULL, gold_flows, &cmp_fn_flow_pps);
+
+	rmnet_shs_wq_mem_update_cached_cpu_caps(cpu_caps);
+	rmnet_shs_wq_mem_update_cached_sorted_gold_flows(gold_flows);
+	rmnet_shs_wq_mem_update_cached_sorted_ss_flows(ss_flows);
+
+	rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_RESP_INT);
+
+	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_SHSUSR_SYNC_END,
+				0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+}
+
+/* Default wq evaluation logic, use this if rmnet_shs_userspace_connected is 0 */
 void rmnet_shs_wq_eval_suggested_cpu(void)
 
 {
@@ -1069,7 +1585,7 @@
 }
 void rmnet_shs_wq_refresh_new_flow_list(void)
 {
-	struct rmnet_shs_wq_ep_s *ep;
+	struct rmnet_shs_wq_ep_s *ep = NULL;
 
 	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 		if (!ep)
@@ -1088,7 +1604,7 @@
 	u8 lo_max;
 	int cpu_assigned = -1;
 	u8 is_match_found = 0;
-	struct rmnet_shs_wq_ep_s *ep;
+	struct rmnet_shs_wq_ep_s *ep = NULL;
 
 	if (!dev) {
 		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
@@ -1101,7 +1617,7 @@
 		if (!ep->is_ep_active)
 			continue;
 
-		if (ep->ep->egress_dev == dev) {
+		if (ep->ep == dev) {
 			is_match_found = 1;
 			break;
 		}
@@ -1125,7 +1641,7 @@
 	}
 
 	/* Increment CPU assignment idx to be ready for next flow assignment*/
-	if ((cpu_assigned >= 0)|| ((ep->new_lo_idx + 1) >= ep->new_lo_max))
+	if ((cpu_assigned >= 0) || ((ep->new_lo_idx + 1) >= ep->new_lo_max))
 		ep->new_lo_idx = ((ep->new_lo_idx + 1) % ep->new_lo_max);
 
 	return cpu_assigned;
@@ -1133,7 +1649,7 @@
 
 int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev)
 {
-	struct rmnet_shs_wq_ep_s *ep;
+	struct rmnet_shs_wq_ep_s *ep = NULL;
 	int cpu_assigned = -1;
 	u8 hi_idx;
 	u8 hi_max;
@@ -1151,7 +1667,7 @@
 		if (!ep->is_ep_active)
 			continue;
 
-		if (ep->ep->egress_dev == dev) {
+		if (ep->ep == dev) {
 			is_match_found = 1;
 			break;
 		}
@@ -1197,11 +1713,11 @@
 
 void rmnet_shs_wq_cleanup_hash_tbl(u8 force_clean)
 {
-	struct rmnet_shs_skbn_s *node_p;
+	struct rmnet_shs_skbn_s *node_p = NULL;
 	time_t tns2s;
 	unsigned long ht_flags;
 	struct rmnet_shs_wq_hstat_s *hnode = NULL;
-	struct list_head *ptr, *next;
+	struct list_head *ptr = NULL, *next = NULL;
 
 	list_for_each_safe(ptr, next, &rmnet_shs_wq_hstat_tbl) {
 		hnode = list_entry(ptr,
@@ -1234,6 +1750,10 @@
 				hash_del_rcu(&node_p->list);
 				kfree(node_p);
 			}
+			rm_err("SHS_FLOW: removing flow 0x%x on cpu[%d] "
+			       "pps: %llu avg_pps: %llu",
+			       hnode->hash, hnode->current_cpu,
+			       hnode->rx_pps, hnode->avg_pps);
 			rmnet_shs_wq_cpu_list_remove(hnode);
 			if (hnode->is_perm == 0 || force_clean) {
 				rmnet_shs_wq_hstat_tbl_remove(hnode);
@@ -1250,56 +1770,92 @@
 
 void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
 {
-	u8 len = 0;
 	struct rps_map *map;
+	u8 len = 0;
 
-	if (!ep) {
+	if (!ep || !ep->ep) {
 		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
 		return;
 	}
+
 	rcu_read_lock();
-	map = rcu_dereference(ep->ep->egress_dev->_rx->rps_map);
+	if (!ep->ep) {
+		pr_info("rmnet_shs: invalid endpoint state %p\n", ep->ep);
+		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+		rcu_read_unlock();
+		return;
+	}
+	map = rcu_dereference(ep->ep->_rx->rps_map);
+
 	ep->rps_config_msk = 0;
 	if (map != NULL) {
 		for (len = 0; len < map->len; len++)
 			ep->rps_config_msk |= (1 << map->cpus[len]);
 	}
 	rcu_read_unlock();
+
 	ep->default_core_msk = ep->rps_config_msk & 0x0F;
 	ep->pri_core_msk = ep->rps_config_msk & 0xF0;
 }
 
 void rmnet_shs_wq_reset_ep_active(struct net_device *dev)
 {
-	struct rmnet_shs_wq_ep_s *ep;
+	struct rmnet_shs_wq_ep_s *ep = NULL;
+	struct rmnet_shs_wq_ep_s *tmp = NULL;
+	unsigned long flags;
 
-	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
+	if (!dev) {
+		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
+		return;
+	}
+
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
+	list_for_each_entry_safe(ep, tmp, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 		if (!ep)
 			continue;
 
-		if (ep->ep->egress_dev == dev)
+		if (ep->ep == dev) {
 			ep->is_ep_active = 0;
+			rmnet_shs_wq_ep_tbl_remove(ep);
+			kfree(ep);
+			break;
+		}
 	}
 
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 }
 
 void rmnet_shs_wq_set_ep_active(struct net_device *dev)
 {
-	struct rmnet_shs_wq_ep_s *ep;
+	struct rmnet_shs_wq_ep_s *ep = NULL;
+	unsigned long flags;
 
-	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
-		if (!ep)
-			continue;
-
-		if (ep->ep->egress_dev == dev)
-			ep->is_ep_active = 1;
+	if (!dev) {
+		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
+		return;
 	}
 
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
+
+	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+
+	if (!ep) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_ALLOC_EP_TBL_ERR]++;
+		spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
+		return;
+	}
+	ep->ep = dev;
+	ep->is_ep_active = 1;
+
+	INIT_LIST_HEAD(&ep->ep_list_id);
+	rmnet_shs_wq_update_ep_rps_msk(ep);
+	rmnet_shs_wq_ep_tbl_add(ep);
+
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 }
 
 void rmnet_shs_wq_refresh_ep_masks(void)
 {
-	struct rmnet_shs_wq_ep_s *ep;
+	struct rmnet_shs_wq_ep_s *ep = NULL;
 
 	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 
@@ -1331,10 +1887,10 @@
 	rmnet_shs_cfg.map_len = rmnet_shs_get_mask_len(mask);
 }
 
-static void rmnet_shs_wq_update_stats(void)
+void rmnet_shs_wq_update_stats(void)
 {
 	struct timespec time;
-	struct rmnet_shs_wq_hstat_s *hnode;
+	struct rmnet_shs_wq_hstat_s *hnode = NULL;
 
 	(void) getnstimeofday(&time);
 	rmnet_shs_wq_tnsec = RMNET_SHS_SEC_TO_NSEC(time.tv_sec) + time.tv_nsec;
@@ -1351,23 +1907,54 @@
 		if (hnode->node) {
 			rmnet_shs_wq_update_hash_stats(hnode);
 			rmnet_shs_wq_update_cpu_rx_tbl(hnode);
+
+			if (rmnet_shs_userspace_connected) {
+				if (!rmnet_shs_is_lpwr_cpu(hnode->current_cpu)) {
+					/* Add gold flows to list */
+					rmnet_shs_wq_gflow_list_add(hnode, &gflows);
+				}
+				if (hnode->skb_tport_proto == IPPROTO_TCP) {
+					rmnet_shs_wq_ssflow_list_add(hnode, &ssflows);
+				}
+			} else {
+				/* Disable segmentation if userspace gets disconnected */
+				hnode->node->hstats->segment_enable = 0;
+			}
 		}
 	}
 	rmnet_shs_wq_refresh_all_cpu_stats();
 	rmnet_shs_wq_refresh_total_stats();
 	rmnet_shs_wq_refresh_dl_mrkr_stats();
-	rmnet_shs_wq_eval_suggested_cpu();
+
+	if (rmnet_shs_userspace_connected) {
+		rm_err("%s", "SHS_UPDATE: Userspace connected, relying on userspace evaluation");
+		rmnet_shs_wq_eval_cpus_caps_and_flows(&cpu_caps, &gflows, &ssflows);
+		rmnet_shs_wq_cleanup_gold_flow_list(&gflows);
+		rmnet_shs_wq_cleanup_ss_flow_list(&ssflows);
+		rmnet_shs_wq_cleanup_cpu_caps_list(&cpu_caps);
+	} else {
+		rm_err("%s", "SHS_UPDATE: shs userspace not connected, using default logic");
+		rmnet_shs_wq_eval_suggested_cpu();
+	}
+
 	rmnet_shs_wq_refresh_new_flow_list();
 	/*Invoke after both the locks are released*/
 	rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
+	rmnet_shs_wq_debug_print_flows();
 }
 
 void rmnet_shs_wq_process_wq(struct work_struct *work)
 {
+	unsigned long flags;
+
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
 				RMNET_SHS_WQ_PROCESS_WQ_START,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
 	rmnet_shs_wq_update_stats();
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
+
 	queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq,
 					rmnet_shs_wq_frequency);
 
@@ -1378,8 +1965,8 @@
 
 void rmnet_shs_wq_clean_ep_tbl(void)
 {
-	struct rmnet_shs_wq_ep_s *ep;
-	struct list_head *ptr, *next;
+	struct rmnet_shs_wq_ep_s *ep = NULL;
+	struct list_head *ptr = NULL, *next = NULL;
 
 	list_for_each_safe(ptr, next, &rmnet_shs_wq_ep_tbl) {
 		ep = list_entry(ptr, struct rmnet_shs_wq_ep_s, ep_list_id);
@@ -1402,6 +1989,8 @@
 	if (!rmnet_shs_wq || !rmnet_shs_delayed_wq)
 		return;
 
+	rmnet_shs_wq_mem_deinit();
+
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_EXIT, RMNET_SHS_WQ_EXIT_START,
 				   0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
@@ -1418,34 +2007,6 @@
 				   0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 }
 
-void rmnet_shs_wq_gather_rmnet_ep(struct net_device *dev)
-{
-	u8 mux_id;
-	struct rmnet_port *port;
-	struct rmnet_endpoint *ep;
-	struct rmnet_shs_wq_ep_s *ep_wq;
-
-	port = rmnet_get_port(dev);
-
-	for (mux_id = 1; mux_id < 255; mux_id++) {
-		ep = rmnet_get_endpoint(port, mux_id);
-		if (!ep)
-			continue;
-
-		trace_rmnet_shs_wq_high(RMNET_SHS_WQ_EP_TBL,
-					RMNET_SHS_WQ_EP_TBL_INIT,
-					0xDEF, 0xDEF, 0xDEF, 0xDEF, ep, NULL);
-		ep_wq = kzalloc(sizeof(*ep_wq), GFP_ATOMIC);
-		if (!ep_wq) {
-			rmnet_shs_crit_err[RMNET_SHS_WQ_ALLOC_EP_TBL_ERR]++;
-			return;
-		}
-		INIT_LIST_HEAD(&ep_wq->ep_list_id);
-		ep_wq->ep = ep;
-		rmnet_shs_wq_update_ep_rps_msk(ep_wq);
-		rmnet_shs_wq_ep_tbl_add(ep_wq);
-	}
-}
 void rmnet_shs_wq_init_cpu_rx_flow_tbl(void)
 {
 	u8 cpu_num;
@@ -1460,6 +2021,7 @@
 
 		rx_flow_tbl_p = &rmnet_shs_rx_flow_tbl.cpu_list[cpu_num];
 		INIT_LIST_HEAD(&rx_flow_tbl_p->hstat_id);
+		rx_flow_tbl_p->cpu_num = cpu_num;
 	}
 
 }
@@ -1484,9 +2046,15 @@
 	if (rmnet_shs_wq)
 		return;
 
+	if (!dev) {
+		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
+		return;
+	}
+
+	rmnet_shs_wq_mem_init();
+
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_START,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
-	spin_lock_init(&rmnet_shs_wq_splock);
 	rmnet_shs_wq = alloc_workqueue("rmnet_shs_wq",
 					WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 	if (!rmnet_shs_wq) {
@@ -1503,24 +2071,16 @@
 		return;
 	}
 
-	rmnet_shs_delayed_wq->netdev = dev;
-	rmnet_shs_wq_gather_rmnet_ep(dev);
-
 	/*All hstat nodes allocated during Wq init will be held for ever*/
 	rmnet_shs_wq_hstat_alloc_nodes(RMNET_SHS_MIN_HSTAT_NODES_REQD, 1);
 	rmnet_shs_wq_init_cpu_rx_flow_tbl();
 	INIT_DEFERRABLE_WORK(&rmnet_shs_delayed_wq->wq,
 			     rmnet_shs_wq_process_wq);
 
-	/* During initialization, we can start workqueue without a delay
-	 * to initialize all meta data and pre allocated memory
-	 * for hash stats, if required
-	 */
-	queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq, 0);
-
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_END,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 }
+
 int rmnet_shs_wq_get_num_cpu_flows(u16 cpu)
 {
 	int flows = -1;
@@ -1592,6 +2152,11 @@
 
 void rmnet_shs_wq_inc_cpu_flow(u16 cpu)
 {
+	if (cpu >= MAX_CPUS) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+		return;
+	}
+
 	rmnet_shs_rx_flow_tbl.cpu_list[cpu].flows++;
 
 	trace_rmnet_shs_wq_low(RMNET_SHS_WQ_CPU_STATS,
@@ -1602,6 +2167,11 @@
 
 void rmnet_shs_wq_dec_cpu_flow(u16 cpu)
 {
+	if (cpu >= MAX_CPUS) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+		return;
+	}
+
 	if (rmnet_shs_rx_flow_tbl.cpu_list[cpu].flows > 0)
 		rmnet_shs_rx_flow_tbl.cpu_list[cpu].flows--;
 
@@ -1613,5 +2183,11 @@
 
 u64 rmnet_shs_wq_get_max_allowed_pps(u16 cpu)
 {
+
+	if (cpu >= MAX_CPUS) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+		return 0;
+	}
+
 	return rmnet_shs_cpu_rx_max_pps_thresh[cpu];
 }
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.h b/drivers/rmnet/shs/rmnet_shs_wq.h
index da85906..ed37dc8 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq.h
@@ -19,6 +19,11 @@
 #include "rmnet_shs_config.h"
 #include "rmnet_shs.h"
 
+#define RMNET_SHS_DEBUG 0
+
+#define rm_err(fmt, ...)  \
+	do { if (RMNET_SHS_DEBUG) pr_err(fmt, __VA_ARGS__); } while (0)
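+/* rm_err() expects at least one argument after the format string because
+ * __VA_ARGS__ is not pasted with ##; callers that only have a literal
+ * message therefore use the pattern rm_err("%s", "SHS_...: message").
+ */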
+
 #define MAX_SUPPORTED_FLOWS_DEBUG 16
 
 #define RMNET_SHS_RX_BPNSEC_TO_BPSEC(x) ((x)*1000000000)
@@ -28,11 +33,14 @@
 #define RMNET_SHS_MIN_HSTAT_NODES_REQD 16
 #define RMNET_SHS_WQ_DELAY_TICKS  10
 
+extern unsigned long long rmnet_shs_cpu_rx_max_pps_thresh[MAX_CPUS] __read_mostly;
+extern unsigned long long rmnet_shs_cpu_rx_min_pps_thresh[MAX_CPUS] __read_mostly;
+
 /* stores wq and end point details */
 
 struct rmnet_shs_wq_ep_s {
 	struct list_head ep_list_id;
-	struct rmnet_endpoint *ep;
+	struct net_device *ep;
 	int  new_lo_core[MAX_CPUS];
 	int  new_hi_core[MAX_CPUS];
 	u16 default_core_msk;
@@ -50,7 +58,17 @@
 	struct rmnet_shs_wq_ep_s ep;
 };
 
+/* Types of suggestions made by shs wq */
+enum rmnet_shs_wq_suggestion_type {
+	RMNET_SHS_WQ_SUGG_NONE,
+	RMNET_SHS_WQ_SUGG_SILVER_TO_GOLD,
+	RMNET_SHS_WQ_SUGG_GOLD_TO_SILVER,
+	RMNET_SHS_WQ_SUGG_GOLD_BALANCE,
+	RMNET_SHS_WQ_SUGG_MAX,
+};
+
 struct rmnet_shs_wq_hstat_s {
+	unsigned long int rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_MAX];
 	struct list_head cpu_node_id;
 	struct list_head hstat_node_id;
 	struct rmnet_shs_skbn_s *node; //back pointer to node
@@ -61,6 +79,8 @@
 	u64 rx_bytes;
 	u64 rx_pps; /*pkts per second*/
 	u64 rx_bps; /*bits per second*/
+	u64 last_pps;
+	u64 avg_pps;
 	u64 last_rx_skb;
 	u64 last_rx_bytes;
 	u32 rps_config_msk; /*configured rps mask for net device*/
@@ -69,13 +89,14 @@
 	u32 pri_core_msk; /* priority cores availability mask*/
 	u32 available_core_msk; /* other available cores for this flow*/
 	u32 hash; /*skb hash*/
+	int stat_idx; /*internal used for datatop*/
 	u16 suggested_cpu; /* recommended CPU to stamp pkts*/
 	u16 current_cpu; /* core where the flow is being processed*/
 	u16 skb_tport_proto;
-	int stat_idx; /*internal used for datatop*/
 	u8 in_use;
 	u8 is_perm;
 	u8 is_new_flow;
+	u8 segment_enable; /* segment coalesces packets */
 };
 
 struct rmnet_shs_wq_cpu_rx_pkt_q_s {
@@ -97,6 +118,7 @@
 	u32 qhead_start; /* start mark of total pp*/
 	u32 qhead_total; /* end mark of total pp*/
 	int flows;
+	u16 cpu_num;
 };
 
 struct rmnet_shs_wq_rx_flow_s {
@@ -132,10 +154,34 @@
 
 struct rmnet_shs_delay_wq_s {
 	struct delayed_work wq;
-	struct net_device *netdev;
 };
 
+/* Structures to be used for creating sorted versions of flow and cpu lists */
+struct rmnet_shs_wq_cpu_cap_s {
+	struct list_head cpu_cap_list;
+	u64 pps_capacity;
+	u64 avg_pps_capacity;
+	u16 cpu_num;
+};
 
+struct rmnet_shs_wq_gold_flow_s {
+	struct list_head gflow_list;
+	u64 rx_pps;
+	u64 avg_pps;
+	u32 hash;
+	u16 cpu_num;
+};
+
+struct rmnet_shs_wq_ss_flow_s {
+	struct list_head ssflow_list;
+	u64 rx_pps;
+	u64 avg_pps;
+	u64 rx_bps;
+	u32 hash;
+	u16 cpu_num;
+};
+
+/* Tracing Definitions */
 enum rmnet_shs_wq_trace_func {
 	RMNET_SHS_WQ_INIT,
 	RMNET_SHS_WQ_PROCESS_WQ,
@@ -146,6 +192,7 @@
 	RMNET_SHS_WQ_FLOW_STATS,
 	RMNET_SHS_WQ_CPU_STATS,
 	RMNET_SHS_WQ_TOTAL_STATS,
+	RMNET_SHS_WQ_SHSUSR,
 };
 
 enum rmnet_shs_wq_trace_evt {
@@ -202,8 +249,13 @@
 	RMNET_SHS_WQ_INIT_END,
 	RMNET_SHS_WQ_EXIT_START,
 	RMNET_SHS_WQ_EXIT_END,
-
-
+	RMNET_SHS_WQ_TRY_PASS,
+	RMNET_SHS_WQ_TRY_FAIL,
+	RMNET_SHS_WQ_SHSUSR_SYNC_START,
+	RMNET_SHS_WQ_SHSUSR_SYNC_END,
+	RMNET_SHS_WQ_FLOW_STATS_SET_FLOW_SEGMENTATION,
+	RMNET_SHS_WQ_FLOW_SEG_SET_PASS,
+	RMNET_SHS_WQ_FLOW_SEG_SET_FAIL,
 };
 
 extern struct rmnet_shs_cpu_node_s rmnet_shs_cpu_node_tbl[MAX_CPUS];
@@ -213,6 +265,8 @@
 void rmnet_shs_wq_restart(void);
 void rmnet_shs_wq_pause(void);
 
+void rmnet_shs_update_cfg_mask(void);
+
 u64 rmnet_shs_wq_get_max_pps_among_cores(u32 core_msk);
 void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p);
 int rmnet_shs_wq_get_least_utilized_core(u16 core_msk);
@@ -220,9 +274,15 @@
 int rmnet_shs_wq_get_perf_cpu_new_flow(struct net_device *dev);
 u64 rmnet_shs_wq_get_max_allowed_pps(u16 cpu);
 void rmnet_shs_wq_inc_cpu_flow(u16 cpu);
-u32 rmnet_shs_wq_get_dev_rps_msk(struct net_device *dev);
 void rmnet_shs_wq_dec_cpu_flow(u16 cpu);
 void rmnet_shs_hstat_tbl_delete(void);
 void rmnet_shs_wq_set_ep_active(struct net_device *dev);
 void rmnet_shs_wq_reset_ep_active(struct net_device *dev);
+void rmnet_shs_wq_refresh_new_flow_list(void);
+
+int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
+				  u32 sugg_type);
+
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable);
+
 #endif /*_RMNET_SHS_WQ_H_*/
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.c b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
new file mode 100644
index 0000000..b28f0c2
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
@@ -0,0 +1,358 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash Workqueue Generic Netlink Functions
+ *
+ */
+
+#include "rmnet_shs_wq_genl.h"
+#include <net/sock.h>
+#include <linux/skbuff.h>
+
+MODULE_LICENSE("GPL v2");
+
+static struct net *last_net;
+static u32 last_snd_portid;
+
+uint32_t rmnet_shs_genl_seqnum;
+int rmnet_shs_userspace_connected;
+
+/* Static Functions and Definitions */
+static struct nla_policy rmnet_shs_genl_attr_policy[RMNET_SHS_GENL_ATTR_MAX + 1] = {
+	[RMNET_SHS_GENL_ATTR_INT] = { .type = NLA_S32 },
+	[RMNET_SHS_GENL_ATTR_SUGG] = { .len = sizeof(struct rmnet_shs_wq_sugg_info) },
+	[RMNET_SHS_GENL_ATTR_SEG] = { .len = sizeof(struct rmnet_shs_wq_seg_info) },
+	[RMNET_SHS_GENL_ATTR_STR] = { .type = NLA_NUL_STRING },
+};
+
+#define RMNET_SHS_GENL_OP(_cmd, _func)			\
+	{						\
+		.cmd	= _cmd,				\
+		.policy	= rmnet_shs_genl_attr_policy,	\
+		.doit	= _func,			\
+		.dumpit	= NULL,				\
+		.flags	= 0,				\
+	}
+
+static const struct genl_ops rmnet_shs_genl_ops[] = {
+	RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_INIT_DMA,
+			  rmnet_shs_genl_dma_init),
+	RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_TRY_TO_MOVE_FLOW,
+			  rmnet_shs_genl_try_to_move_flow),
+	RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_SET_FLOW_SEGMENTATION,
+			  rmnet_shs_genl_set_flow_segmentation),
+	RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_MEM_SYNC,
+			  rmnet_shs_genl_mem_sync),
+};
+
+struct genl_family rmnet_shs_genl_family = {
+	.hdrsize = 0,
+	.name    = RMNET_SHS_GENL_FAMILY_NAME,
+	.version = RMNET_SHS_GENL_VERSION,
+	.maxattr = RMNET_SHS_GENL_ATTR_MAX,
+	.ops     = rmnet_shs_genl_ops,
+	.n_ops   = ARRAY_SIZE(rmnet_shs_genl_ops),
+};
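+
+/* Message flow sketch, as implemented below: shsusr issues
+ * RMNET_SHS_GENL_CMD_MEM_SYNC, rmnet_shs_genl_mem_sync() caches the
+ * sender's portid and net namespace, and the workqueue later answers
+ * with RMNET_SHS_SYNC_RESP_INT via
+ * rmnet_shs_genl_send_int_to_userspace_no_info() once shared memory
+ * has been refreshed.
+ */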
+
+int rmnet_shs_genl_send_int_to_userspace(struct genl_info *info, int val)
+{
+	struct sk_buff *skb;
+	void *msg_head;
+	int rc;
+
+	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+	if (skb == NULL)
+		goto out;
+
+	msg_head = genlmsg_put(skb, 0, info->snd_seq+1, &rmnet_shs_genl_family,
+			       0, RMNET_SHS_GENL_CMD_INIT_DMA);
+	if (msg_head == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
+	if (rc != 0)
+		goto out;
+
+	genlmsg_end(skb, msg_head);
+
+	rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
+	if (rc != 0)
+		goto out;
+
+	rm_err("SHS_GNL: Successfully sent int %d\n", val);
+	return 0;
+
+out:
+	/* TODO: Need to free skb?? */
+	rm_err("SHS_GNL: FAILED to send int %d\n", val);
+	return -1;
+}
+
+int rmnet_shs_genl_send_int_to_userspace_no_info(int val)
+{
+	struct sk_buff *skb;
+	void *msg_head;
+	int rc;
+
+	if (last_net == NULL) {
+		rm_err("SHS_GNL: FAILED to send int %d - last_net is NULL\n",
+		       val);
+		return -1;
+	}
+
+	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+	if (skb == NULL)
+		goto out;
+
+	msg_head = genlmsg_put(skb, 0, rmnet_shs_genl_seqnum++, &rmnet_shs_genl_family,
+			       0, RMNET_SHS_GENL_CMD_INIT_DMA);
+	if (msg_head == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
+	if (rc != 0)
+		goto out;
+
+	genlmsg_end(skb, msg_head);
+
+	rc = genlmsg_unicast(last_net, skb, last_snd_portid);
+	if (rc != 0)
+		goto out;
+
+	rm_err("SHS_GNL: Successfully sent int %d\n", val);
+	return 0;
+
+out:
+	/* TODO: Need to free skb?? */
+	rm_err("SHS_GNL: FAILED to send int %d\n", val);
+	rmnet_shs_userspace_connected = 0;
+	return -1;
+}
+
+
+int rmnet_shs_genl_send_msg_to_userspace(void)
+{
+	struct sk_buff *skb;
+	void *msg_head;
+	int rc;
+	int val = rmnet_shs_genl_seqnum++;
+
+	rm_err("SHS_GNL: Trying to send msg %d\n", val);
+	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+	if (skb == NULL)
+		goto out;
+
+	msg_head = genlmsg_put(skb, 0, rmnet_shs_genl_seqnum++, &rmnet_shs_genl_family,
+			       0, RMNET_SHS_GENL_CMD_INIT_DMA);
+	if (msg_head == NULL) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
+	if (rc != 0)
+		goto out;
+
+	genlmsg_end(skb, msg_head);
+
+	genlmsg_multicast(&rmnet_shs_genl_family, skb, 0, 0, GFP_ATOMIC);
+
+	rm_err("SHS_GNL: Successfully sent int %d\n", val);
+	return 0;
+
+out:
+	/* TODO: Need to free skb?? */
+	rm_err("SHS_GNL: FAILED to send int %d\n", val);
+	rmnet_shs_userspace_connected = 0;
+	return -1;
+}
+
+/* Currently unused - handles a message from userspace to initialize the
+ * shared memory; the memory is initialized by the kernel wq automatically
+ */
+int rmnet_shs_genl_dma_init(struct sk_buff *skb_2, struct genl_info *info)
+{
+	rm_err("%s", "SHS_GNL: rmnet_shs_genl_dma_init");
+
+	if (info == NULL) {
+		rm_err("%s", "SHS_GNL: an error occurred - info is null");
+		return -1;
+	}
+
+	return 0;
+}
+
+
+int rmnet_shs_genl_set_flow_segmentation(struct sk_buff *skb_2, struct genl_info *info)
+{
+	struct nlattr *na;
+	struct rmnet_shs_wq_seg_info seg_info;
+	int rc = 0;
+
+	rm_err("%s", "SHS_GNL: rmnet_shs_genl_set_flow_segmentation");
+
+	if (info == NULL) {
+		rm_err("%s", "SHS_GNL: an error occurred - info is null");
+		return -1;
+	}
+
+	na = info->attrs[RMNET_SHS_GENL_ATTR_SEG];
+	if (na) {
+		if (nla_memcpy(&seg_info, na, sizeof(seg_info)) > 0) {
+			rm_err("SHS_GNL: recv segmentation req "
+			       "hash_to_set = 0x%x segment_enable = %u",
+			       seg_info.hash_to_set,
+			       seg_info.segment_enable);
+
+			rc = rmnet_shs_wq_set_flow_segmentation(seg_info.hash_to_set,
+								seg_info.segment_enable);
+
+			if (rc == 1) {
+				rmnet_shs_genl_send_int_to_userspace(info, 0);
+				trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
+					RMNET_SHS_WQ_FLOW_SEG_SET_PASS,
+					seg_info.hash_to_set, seg_info.segment_enable,
+					0xDEF, 0xDEF, NULL, NULL);
+			} else {
+				rmnet_shs_genl_send_int_to_userspace(info, -1);
+				trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
+					RMNET_SHS_WQ_FLOW_SEG_SET_FAIL,
+					seg_info.hash_to_set, seg_info.segment_enable,
+					0xDEF, 0xDEF, NULL, NULL);
+				return 0;
+			}
+		} else {
+			rm_err("SHS_GNL: nla_memcpy failed %d\n",
+			       RMNET_SHS_GENL_ATTR_SEG);
+			rmnet_shs_genl_send_int_to_userspace(info, -1);
+			return 0;
+		}
+	} else {
+		rm_err("SHS_GNL: no info->attrs %d\n",
+		       RMNET_SHS_GENL_ATTR_SEG);
+		rmnet_shs_genl_send_int_to_userspace(info, -1);
+		return 0;
+	}
+
+	return 0;
+}
+
+int rmnet_shs_genl_try_to_move_flow(struct sk_buff *skb_2, struct genl_info *info)
+{
+	struct nlattr *na;
+	struct rmnet_shs_wq_sugg_info sugg_info;
+	int rc = 0;
+
+	rm_err("%s", "SHS_GNL: rmnet_shs_genl_try_to_move_flow");
+
+	if (info == NULL) {
+		rm_err("%s", "SHS_GNL: an error occurred - info is null");
+		return -1;
+	}
+
+	na = info->attrs[RMNET_SHS_GENL_ATTR_SUGG];
+	if (na) {
+		if (nla_memcpy(&sugg_info, na, sizeof(sugg_info)) > 0) {
+			rm_err("SHS_GNL: cur_cpu =%u dest_cpu = %u "
+			       "hash_to_move = 0x%x sugg_type = %u",
+			       sugg_info.cur_cpu,
+			       sugg_info.dest_cpu,
+			       sugg_info.hash_to_move,
+			       sugg_info.sugg_type);
+			rc = rmnet_shs_wq_try_to_move_flow(sugg_info.cur_cpu,
+							   sugg_info.dest_cpu,
+							   sugg_info.hash_to_move,
+							   sugg_info.sugg_type);
+			if (rc == 1) {
+				rmnet_shs_genl_send_int_to_userspace(info, 0);
+				trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_TRY_PASS,
+				   sugg_info.cur_cpu, sugg_info.dest_cpu,
+				   sugg_info.hash_to_move, sugg_info.sugg_type, NULL, NULL);
+
+			} else {
+				rmnet_shs_genl_send_int_to_userspace(info, -1);
+				trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_TRY_FAIL,
+				   sugg_info.cur_cpu, sugg_info.dest_cpu,
+				   sugg_info.hash_to_move, sugg_info.sugg_type, NULL, NULL);
+				return 0;
+			}
+		} else {
+			rm_err("SHS_GNL: nla_memcpy failed %d\n",
+			       RMNET_SHS_GENL_ATTR_SUGG);
+			rmnet_shs_genl_send_int_to_userspace(info, -1);
+			return 0;
+		}
+	} else {
+		rm_err("SHS_GNL: no info->attrs %d\n",
+		       RMNET_SHS_GENL_ATTR_SUGG);
+		rmnet_shs_genl_send_int_to_userspace(info, -1);
+		return 0;
+	}
+
+	return 0;
+}
+
+int rmnet_shs_genl_mem_sync(struct sk_buff *skb_2, struct genl_info *info)
+{
+	rm_err("%s", "SHS_GNL: rmnet_shs_genl_mem_sync");
+
+	if (!rmnet_shs_userspace_connected)
+		rmnet_shs_userspace_connected = 1;
+
+	/* Todo: detect when userspace is disconnected. If we don't get
+	 * a sync message in the next 2 wq ticks, we got disconnected
+	 */
+
+	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_SHSUSR_SYNC_START,
+				0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+
+	if (info == NULL) {
+		rm_err("%s", "SHS_GNL: an error occurred - info is null");
+		return -1;
+	}
+
+	last_net = genl_info_net(info);
+	last_snd_portid = info->snd_portid;
+	return 0;
+}
+
+/* register new generic netlink family */
+int rmnet_shs_wq_genl_init(void)
+{
+	int ret;
+
+	rmnet_shs_userspace_connected = 0;
+	ret = genl_register_family(&rmnet_shs_genl_family);
+	if (ret != 0) {
+		rm_err("SHS_GNL: register family failed: %i", ret);
+		genl_unregister_family(&rmnet_shs_genl_family);
+		return -1;
+	}
+
+	rm_err("SHS_GNL: successfully registered generic netlink family: %s",
+	       RMNET_SHS_GENL_FAMILY_NAME);
+
+	return 0;
+}
+
+/* Unregister the generic netlink family */
+int rmnet_shs_wq_genl_deinit(void)
+{
+	int ret;
+
+	ret = genl_unregister_family(&rmnet_shs_genl_family);
+	if (ret != 0)
+		rm_err("SHS_GNL: unregister family failed: %i\n", ret);
+	rmnet_shs_userspace_connected = 0;
+	return 0;
+}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.h b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
new file mode 100644
index 0000000..333de48
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash stamping solution
+ *
+ */
+
+#ifndef _RMNET_SHS_WQ_GENL_H_
+#define _RMNET_SHS_WQ_GENL_H_
+
+#include "rmnet_shs.h"
+#include <net/genetlink.h>
+
+/* Generic Netlink Definitions */
+#define RMNET_SHS_GENL_VERSION 1
+#define RMNET_SHS_GENL_FAMILY_NAME "RMNET_SHS"
+#define RMNET_SHS_SYNC_RESP_INT 828 /* Any number, sent after mem update */
+
+extern int rmnet_shs_userspace_connected;
+
+enum {
+	RMNET_SHS_GENL_CMD_UNSPEC,
+	RMNET_SHS_GENL_CMD_INIT_DMA,
+	RMNET_SHS_GENL_CMD_TRY_TO_MOVE_FLOW,
+	RMNET_SHS_GENL_CMD_SET_FLOW_SEGMENTATION,
+	RMNET_SHS_GENL_CMD_MEM_SYNC,
+	__RMNET_SHS_GENL_CMD_MAX,
+};
+
+enum {
+	RMNET_SHS_GENL_ATTR_UNSPEC,
+	RMNET_SHS_GENL_ATTR_STR,
+	RMNET_SHS_GENL_ATTR_INT,
+	RMNET_SHS_GENL_ATTR_SUGG,
+	RMNET_SHS_GENL_ATTR_SEG,
+	__RMNET_SHS_GENL_ATTR_MAX,
+};
+#define RMNET_SHS_GENL_ATTR_MAX (__RMNET_SHS_GENL_ATTR_MAX - 1)
+
+struct rmnet_shs_wq_sugg_info {
+	uint32_t hash_to_move;
+	uint32_t sugg_type;
+	uint16_t cur_cpu;
+	uint16_t dest_cpu;
+};
+
+struct rmnet_shs_wq_seg_info {
+	uint32_t hash_to_set;
+	uint32_t segment_enable;
+};
+
+/* Function Prototypes */
+int rmnet_shs_genl_dma_init(struct sk_buff *skb_2, struct genl_info *info);
+int rmnet_shs_genl_try_to_move_flow(struct sk_buff *skb_2, struct genl_info *info);
+int rmnet_shs_genl_set_flow_segmentation(struct sk_buff *skb_2, struct genl_info *info);
+int rmnet_shs_genl_mem_sync(struct sk_buff *skb_2, struct genl_info *info);
+
+int rmnet_shs_genl_send_int_to_userspace(struct genl_info *info, int val);
+
+int rmnet_shs_genl_send_int_to_userspace_no_info(int val);
+
+int rmnet_shs_genl_send_msg_to_userspace(void);
+
+int rmnet_shs_wq_genl_init(void);
+
+int rmnet_shs_wq_genl_deinit(void);
+
+#endif /*_RMNET_SHS_WQ_GENL_H_*/
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.c b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
new file mode 100644
index 0000000..e80d424
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
@@ -0,0 +1,626 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash Workqueue Shared Memory Functions
+ *
+ */
+
+#include "rmnet_shs_wq_mem.h"
+#include <linux/proc_fs.h>
+
+MODULE_LICENSE("GPL v2");
+
+struct proc_dir_entry *shs_proc_dir;
+
+/* Fixed arrays to copy to userspace via the shared memory files */
+struct rmnet_shs_wq_cpu_cap_usr_s rmnet_shs_wq_cap_list_usr[MAX_CPUS];
+struct rmnet_shs_wq_gflows_usr_s rmnet_shs_wq_gflows_usr[RMNET_SHS_MAX_USRFLOWS];
+struct rmnet_shs_wq_ssflows_usr_s rmnet_shs_wq_ssflows_usr[RMNET_SHS_MAX_USRFLOWS];
+
+struct list_head gflows   = LIST_HEAD_INIT(gflows);   /* gold flows */
+struct list_head ssflows  = LIST_HEAD_INIT(ssflows);  /* slow start flows */
+struct list_head cpu_caps = LIST_HEAD_INIT(cpu_caps); /* capacities */
+
+struct rmnet_shs_mmap_info *cap_shared;
+struct rmnet_shs_mmap_info *gflow_shared;
+struct rmnet_shs_mmap_info *ssflow_shared;
+
+/* Static Functions and Definitions */
+static void rmnet_shs_vm_open(struct vm_area_struct *vma)
+{
+	return;
+}
+
+static void rmnet_shs_vm_close(struct vm_area_struct *vma)
+{
+	return;
+}
+
+static int rmnet_shs_vm_fault(struct vm_fault *vmf)
+{
+	struct page *page = NULL;
+	struct rmnet_shs_mmap_info *info;
+
+
+	info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+	if (info->data) {
+		page = virt_to_page(info->data);
+		get_page(page);
+		vmf->page = page;
+	}
+
+	return 0;
+}
+
+static const struct vm_operations_struct rmnet_shs_vm_ops = {
+	.close = rmnet_shs_vm_close,
+	.open = rmnet_shs_vm_open,
+	.fault = rmnet_shs_vm_fault,
+};
+
+static int rmnet_shs_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	vma->vm_ops = &rmnet_shs_vm_ops;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = filp->private_data;
+
+	return 0;
+}
+
+static int rmnet_shs_open_caps(struct inode *inode, struct file *filp)
+{
+	struct rmnet_shs_mmap_info *info;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_open - entry\n");
+	if (!cap_shared) {
+		info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_KERNEL);
+		if (!info) {
+			rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+			return -ENOMEM;
+		}
+		info->data = (char *)get_zeroed_page(GFP_KERNEL);
+		if (!info->data) {
+			kfree(info);
+			rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+			return -ENOMEM;
+		}
+		cap_shared = info;
+		rm_err("SHS_MEM: virt_to_phys = 0x%llx cap_shared = 0x%llx\n",
+		       (unsigned long long)virt_to_phys((void *)info),
+		       (unsigned long long)virt_to_phys((void *)cap_shared));
+	}
+
+	filp->private_data = cap_shared;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_open - OK\n");
+
+	return 0;
+}
+
+static int rmnet_shs_open_g_flows(struct inode *inode, struct file *filp)
+{
+	struct rmnet_shs_mmap_info *info;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_open g_flows - entry\n");
+	if (!gflow_shared) {
+		info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_KERNEL);
+		if (!info) {
+			rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+			return -ENOMEM;
+		}
+		info->data = (char *)get_zeroed_page(GFP_KERNEL);
+		if (!info->data) {
+			kfree(info);
+			rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+			return -ENOMEM;
+		}
+		gflow_shared = info;
+		rm_err("SHS_MEM: virt_to_phys = 0x%llx gflow_shared = 0x%llx\n",
+		       (unsigned long long)virt_to_phys((void *)info),
+		       (unsigned long long)virt_to_phys((void *)gflow_shared));
+	}
+	filp->private_data = gflow_shared;
+	return 0;
+}
+
+static int rmnet_shs_open_ss_flows(struct inode *inode, struct file *filp)
+{
+	struct rmnet_shs_mmap_info *info;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_open ss_flows - entry\n");
+	if (!ssflow_shared) {
+		info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_KERNEL);
+		if (!info) {
+			rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+			return -ENOMEM;
+		}
+		info->data = (char *)get_zeroed_page(GFP_KERNEL);
+		if (!info->data) {
+			kfree(info);
+			rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+			return -ENOMEM;
+		}
+		ssflow_shared = info;
+		rm_err("SHS_MEM: virt_to_phys = 0x%llx ssflow_shared = 0x%llx\n",
+		       (unsigned long long)virt_to_phys((void *)info),
+		       (unsigned long long)virt_to_phys((void *)ssflow_shared));
+	}
+	filp->private_data = ssflow_shared;
+	return 0;
+}
+
+static ssize_t rmnet_shs_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
+{
+	struct rmnet_shs_mmap_info *info;
+	int ret;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_read - entry\n");
+	info = filp->private_data;
+	ret = min_t(size_t, len, RMNET_SHS_BUFFER_SIZE);
+	if (copy_to_user(buf, info->data, ret))
+		return -EFAULT;
+
+	return 0;
+}
+
+static ssize_t rmnet_shs_write(struct file *filp, const char __user *buf, size_t len, loff_t *off)
+{
+	struct rmnet_shs_mmap_info *info;
+	int ret;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_write - entry\n");
+	info = filp->private_data;
+	ret = min_t(size_t, len, RMNET_SHS_BUFFER_SIZE);
+	if (copy_from_user(info->data, buf, ret))
+		return -EFAULT;
+	else
+		return len;
+}
+
+static int rmnet_shs_release_caps(struct inode *inode, struct file *filp)
+{
+	struct rmnet_shs_mmap_info *info;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_release - entry\n");
+	if (cap_shared) {
+		info = filp->private_data;
+		cap_shared = NULL;
+		free_page((unsigned long)info->data);
+		kfree(info);
+		filp->private_data = NULL;
+	}
+	return 0;
+}
+
+static int rmnet_shs_release_g_flows(struct inode *inode, struct file *filp)
+{
+	struct rmnet_shs_mmap_info *info;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_release - entry\n");
+	if (gflow_shared) {
+		info = filp->private_data;
+		gflow_shared = NULL;
+		free_page((unsigned long)info->data);
+		kfree(info);
+		filp->private_data = NULL;
+	}
+	return 0;
+}
+
+static int rmnet_shs_release_ss_flows(struct inode *inode, struct file *filp)
+{
+	struct rmnet_shs_mmap_info *info;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_release - entry\n");
+	if (ssflow_shared) {
+		info = filp->private_data;
+		ssflow_shared = NULL;
+		free_page((unsigned long)info->data);
+		kfree(info);
+		filp->private_data = NULL;
+	}
+	return 0;
+}
+
+static const struct file_operations rmnet_shs_caps_fops = {
+	.owner   = THIS_MODULE,
+	.mmap    = rmnet_shs_mmap,
+	.open    = rmnet_shs_open_caps,
+	.release = rmnet_shs_release_caps,
+	.read    = rmnet_shs_read,
+	.write   = rmnet_shs_write,
+};
+
+static const struct file_operations rmnet_shs_g_flows_fops = {
+	.owner   = THIS_MODULE,
+	.mmap    = rmnet_shs_mmap,
+	.open    = rmnet_shs_open_g_flows,
+	.release = rmnet_shs_release_g_flows,
+	.read    = rmnet_shs_read,
+	.write   = rmnet_shs_write,
+};
+
+static const struct file_operations rmnet_shs_ss_flows_fops = {
+	.owner   = THIS_MODULE,
+	.mmap    = rmnet_shs_mmap,
+	.open    = rmnet_shs_open_ss_flows,
+	.release = rmnet_shs_release_ss_flows,
+	.read    = rmnet_shs_read,
+	.write   = rmnet_shs_write,
+};
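+
+/* Userspace consumption sketch (illustrative only - the exact proc entry
+ * names depend on what this module registers under shs_proc_dir later in
+ * this file; a "/proc/shs/rmnet_shs_caps" style path is assumed here):
+ *
+ *   int fd = open("/proc/shs/rmnet_shs_caps", O_RDONLY);
+ *   struct rmnet_shs_wq_cpu_cap_usr_s *caps =
+ *           mmap(NULL, RMNET_SHS_BUFFER_SIZE, PROT_READ, MAP_SHARED, fd, 0);
+ *
+ * The fault handler above hands out the single zeroed page backing
+ * info->data, so shsusr sees the cached arrays the workqueue copies in.
+ */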
+
+
+/* Global Functions */
+/* Add a flow to the slow start flow list */
+void rmnet_shs_wq_ssflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+				 struct list_head *ss_flows)
+{
+	struct rmnet_shs_wq_ss_flow_s *ssflow_node;
+
+	if (!hnode || !ss_flows) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
+	ssflow_node = kzalloc(sizeof(*ssflow_node), GFP_ATOMIC);
+	if (ssflow_node != NULL) {
+		ssflow_node->avg_pps = hnode->avg_pps;
+		ssflow_node->cpu_num = hnode->current_cpu;
+		ssflow_node->hash = hnode->hash;
+		ssflow_node->rx_pps = hnode->rx_pps;
+		ssflow_node->rx_bps = hnode->rx_bps;
+
+		list_add(&ssflow_node->ssflow_list, ss_flows);
+	} else {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_NODE_MALLOC_ERR]++;
+	}
+}
+
+/* Clean up slow start flow list */
+void rmnet_shs_wq_cleanup_ss_flow_list(struct list_head *ss_flows)
+{
+	struct rmnet_shs_wq_ss_flow_s *ssflow_node;
+	struct list_head *ptr, *next;
+
+	if (!ss_flows) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
+	list_for_each_safe(ptr, next, ss_flows) {
+		ssflow_node = list_entry(ptr,
+					struct rmnet_shs_wq_ss_flow_s,
+					ssflow_list);
+		if (!ssflow_node)
+			continue;
+
+		list_del_init(&ssflow_node->ssflow_list);
+		kfree(ssflow_node);
+	}
+}
+
+/* Add a flow to the gold flow list */
+void rmnet_shs_wq_gflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+				 struct list_head *gold_flows)
+{
+	struct rmnet_shs_wq_gold_flow_s *gflow_node;
+
+	if (!hnode || !gold_flows) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
+	if (!rmnet_shs_is_lpwr_cpu(hnode->current_cpu)) {
+		gflow_node = kzalloc(sizeof(*gflow_node), GFP_ATOMIC);
+		if (gflow_node != NULL) {
+			gflow_node->avg_pps = hnode->avg_pps;
+			gflow_node->cpu_num = hnode->current_cpu;
+			gflow_node->hash = hnode->hash;
+			gflow_node->rx_pps = hnode->rx_pps;
+
+			list_add(&gflow_node->gflow_list, gold_flows);
+		} else {
+			rmnet_shs_crit_err[RMNET_SHS_WQ_NODE_MALLOC_ERR]++;
+		}
+	}
+}
+
+/* Clean up gold flow list */
+void rmnet_shs_wq_cleanup_gold_flow_list(struct list_head *gold_flows)
+{
+	struct rmnet_shs_wq_gold_flow_s *gflow_node;
+	struct list_head *ptr, *next;
+
+	if (!gold_flows) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
+	list_for_each_safe(ptr, next, gold_flows) {
+		gflow_node = list_entry(ptr,
+					struct rmnet_shs_wq_gold_flow_s,
+					gflow_list);
+		if (!gflow_node)
+			continue;
+
+		list_del_init(&gflow_node->gflow_list);
+		kfree(gflow_node);
+	}
+}
+
+/* Add a cpu to the cpu capacities list */
+void rmnet_shs_wq_cpu_caps_list_add(
+				struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p,
+				struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node,
+				struct list_head *cpu_caps)
+{
+	u64 pps_uthresh, pps_lthresh = 0;
+	struct rmnet_shs_wq_cpu_cap_s *cap_node;
+	int flows = 0;
+
+	if (!rx_flow_tbl_p || !cpu_node || !cpu_caps) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
+	flows = rx_flow_tbl_p->cpu_list[cpu_node->cpu_num].flows;
+
+	pps_uthresh = rmnet_shs_cpu_rx_max_pps_thresh[cpu_node->cpu_num];
+	pps_lthresh = rmnet_shs_cpu_rx_min_pps_thresh[cpu_node->cpu_num];
+
+	cap_node = kzalloc(sizeof(*cap_node), GFP_ATOMIC);
+	if (cap_node == NULL) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_NODE_MALLOC_ERR]++;
+		return;
+	}
+
+	cap_node->cpu_num = cpu_node->cpu_num;
+
+	/* No flows means capacity is upper threshold */
+	if (flows <= 0) {
+		cap_node->pps_capacity = pps_uthresh;
+		cap_node->avg_pps_capacity = pps_uthresh;
+		list_add(&cap_node->cpu_cap_list, cpu_caps);
+		return;
+	}
+
+	/* Instantaneous PPS capacity */
+	if (cpu_node->rx_pps < pps_uthresh) {
+		cap_node->pps_capacity =
+			pps_uthresh - cpu_node->rx_pps;
+	} else {
+		cap_node->pps_capacity = 0;
+	}
+
+	/* Average PPS capacity */
+	if (cpu_node->avg_pps < pps_uthresh) {
+		cap_node->avg_pps_capacity =
+			pps_uthresh - cpu_node->avg_pps;
+	} else {
+		cap_node->avg_pps_capacity = 0;
+	}
+
+	list_add(&cap_node->cpu_cap_list, cpu_caps);
+}
+
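+/* For example, with an upper pps threshold of 100000 and a CPU currently
+ * receiving 60000 pps, the instantaneous capacity recorded above would be
+ * 100000 - 60000 = 40000 pps, while a CPU at or above its threshold is
+ * recorded with zero remaining capacity (the numbers are illustrative, not
+ * the tuned defaults).
+ */
+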
+/* Clean up cpu capacities list.
+ * This memory could be reused since the number of cpus doesn't change.
+ */
+void rmnet_shs_wq_cleanup_cpu_caps_list(struct list_head *cpu_caps)
+{
+	struct rmnet_shs_wq_cpu_cap_s *cap_node;
+	struct list_head *ptr, *next;
+
+	if (!cpu_caps) {
+		rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+		return;
+	}
+
+	list_for_each_safe(ptr, next, cpu_caps) {
+		cap_node = list_entry(ptr,
+					struct rmnet_shs_wq_cpu_cap_s,
+					cpu_cap_list);
+		if (!cap_node)
+			continue;
+
+		list_del_init(&cap_node->cpu_cap_list);
+		kfree(cap_node);
+	}
+}
+
+/* Convert the kernel linked list of cpu capacities into an array that can be
+ * memcpy'd to shared memory.
+ * > The cpu capacity linked list is sorted: highest capacity first
+ *     | cap_0 | cap_1 | cap_2 | ... | cap_7 |
+ */
+void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps)
+{
+	struct rmnet_shs_wq_cpu_cap_s *cap_node;
+
+	uint16_t idx = 0;
+
+	if (!cpu_caps) {
+		rm_err("%s", "SHS_SCAPS: CPU Capacities List is NULL");
+		return;
+	}
+
+	rm_err("%s", "SHS_SCAPS: Sorted CPU Capacities:");
+	list_for_each_entry(cap_node, cpu_caps, cpu_cap_list) {
+		if (!cap_node)
+			continue;
+
+		if (idx >= MAX_CPUS)
+			break;
+
+		rm_err("SHS_SCAPS: > cpu[%d] with pps capacity = %llu | "
+		       "avg pps cap = %llu",
+		       cap_node->cpu_num, cap_node->pps_capacity,
+		       cap_node->avg_pps_capacity);
+
+		rmnet_shs_wq_cap_list_usr[idx].avg_pps_capacity = cap_node->avg_pps_capacity;
+		rmnet_shs_wq_cap_list_usr[idx].pps_capacity = cap_node->pps_capacity;
+		rmnet_shs_wq_cap_list_usr[idx].cpu_num = cap_node->cpu_num;
+		idx += 1;
+	}
+
+	rm_err("SHS_MEM: cap_dma_ptr = 0x%llx addr = 0x%pK\n",
+	       (unsigned long long)virt_to_phys((void *)cap_shared), cap_shared);
+	if (!cap_shared) {
+		rm_err("%s", "SHS_WRITE: cap_shared is NULL");
+		return;
+	}
+	memcpy((char *) cap_shared->data,
+	       (void *) &rmnet_shs_wq_cap_list_usr[0],
+	       sizeof(rmnet_shs_wq_cap_list_usr));
+}
+
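+/* Illustrative userspace sketch (not part of the kernel module): one way a
+ * reader might consume the capacity table cached above, by mapping the proc
+ * node created in rmnet_shs_wq_mem_init() below.  The entry count of 8 and
+ * the read-only MAP_SHARED mapping of a single page are assumptions taken
+ * from the cap_0..cap_7 layout comment and RMNET_SHS_BUFFER_SIZE; they are
+ * not a guaranteed ABI.
+ *
+ *	#include <fcntl.h>
+ *	#include <stdint.h>
+ *	#include <stdio.h>
+ *	#include <sys/mman.h>
+ *	#include <unistd.h>
+ *
+ *	struct shs_cpu_cap {		// mirrors rmnet_shs_wq_cpu_cap_usr_s
+ *		uint64_t pps_capacity;
+ *		uint64_t avg_pps_capacity;
+ *		uint64_t bps_capacity;
+ *		uint16_t cpu_num;
+ *	} __attribute__((__packed__));
+ *
+ *	int main(void)
+ *	{
+ *		const struct shs_cpu_cap *caps;
+ *		void *buf;
+ *		int fd, i;
+ *
+ *		fd = open("/proc/shs/rmnet_shs_caps", O_RDONLY);
+ *		if (fd < 0)
+ *			return 1;
+ *		buf = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
+ *		if (buf == MAP_FAILED) {
+ *			close(fd);
+ *			return 1;
+ *		}
+ *		caps = buf;
+ *		for (i = 0; i < 8; i++)		// 8 == assumed MAX_CPUS
+ *			printf("cpu %u: pps cap %llu, avg pps cap %llu\n",
+ *			       caps[i].cpu_num,
+ *			       (unsigned long long)caps[i].pps_capacity,
+ *			       (unsigned long long)caps[i].avg_pps_capacity);
+ *		munmap(buf, 4096);
+ *		close(fd);
+ *		return 0;
+ *	}
+ */
+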
+/* Convert the kernel linked list of gold flows into an array that can be
+ * memcpy'd to shared memory.
+ * > Add number of flows at the beginning of the shared memory address.
+ * > After memcpy is complete, send userspace a message indicating that memcpy
+ *   has just completed.
+ * > The gold flow list is sorted: heaviest gold flow is first
+ *    | num_flows | flow_1 | flow_2 | ... | flow_n | ... |
+ *    |  16 bits  | ...                                  |
+ */
+void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flows)
+{
+	struct rmnet_shs_wq_gold_flow_s *gflow_node;
+	uint16_t idx = 0;
+	int num_gold_flows = 0;
+
+	if (!gold_flows) {
+		rm_err("%s", "SHS_SGOLD: Gold Flows List is NULL");
+		return;
+	}
+
+	rm_err("%s", "SHS_SGOLD: List of sorted gold flows:");
+	list_for_each_entry(gflow_node, gold_flows, gflow_list) {
+		if (!gflow_node)
+			continue;
+
+		if (gflow_node->rx_pps == 0)
+			continue;
+
+		if (idx >= RMNET_SHS_MAX_USRFLOWS)
+			break;
+
+		rm_err("SHS_SGOLD: > flow 0x%x with pps %llu on cpu[%d]",
+		       gflow_node->hash, gflow_node->rx_pps,
+		       gflow_node->cpu_num);
+		num_gold_flows += 1;
+
+		/* Update the cached gold flow list */
+		rmnet_shs_wq_gflows_usr[idx].cpu_num = gflow_node->cpu_num;
+		rmnet_shs_wq_gflows_usr[idx].hash = gflow_node->hash;
+		rmnet_shs_wq_gflows_usr[idx].avg_pps = gflow_node->avg_pps;
+		rmnet_shs_wq_gflows_usr[idx].rx_pps = gflow_node->rx_pps;
+		idx += 1;
+	}
+
+	rm_err("SHS_MEM: gflow_dma_ptr = 0x%llx addr = 0x%pK\n",
+	       (unsigned long long)virt_to_phys((void *)gflow_shared),
+	       gflow_shared);
+
+	if (!gflow_shared) {
+		rm_err("%s", "SHS_WRITE: gflow_shared is NULL");
+		return;
+	}
+
+	rm_err("SHS_SGOLD: num gold flows = %u\n", idx);
+
+	/* Copy num gold flows into first 2 bytes,
+	 * then copy in the cached gold flow array
+	 */
+	memcpy(((char *)gflow_shared->data), &idx, sizeof(idx));
+	memcpy(((char *)gflow_shared->data + sizeof(uint16_t)),
+	       (void *) &rmnet_shs_wq_gflows_usr[0],
+	       sizeof(rmnet_shs_wq_gflows_usr));
+}
+
+/* Convert the kernel linked list of slow start tcp flows into an array that can be
+ * memcpy'd to shared memory.
+ * > Add number of flows at the beginning of the shared memory address.
+ * > After memcpy is complete, send userspace a message indicating that memcpy
+ *   has just completed.
+ * > The ss flow list is sorted: heaviest ss flow is first
+ *    | num_flows | flow_1 | flow_2 | ... | flow_n | ... |
+ *    |  16 bits  | ...                                  |
+ */
+void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows)
+{
+	struct rmnet_shs_wq_ss_flow_s *ssflow_node;
+	uint16_t idx = 0;
+	int num_ss_flows = 0;
+
+	if (!ss_flows) {
+		rm_err("%s", "SHS_SLOW: SS Flows List is NULL");
+		return;
+	}
+
+	rm_err("%s", "SHS_SLOW: List of sorted ss flows:");
+	list_for_each_entry(ssflow_node, ss_flows, ssflow_list) {
+		if (!ssflow_node)
+			continue;
+
+		if (ssflow_node->rx_pps == 0)
+			continue;
+
+		if (idx >= RMNET_SHS_MAX_USRFLOWS)
+			break;
+
+		rm_err("SHS_SLOW: > flow 0x%x with pps %llu on cpu[%d]",
+		       ssflow_node->hash, ssflow_node->rx_pps,
+		       ssflow_node->cpu_num);
+		num_ss_flows += 1;
+
+		/* Update the cached ss flow list */
+		rmnet_shs_wq_ssflows_usr[idx].cpu_num = ssflow_node->cpu_num;
+		rmnet_shs_wq_ssflows_usr[idx].hash = ssflow_node->hash;
+		rmnet_shs_wq_ssflows_usr[idx].avg_pps = ssflow_node->avg_pps;
+		rmnet_shs_wq_ssflows_usr[idx].rx_pps = ssflow_node->rx_pps;
+		rmnet_shs_wq_ssflows_usr[idx].rx_bps = ssflow_node->rx_bps;
+		idx += 1;
+	}
+
+	rm_err("SHS_MEM: ssflow_dma_ptr = 0x%llx addr = 0x%pK\n",
+	       (unsigned long long)virt_to_phys((void *)ssflow_shared),
+	       ssflow_shared);
+
+	if (!ssflow_shared) {
+		rm_err("%s", "SHS_WRITE: ssflow_shared is NULL");
+		return;
+	}
+
+	rm_err("SHS_SLOW: num ss flows = %u\n", idx);
+
+	/* Copy num ss flows into first 2 bytes,
+	 * then copy in the cached ss flow array
+	 */
+	memcpy(((char *)ssflow_shared->data), &idx, sizeof(idx));
+	memcpy(((char *)ssflow_shared->data + sizeof(uint16_t)),
+	       (void *) &rmnet_shs_wq_ssflows_usr[0],
+	       sizeof(rmnet_shs_wq_ssflows_usr));
+}
+
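+/* Illustrative userspace sketch (not part of the kernel module): parsing the
+ * count-prefixed flow tables cached above.  The mirrored struct matches
+ * rmnet_shs_wq_gflows_usr_s / rmnet_shs_wq_ssflows_usr_s from
+ * rmnet_shs_wq_mem.h; consuming the table through a full one-page read() and
+ * the 128-entry bound (RMNET_SHS_MAX_USRFLOWS) are assumptions about how a
+ * reader chooses to use the proc nodes, not a guaranteed ABI.
+ *
+ *	#include <fcntl.h>
+ *	#include <stdint.h>
+ *	#include <stdio.h>
+ *	#include <string.h>
+ *	#include <unistd.h>
+ *
+ *	struct shs_flow {		// mirrors the *_usr_s flow entries
+ *		uint64_t rx_pps;
+ *		uint64_t avg_pps;
+ *		uint64_t rx_bps;
+ *		uint32_t hash;
+ *		uint16_t cpu_num;
+ *	} __attribute__((__packed__));
+ *
+ *	static void dump_flow_table(const char *path)
+ *	{
+ *		char buf[4096];			// RMNET_SHS_BUFFER_SIZE
+ *		struct shs_flow flow;
+ *		uint16_t num_flows, i;
+ *		ssize_t len;
+ *		int fd = open(path, O_RDONLY);
+ *
+ *		if (fd < 0)
+ *			return;
+ *		len = read(fd, buf, sizeof(buf));
+ *		if (len == (ssize_t)sizeof(buf)) {
+ *			memcpy(&num_flows, buf, sizeof(num_flows));
+ *			for (i = 0; i < num_flows && i < 128; i++) {
+ *				memcpy(&flow,
+ *				       buf + sizeof(num_flows) + i * sizeof(flow),
+ *				       sizeof(flow));
+ *				printf("flow 0x%x: %llu pps on cpu %u\n",
+ *				       flow.hash,
+ *				       (unsigned long long)flow.rx_pps,
+ *				       flow.cpu_num);
+ *			}
+ *		}
+ *		close(fd);
+ *	}
+ *
+ *	// e.g. dump_flow_table("/proc/shs/rmnet_shs_flows");
+ *	//      dump_flow_table("/proc/shs/rmnet_shs_ss_flows");
+ */
+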
+/* Creates the proc folder and files for shs shared memory */
+void rmnet_shs_wq_mem_init(void)
+{
+	shs_proc_dir = proc_mkdir("shs", NULL);
+
+	proc_create(RMNET_SHS_PROC_CAPS, 0644, shs_proc_dir, &rmnet_shs_caps_fops);
+	proc_create(RMNET_SHS_PROC_G_FLOWS, 0644, shs_proc_dir, &rmnet_shs_g_flows_fops);
+	proc_create(RMNET_SHS_PROC_SS_FLOWS, 0644, shs_proc_dir, &rmnet_shs_ss_flows_fops);
+
+	cap_shared = NULL;
+	gflow_shared = NULL;
+	ssflow_shared = NULL;
+}
+
+/* Remove shs files and folders from proc fs */
+void rmnet_shs_wq_mem_deinit(void)
+{
+	remove_proc_entry(RMNET_SHS_PROC_CAPS, shs_proc_dir);
+	remove_proc_entry(RMNET_SHS_PROC_G_FLOWS, shs_proc_dir);
+	remove_proc_entry(RMNET_SHS_PROC_SS_FLOWS, shs_proc_dir);
+	remove_proc_entry(RMNET_SHS_PROC_DIR, NULL);
+
+	cap_shared = NULL;
+	gflow_shared = NULL;
+	ssflow_shared = NULL;
+}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.h b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
new file mode 100644
index 0000000..2e5e889
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
@@ -0,0 +1,89 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash stamping solution
+ *
+ */
+
+#ifndef _RMNET_SHS_WQ_MEM_H_
+#define _RMNET_SHS_WQ_MEM_H_
+
+#include "rmnet_shs.h"
+
+/* Shared memory files */
+#define RMNET_SHS_PROC_DIR      "shs"
+#define RMNET_SHS_PROC_CAPS     "rmnet_shs_caps"
+#define RMNET_SHS_PROC_G_FLOWS  "rmnet_shs_flows"
+#define RMNET_SHS_PROC_SS_FLOWS "rmnet_shs_ss_flows"
+
+#define RMNET_SHS_MAX_USRFLOWS (128)
+
+struct __attribute__((__packed__)) rmnet_shs_wq_cpu_cap_usr_s {
+	u64 pps_capacity;
+	u64 avg_pps_capacity;
+	u64 bps_capacity;
+	u16 cpu_num;
+};
+
+struct __attribute__((__packed__)) rmnet_shs_wq_gflows_usr_s {
+	u64 rx_pps;
+	u64 avg_pps;
+	u64 rx_bps;
+	u32 hash;
+	u16 cpu_num;
+};
+
+struct __attribute__((__packed__)) rmnet_shs_wq_ssflows_usr_s {
+	u64 rx_pps;
+	u64 avg_pps;
+	u64 rx_bps;
+	u32 hash;
+	u16 cpu_num;
+};
+
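+/* Example (illustrative): with __packed__ the entry sizes are fixed, so a
+ * userspace consumer sharing these definitions could pin down the layout
+ * with compile-time checks such as:
+ *
+ *	_Static_assert(sizeof(struct rmnet_shs_wq_cpu_cap_usr_s) == 26,
+ *		       "cpu cap entry is 3 * 8 + 2 bytes");
+ *	_Static_assert(sizeof(struct rmnet_shs_wq_gflows_usr_s) == 30,
+ *		       "gold flow entry is 3 * 8 + 4 + 2 bytes");
+ *	_Static_assert(sizeof(struct rmnet_shs_wq_ssflows_usr_s) == 30,
+ *		       "ss flow entry is 3 * 8 + 4 + 2 bytes");
+ */
+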
+extern struct list_head gflows;
+extern struct list_head ssflows;
+extern struct list_head cpu_caps;
+
+/* Buffer size for read and write syscalls */
+enum {RMNET_SHS_BUFFER_SIZE = 4096};
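+/* A single RMNET_SHS_BUFFER_SIZE page holds each cached table: assuming the
+ * eight-entry capacity array implied by the cap_0..cap_7 layout comment,
+ * 8 * 26 B = 208 B for cpu caps, and 2 B of flow count plus
+ * RMNET_SHS_MAX_USRFLOWS * 30 B = 3842 B for either flow table, both of
+ * which fit within 4096 B.
+ */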
+
+struct rmnet_shs_mmap_info {
+	char *data;
+};
+
+/* Function Definitions */
+
+void rmnet_shs_wq_ssflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+				  struct list_head *ss_flows);
+void rmnet_shs_wq_gflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+				 struct list_head *gold_flows);
+
+void rmnet_shs_wq_cleanup_gold_flow_list(struct list_head *gold_flows);
+void rmnet_shs_wq_cleanup_ss_flow_list(struct list_head *ss_flows);
+
+void rmnet_shs_wq_cpu_caps_list_add(
+				struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p,
+				struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node,
+				struct list_head *cpu_caps);
+
+void rmnet_shs_wq_cleanup_cpu_caps_list(struct list_head *cpu_caps);
+
+void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps);
+
+void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flows);
+void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows);
+
+void rmnet_shs_wq_mem_init(void);
+
+void rmnet_shs_wq_mem_deinit(void);
+
+#endif /*_RMNET_SHS_WQ_MEM_H_*/