Update kernel to ab/13436313
Change-Id: I313018e007a2ceae70980e9ac90204cb4eeb8c9e
diff --git a/BUILD.bazel b/BUILD.bazel
new file mode 100644
index 0000000..21a236f
--- /dev/null
+++ b/BUILD.bazel
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+load("//build/kernel/kleaf:kernel.bzl", "kernel_module")
+
+filegroup(
+ name = "cpif.kconfig",
+ srcs = [
+ "Kconfig",
+ ],
+ visibility = [
+ "//private/devices/google:__subpackages__",
+ ],
+)
+
+kernel_module(
+ name = "cpif",
+ srcs = glob([
+ "**/*.c",
+ "**/*.h",
+ ]) + [
+ "Kbuild",
+ "//private/google-modules/bms/misc:headers",
+ "//private/google-modules/soc/gs:gs_soc_headers",
+ ],
+ outs = [
+ "cp_pmic.ko",
+ "cp_thermal_zone.ko",
+ "cpif.ko",
+ "cpif_page.ko",
+ "shm_ipc.ko",
+ ],
+ kernel_build = "//private/devices/google/common:kernel",
+ visibility = [
+ "//private/devices/google:__subpackages__",
+ "//private/google-modules:__subpackages__",
+ ],
+ deps = [
+ "//private/google-modules/bms/misc:bms-misc",
+ "//private/google-modules/soc/gs:gs_soc_module",
+ ],
+)
diff --git a/Kbuild b/Kbuild
new file mode 100644
index 0000000..401814c
--- /dev/null
+++ b/Kbuild
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile of cpif
+
+ccflags-y += -Wformat
+ccflags-y += -Wformat-zero-length
+ccflags-y += -DCPIF_WAKEPKT_SET_MARK=0x80000000
+ccflags-y += -DCONFIG_OPTION_REGION=\"$(PROJECT_REGION)\"
+# This gives the drivers access to the logbuffer interface
+subdir-ccflags-y += -I$(KERNEL_SRC)/../private/google-modules/bms
+subdir-ccflags-y += -I$(srctree)/$(src)
+
+obj-$(CONFIG_MCU_IPC) += mcu_ipc.o
+obj-$(CONFIG_SHM_IPC) += shm_ipc.o
+obj-$(CONFIG_BOOT_DEVICE_SPI) += boot_device_spi.o
+
+obj-$(CONFIG_EXYNOS_DIT) += dit/
+
+obj-$(CONFIG_CPIF_PAGE_RECYCLING) += cpif_page.o
+
+obj-$(CONFIG_CPIF_DIRECT_DM) += direct_dm.o
+
+obj-$(CONFIG_CPIF_VENDOR_HOOK) += hook.o
+
+obj-$(CONFIG_EXYNOS_MODEM_IF) += cpif.o
+cpif-y += modem_main.o modem_variation.o
+cpif-y += modem_io_device.o net_io_device.o bootdump_io_device.o ipc_io_device.o modem_toe_device.o
+cpif-y += modem_utils.o modem_dump.o
+cpif-y += link_device.o link_device_memory_flow_control.o
+cpif-y += link_device_memory_debug.o
+cpif-y += link_device_memory_snapshot.o link_device_memory_legacy.o
+
+cpif-$(CONFIG_LINK_DEVICE_WITH_SBD_ARCH) += link_device_memory_sbd.o
+
+cpif-$(CONFIG_EXYNOS_CPIF_IOMMU) += cpif_netrx_mng.o cpif_vmapper.o
+
+cpif-$(CONFIG_LINK_DEVICE_PCIE) += s51xx_pcie.o
+cpif-$(CONFIG_LINK_DEVICE_PCIE_IOMMU) += link_device_pcie_iommu.o
+
+cpif-$(CONFIG_SEC_MODEM_S5000AP) += modem_ctrl_s5000ap.o modem_ctrl.o
+cpif-$(CONFIG_SEC_MODEM_S5100) += modem_ctrl_s5100.o modem_ctrl.o
+
+cpif-$(CONFIG_CP_PKTPROC) += link_rx_pktproc.o
+cpif-$(CONFIG_CP_PKTPROC_UL) += link_tx_pktproc.o
+
+cpif-$(CONFIG_CP_BTL) += cp_btl.o
+
+cpif-$(CONFIG_CPIF_TP_MONITOR) += cpif_tp_monitor.o
+
+cpif-$(CONFIG_MODEM_IF_LEGACY_QOS) += cpif_qos_info.o
+
+obj-$(CONFIG_CP_THERMAL) += cp_thermal_zone.o
+
+obj-$(CONFIG_CP_PMIC) += cp_pmic.o
+
diff --git a/Kconfig b/Kconfig
new file mode 100644
index 0000000..0618b61
--- /dev/null
+++ b/Kconfig
@@ -0,0 +1,234 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig EXYNOS_MODEM_IF
+ tristate "Samsung Mobile CP Interface"
+ default n
+ depends on S5910
+ depends on GOOGLE_MODEMCTL
+ help
+ Samsung Dual Modem Interface Driver
+
+if EXYNOS_MODEM_IF
+
+config SEC_MODEM_S5000AP
+ tristate "Exynos ModAP S5xxxAP"
+ select LINK_DEVICE_SHMEM
+ select CP_SECURE_BOOT
+ select MCU_IPC
+ select SHM_IPC
+ select CP_PMUCAL
+ select CPIF_PAGE_RECYCLING
+ default n
+
+config SEC_MODEM_S5100
+ tristate "Exynos S51xx"
+ select LINK_DEVICE_PCIE
+ select CPIF_PAGE_RECYCLING
+ default n
+
+menu "Configuration Description"
+config MCU_IPC
+ tristate "MCU IPC Support"
+ default n
+ help
+ This enables MCU_IPC driver to control the MCU_IPC Device.
+ MCU_IPC is the Mailbox which has 16 interrupts for TX/RX each
+ and 256 bytes memory for communicating messages.
+ AP and CP can share the messages through this device.
+
+config SHM_IPC
+ tristate "Shared Memory for IPC support"
+ default n
+ help
+ This enables SHM_IPC driver to control the Shared memory
+ for AP-CP Interface.
+
+config BOOT_DEVICE_SPI
+ tristate "boot device: SPI"
+ default n
+
+config LINK_DEVICE_WITH_SBD_ARCH
+ tristate "Link device with the SBD architecture from MIPI-LLI"
+ default n
+
+config CP_PKTPROC
+ tristate "Support packet processor"
+ default n
+ help
+ Packet processor
+
+config CP_PKTPROC_UL
+ tristate "Support packet processor for UL"
+ default n
+ help
+ Packet processor UL
+
+config LINK_DEVICE_SHMEM
+ tristate "Real system-level shared-memory on a system bus"
+ default n
+
+config LINK_DEVICE_PCIE
+ tristate "modem driver link device PCIe"
+ default n
+
+config CP_SECURE_BOOT
+ tristate "Support CP Secure Boot"
+ default n
+
+config GPIO_DS_DETECT
+ tristate "Support GPIO DS Detect"
+ default n
+
+config CP_UART_NOTI
+ tristate "Support CP UART notification API"
+ default n
+
+config PMU_UART_SWITCH
+ tristate "Support AP <-> CP UART switch by PMU"
+ depends on CP_UART_NOTI
+ default n
+
+config CP_BTL
+ tristate "Support CP BTL(Back Trace Log) feature"
+ default n
+
+config HW_REV_DETECT
+ tristate "Support HW REV Detect"
+ default n
+
+config MODEM_IF_QOS
+ tristate "Implement QoS"
+ default n
+ help
+ This enables QoS
+
+config MODEM_IF_LEGACY_QOS
+ tristate "Implement QoS for legacy buffer"
+ select MODEM_IF_QOS
+ default n
+ help
+ This enables QoS for legacy buffer model
+
+config CPIF_AP_SUSPEND_DURING_VOICE_CALL
+ bool "control wake_lock by voice call start/end notification"
+ depends on LINK_DEVICE_PCIE
+ default n
+ help
+ This enables AP suspend during PCM over PCIE
+
+config CPIF_TP_MONITOR
+ tristate "Support CP network throughput monitor"
+ default n
+ help
+ This enables CP network throughput monitor
+
+config LINK_DEVICE_PCIE_SOC_EXYNOS
+ bool "Support for Exynos SOC PCIE"
+ depends on LINK_DEVICE_PCIE
+ default n
+ help
+ PCIE support on Exynos SOC
+
+config LINK_DEVICE_PCIE_IOCC
+ bool "Use PCIE IOCC"
+ depends on LINK_DEVICE_PCIE
+ default n
+ help
+ This enables PCIe IOCC
+
+config LINK_DEVICE_PCIE_IOMMU
+ bool "Use PCIE IOMMU"
+ depends on LINK_DEVICE_PCIE && EXYNOS_PCIE_IOMMU
+ default n
+ help
+ This enables PCIe IOMMU
+
+config CPIF_CHECK_SJTAG_STATUS
+ tristate "Check secure JTAG status to disable CP memory dump"
+ default n
+ help
+ Check secure JTAG status to disable CP memory dump
+
+config CP_LCD_NOTIFIER
+ tristate "Enable LCD notifier for CP"
+ default n
+ help
+ This enables LCD notifier for CP
+
+config LINK_DEVICE_PCIE_GPIO_WA
+ bool "Workaround for discarded GPIOs"
+ depends on LINK_DEVICE_PCIE
+ default n
+ help
+ Use a GPIO for the other purpose instead
+
+config CP_WRESET_WA
+ bool "CP warm reset WA due to WRSTBI disabled"
+ depends on LINK_DEVICE_PCIE
+ default n
+ help
+ CP warm reset WA
+
+config EXYNOS_DIT
+ tristate "Enable DIT"
+ default n
+ help
+ Enable DIT (Direct Internet Packet Transfer)
+
+config EXYNOS_DIT_VERSION
+ hex "DIT version"
+ depends on EXYNOS_DIT
+ default 0x02010000
+ help
+ DIT version
+
+config CPIF_DIRECT_DM
+ tristate "Enable direct dm path"
+ default n
+ help
+ This enables direct dm path
+
+config CH_EXTENSION
+ tristate "Enable Channel Extension"
+ default n
+ help
+ Enable Channel Extension
+
+config CPIF_VENDOR_HOOK
+ tristate "Enable Android vendor hook"
+ default n
+ help
+ This enables Android vendor hook
+
+config CPIF_PAGE_RECYCLING
+ tristate "Enable page recycling"
+ default n
+ help
+ This enables page recycling to improve alloc overhead
+
+config CP_PKTPROC_CLAT
+ tristate "Enable HW PKTPROC CLAT"
+ default n
+ help
+ Enable HW pktproc clat
+
+config CP_PKTPROC_LRO
+ tristate "Enable HW PKTPROC LRO"
+ default n
+ help
+ Enable HW pktproc LRO
+
+config CP_THERMAL
+ tristate "Enable CP thermal zones"
+ depends on THERMAL
+ help
+ Enables support for reporting CP temperature sensor data to
+ kernel thermal framework.
+
+config CP_PMIC
+ tristate "Enable CP PMIC"
+ default n
+ help
+ Enable CP PMIC module
+
+endmenu
+endif
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..e79e4e6
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,15 @@
+KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
+M ?= $(shell pwd)
+
+KCFLAGS += -I$(KERNEL_SRC)/../private/google-modules/soc/gs/include
+KCFLAGS += -I$(KERNEL_SRC)/../private/google-modules/soc/gs/include/uapi
+
+EXTRA_SYMBOLS += $(O)/../private/google-modules/soc/gs/Module.symvers
+EXTRA_SYMBOLS += $(O)/../private/google-modules/bms/misc/Module.symvers
+
+EXTRA_CFLAGS+="-Wno-missing-prototypes"
+
+modules modules_install clean:
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 \
+ $(KBUILD_OPTIONS) EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \
+ KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" $(@)
diff --git a/boot_device_spi.c b/boot_device_spi.c
new file mode 100644
index 0000000..34ea169
--- /dev/null
+++ b/boot_device_spi.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2019, Samsung Electronics.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "link_device_memory.h"
+#include "boot_device_spi.h"
+
+#define MAX_IMAGE_SIZE SZ_128K
+#define MAX_SPI_DEVICE 2
+
+static int _count;
+static struct cpboot_spi _cpboot[MAX_SPI_DEVICE];
+
+/*
+ * Export functions
+ */
+/**
+ * cpboot_spi_load_cp_image() - push a CP bootloader image to the modem over SPI
+ * @ld: link device the image belongs to
+ * @iod: IO device the ioctl arrived on (unused here, kept for the op signature)
+ * @arg: userspace pointer to a struct cp_image {binary, size}
+ *
+ * Copies the image descriptor and payload from userspace into a vmalloc
+ * buffer and sends it in a single spi_sync() transfer, serialized by the
+ * per-device lock.
+ *
+ * Returns 0 on success or a negative errno. Fix vs. original: copy_from_user()
+ * returns the number of bytes NOT copied (not an errno), so failures are now
+ * reported as -EFAULT instead of a positive byte count.
+ */
+int cpboot_spi_load_cp_image(struct link_device *ld, struct io_device *iod, unsigned long arg)
+{
+	int ret = 0;
+	struct mem_link_device *mld = ld_to_mem_link_device(ld);
+	struct cpboot_spi *cpboot = cpboot_spi_get_device(mld->spi_bus_num);
+	struct cp_image img;
+	char *buff = NULL;
+	struct spi_message msg;
+	struct spi_transfer xfer;
+
+	if (!cpboot || !cpboot->spi) {
+		mif_err("spi is null\n");
+		return -EPERM;
+	}
+
+	mutex_lock(&cpboot->lock);
+
+	if (copy_from_user(&img, (const void __user *)arg, sizeof(struct cp_image))) {
+		mif_err("copy_from_user() arg error\n");
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	mif_info("size:%d bus_num:%d\n", img.size, cpboot->spi->controller->bus_num);
+	if ((img.size == 0) || (img.size > MAX_IMAGE_SIZE)) {
+		mif_err("size error:%d\n", img.size);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* MUST not enable dma-mode on SPI
+	 * dma-mode does not support non-contiguous buffer
+	 */
+	buff = vzalloc(img.size);
+	if (!buff) {
+		mif_err("vzalloc(%u) error\n", img.size);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (copy_from_user(buff, (const void __user *)img.binary, img.size)) {
+		mif_err("copy_from_user() buff error\n");
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	memset(&xfer, 0, sizeof(struct spi_transfer));
+	xfer.len = img.size;
+	xfer.tx_buf = buff;
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(cpboot->spi, &msg);
+	if (ret < 0) {
+		mif_err("spi_sync() error:%d\n", ret);
+		goto exit;
+	}
+
+exit:
+	vfree(buff);	/* vfree(NULL) is a no-op; no guard needed */
+	mutex_unlock(&cpboot->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(cpboot_spi_load_cp_image);
+
+/* Look up the probed cpboot SPI slave that sits on @bus_num.
+ * Returns the matching entry of the static _cpboot[] table, or NULL when
+ * no probed device lives on that bus.
+ */
+struct cpboot_spi *cpboot_spi_get_device(int bus_num)
+{
+	int idx;
+
+	for (idx = 0; idx < MAX_SPI_DEVICE; idx++) {
+		struct cpboot_spi *candidate = &_cpboot[idx];
+
+		if (!candidate->spi)
+			continue;
+		if (candidate->spi->controller->bus_num != bus_num)
+			continue;
+
+		mif_info("Get bus_num:%d\n", bus_num);
+		return candidate;
+	}
+
+	mif_err("Can not get bus_num:%d\n", bus_num);
+
+	return NULL;
+}
+EXPORT_SYMBOL(cpboot_spi_get_device);
+
+/*
+ * Probe
+ */
+/* Probe one cpboot SPI slave: claim the next free slot in the static
+ * _cpboot[] table, force 8-bit words, and stash the slot as drvdata.
+ * Slots are never recycled (_count only grows); at most MAX_SPI_DEVICE
+ * devices can ever be probed over the module's lifetime.
+ */
+static int cpboot_spi_probe(struct spi_device *spi)
+{
+	int ret = 0;
+
+	mif_info("bus_num:%d count:%d\n", spi->controller->bus_num, _count);
+
+	/* Only MAX_SPI_DEVICE slots exist in the static table */
+	if (_count >= MAX_SPI_DEVICE) {
+		mif_err("_count is over %d\n", MAX_SPI_DEVICE);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	mutex_init(&_cpboot[_count].lock);
+
+	spi->bits_per_word = 8;
+	if (spi_setup(spi)) {
+		mif_err("ERR! spi_setup fail\n");
+		ret = -EINVAL;
+		goto err_setup;
+	}
+	spi_set_drvdata(spi, &_cpboot[_count]);
+	_cpboot[_count].spi = spi;
+
+	_count++;
+	return 0;
+
+err_setup:
+	mutex_destroy(&_cpboot[_count].lock);
+
+err:
+	/* NOTE(review): panic() here takes down the whole system when this
+	 * driver fails to probe. Confirm the hard-fail policy is intended
+	 * for this product; the conventional pattern is dev_err + return.
+	 * The "return ret" below is unreachable while the panic stands.
+	 */
+	panic("CP SPI driver probe failed\n");
+	return ret;
+}
+
+/* Undo probe for one SPI slave: destroy the per-device lock. The static
+ * _cpboot[] slot itself is not reclaimed (_count is never decremented,
+ * and ->spi is left pointing at the removed device).
+ */
+static int cpboot_spi_remove(struct spi_device *spi)
+{
+	struct cpboot_spi *cpboot = spi_get_drvdata(spi);
+
+	mutex_destroy(&cpboot->lock);
+
+	return 0;
+}
+
+static const struct of_device_id cpboot_spi_dt_match[] = {
+ { .compatible = "samsung,exynos-cp-spi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cpboot_spi_dt_match);
+
+/* SPI driver glue. .owner is intentionally not set here: the
+ * module_spi_driver() wrapper passes THIS_MODULE to spi_register_driver(),
+ * which fills driver.owner itself, so an explicit assignment is redundant
+ * (checkpatch flags it).
+ */
+static struct spi_driver cpboot_spi_driver = {
+	.probe = cpboot_spi_probe,
+	.remove = cpboot_spi_remove,
+	.driver = {
+		.name = "cpboot_spi",
+		.of_match_table = of_match_ptr(cpboot_spi_dt_match),
+		.suppress_bind_attrs = true,
+	},
+};
+module_spi_driver(cpboot_spi_driver);
+
+MODULE_DESCRIPTION("Exynos SPI driver to load CP bootloader");
+MODULE_LICENSE("GPL");
diff --git a/boot_device_spi.h b/boot_device_spi.h
new file mode 100644
index 0000000..c322a3b
--- /dev/null
+++ b/boot_device_spi.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019, Samsung Electronics.
+ *
+ */
+
+#ifndef __BOOT_DEVICE_SPI_H__
+#define __BOOT_DEVICE_SPI_H__
+
+#include <linux/spi/spi.h>
+
+/* Per-SPI-slave state for CP bootloader download.
+ * @spi:  device handle filled in at probe time (NULL while slot is unused)
+ * @lock: serializes image transfers on this device
+ */
+struct cpboot_spi {
+	struct spi_device *spi;
+	struct mutex lock;
+};
+
+#if IS_ENABLED(CONFIG_BOOT_DEVICE_SPI)
+extern struct cpboot_spi *cpboot_spi_get_device(int bus_num);
+extern int cpboot_spi_load_cp_image(struct link_device *ld, struct io_device *iod,
+ unsigned long arg);
+#else
+static inline struct cpboot_spi *cpboot_spi_get_device(int bus_num) { return NULL; }
+static inline int cpboot_spi_load_cp_image(struct link_device *ld, struct io_device *iod,
+ unsigned long arg) { return 0; }
+#endif
+
+#endif /* __BOOT_DEVICE_SPI_H__ */
diff --git a/bootdump_io_device.c b/bootdump_io_device.c
new file mode 100644
index 0000000..cb1b649
--- /dev/null
+++ b/bootdump_io_device.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Samsung Electronics.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <trace/events/napi.h>
+#include <net/ip.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "modem_dump.h"
+
+/* open() handler for the boot/dump character device.
+ * Bumps the open count and runs init_comm() on every link device connected
+ * to this IO device so the link is ready before userspace starts the
+ * boot/dump protocol.
+ */
+static int bootdump_open(struct inode *inode, struct file *filp)
+{
+	struct io_device *iod = to_io_device(inode->i_cdev);
+	struct modem_shared *msd = iod->msd;
+	struct link_device *ld;
+	int ret;
+
+	filp->private_data = (void *)iod;
+
+	atomic_inc(&iod->opened);
+
+	list_for_each_entry(ld, &msd->link_dev_list, list) {
+		if (IS_CONNECTED(iod, ld) && ld->init_comm) {
+			ret = ld->init_comm(ld, iod);
+			if (ret < 0) {
+				mif_err("%s<->%s: ERR! init_comm fail(%d)\n",
+					iod->name, ld->name, ret);
+				/* NOTE(review): links whose init_comm already
+				 * succeeded are not torn down on this failure
+				 * path — confirm terminate_comm() at release
+				 * time covers that case.
+				 */
+				atomic_dec(&iod->opened);
+				return ret;
+			}
+		}
+	}
+
+	mif_info("%s (opened %d) by %s\n",
+		iod->name, atomic_read(&iod->opened), current->comm);
+
+	return 0;
+}
+
+/* release() handler for the boot/dump character device.
+ * Purges all pending RX queues on the last close — or unconditionally when
+ * the closer is the "cbd" boot daemon — and tears down every connected
+ * link via terminate_comm().
+ */
+static int bootdump_release(struct inode *inode, struct file *filp)
+{
+	struct io_device *iod = (struct io_device *)filp->private_data;
+	struct modem_shared *msd = iod->msd;
+	struct link_device *ld;
+	int i;
+
+	/* last close, or any close by a process whose comm starts with "cbd" */
+	if (atomic_dec_and_test(&iod->opened) ||
+		!strncmp(current->comm, "cbd", 3)) {
+		skb_queue_purge(&iod->sk_rx_q);
+
+		/* purge multi_frame queue */
+		for (i = 0; i < NUM_SIPC_MULTI_FRAME_IDS; i++)
+			skb_queue_purge(&iod->sk_multi_q[i]);
+	}
+
+	list_for_each_entry(ld, &msd->link_dev_list, list) {
+		if (IS_CONNECTED(iod, ld) && ld->terminate_comm)
+			ld->terminate_comm(ld, iod);
+	}
+
+	mif_info("%s (opened %d) by %s\n",
+		iod->name, atomic_read(&iod->opened), current->comm);
+
+	return 0;
+}
+
+/* poll() handler: reports POLLIN when RX data is queued; in crash/reset
+ * modem states, boot channels get POLLHUP once their queue is drained so
+ * userspace can detect the state change.
+ */
+static unsigned int bootdump_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	struct io_device *iod = (struct io_device *)filp->private_data;
+	struct modem_ctl *mc;
+	struct sk_buff_head *rxq;
+	struct link_device *ld;
+
+	if (!iod)
+		return POLLERR;
+
+	mc = iod->mc;
+	rxq = &iod->sk_rx_q;
+	ld = get_current_link(iod);
+
+	if (skb_queue_empty(rxq))
+		poll_wait(filp, &iod->wq, wait);
+
+	switch (mc->phone_state) {
+	case STATE_BOOTING:
+	case STATE_ONLINE:
+		if (!skb_queue_empty(rxq))
+			return POLLIN | POLLRDNORM;
+		break;
+	case STATE_CRASH_EXIT:
+	case STATE_CRASH_RESET:
+	case STATE_NV_REBUILDING:
+	case STATE_CRASH_WATCHDOG:
+		/* let boot/dump channels drain queued data first */
+		if (iod->format == IPC_BOOT || ld->is_boot_ch(iod->ch) ||
+			iod->format == IPC_DUMP || ld->is_dump_ch(iod->ch)) {
+			if (!skb_queue_empty(rxq))
+				return POLLIN | POLLRDNORM;
+		}
+
+		mif_err_limited("%s: %s.state == %s\n", iod->name, mc->name, mc_state(mc));
+
+		if (iod->format == IPC_BOOT || ld->is_boot_ch(iod->ch))
+			return POLLHUP;
+		break;
+	case STATE_RESET:
+		mif_err_limited("%s: %s.state == %s\n", iod->name, mc->name, mc_state(mc));
+
+		if (iod->attrs & IO_ATTR_STATE_RESET_NOTI)
+			return POLLHUP;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Main ioctl dispatcher for the boot/dump node.
+ *
+ * Commands delegate to modem_ctl ops (power and boot-stage control) or to
+ * link_device ops (image download, security request, crash reason). Unknown
+ * commands fall through to the link-device-specific handler when one is
+ * registered.
+ *
+ * Fixes vs. original:
+ *  - copy_from_user()/copy_to_user() return the byte count NOT copied, not
+ *    an errno; failures now report -EFAULT instead of a positive count.
+ *  - struct cpif_version is zeroed before being copied to userspace;
+ *    strncpy() alone left the last byte (and any padding) uninitialized,
+ *    leaking kernel stack memory.
+ */
+static long bootdump_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct io_device *iod = (struct io_device *)filp->private_data;
+	struct link_device *ld = get_current_link(iod);
+	struct modem_ctl *mc = iod->mc;
+	enum modem_state p_state;
+	struct cpif_version version;
+	int ret = 0, value;
+	struct mem_link_device *mld;
+
+	switch (cmd) {
+	case IOCTL_POWER_ON:
+		if (!mc->ops.power_on) {
+			mif_err("%s: power_on is null\n", iod->name);
+			return -EINVAL;
+		}
+		mif_info("%s: IOCTL_POWER_ON\n", iod->name);
+		return mc->ops.power_on(mc);
+
+	case IOCTL_POWER_OFF:
+		if (!mc->ops.power_off) {
+			mif_err("%s: power_off is null\n", iod->name);
+			return -EINVAL;
+		}
+		mif_info("%s: IOCTL_POWER_OFF\n", iod->name);
+		return mc->ops.power_off(mc);
+
+	case IOCTL_POWER_RESET:
+	{
+		void __user *uarg = (void __user *)arg;
+		struct boot_mode mode;
+
+		mif_info("%s: IOCTL_POWER_RESET\n", iod->name);
+		ret = copy_from_user(&mode, uarg, sizeof(mode));
+		if (ret) {
+			mif_err("copy_from_user() error:%d\n", ret);
+			return -EFAULT;
+		}
+
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+		tpmon_init();
+#endif
+
+		switch (mode.idx) {
+		case CP_BOOT_MODE_NORMAL:
+			mif_info("%s: normal boot mode\n", iod->name);
+			if (!mc->ops.power_reset) {
+				mif_err("%s: power_reset is null\n", iod->name);
+				return -EINVAL;
+			}
+			ret = mc->ops.power_reset(mc);
+			break;
+
+		case CP_BOOT_MODE_DUMP:
+			mif_info("%s: dump boot mode\n", iod->name);
+			if (!mc->ops.power_reset_dump) {
+				mif_err("%s: power_reset_dump is null\n", iod->name);
+				return -EINVAL;
+			}
+			ret = mc->ops.power_reset_dump(mc, 0);
+			break;
+
+		case CP_BOOT_MODE_SILENT:
+			mif_info("%s: silent mode\n", iod->name);
+			if (!mc->ops.power_reset_dump) {
+				mif_err("%s: power_reset_dump is null\n", iod->name);
+				return -EINVAL;
+			}
+			ret = mc->ops.power_reset_dump(mc, 1);
+			break;
+
+		case CP_BOOT_MODE_DUMP_PARTIAL:
+			mif_info("%s: dump partail boot mode\n", iod->name);
+			if (!mc->ops.power_reset_partial) {
+				mif_err("%s: power_reset_partail is null\n", iod->name);
+				return -EINVAL;
+			}
+			ret = mc->ops.power_reset_partial(mc);
+			break;
+
+		case CP_BOOT_MODE_DUMP_WARM:
+			mif_info("%s: dump warm boot mode\n", iod->name);
+			if (!mc->ops.power_reset_warm) {
+				mif_err("%s: power_reset_warm is null\n", iod->name);
+				return -EINVAL;
+			}
+			ret = mc->ops.power_reset_warm(mc);
+			break;
+		default:
+			mif_err("boot_mode is invalid:%d\n", mode.idx);
+			return -EINVAL;
+		}
+
+		return ret;
+	}
+
+	case IOCTL_SILENT_RESET:
+		if (!mc->ops.silent_reset) {
+			mif_err("%s: silent_reset is null\n", iod->name);
+			return -EINVAL;
+		}
+		mif_info("%s: IOCTL_SILENT_RESET\n", iod->name);
+		return mc->ops.silent_reset(mc);
+
+	case IOCTL_REQ_SECURITY:
+		if (!ld->security_req) {
+			mif_err("%s: security_req is null\n", iod->name);
+			return -EINVAL;
+		}
+
+		mif_info("%s: IOCTL_REQ_SECURITY\n", iod->name);
+		ret = ld->security_req(ld, iod, arg);
+		if (ret) {
+			mif_err("security_req() error:%d\n", ret);
+			return ret;
+		}
+		return ret;
+
+	case IOCTL_LOAD_CP_IMAGE:
+		if (!ld->load_cp_image) {
+			mif_err("%s: load_cp_image is null\n", iod->name);
+			return -EINVAL;
+		}
+
+		mif_debug("%s: IOCTL_LOAD_CP_IMAGE\n", iod->name);
+		return ld->load_cp_image(ld, iod, arg);
+
+	case IOCTL_START_CP_BOOTLOADER:
+	{
+		void __user *uarg = (void __user *)arg;
+		struct boot_mode mode;
+
+		mif_info("%s: IOCTL_START_CP_BOOTLOADER\n", iod->name);
+		ret = copy_from_user(&mode, uarg, sizeof(mode));
+		if (ret) {
+			mif_err("copy_from_user() error:%d\n", ret);
+			return -EFAULT;
+		}
+
+		switch (mode.idx) {
+		case CP_BOOT_MODE_NORMAL:
+			mif_info("%s: normal boot mode\n", iod->name);
+			if (!mc->ops.start_normal_boot) {
+				mif_err("%s: start_normal_boot is null\n", iod->name);
+				return -EINVAL;
+			}
+			return mc->ops.start_normal_boot(mc);
+
+		case CP_BOOT_MODE_DUMP:
+			mif_info("%s: dump boot mode\n", iod->name);
+			if (!mc->ops.start_dump_boot) {
+				mif_err("%s: start_dump_boot is null\n", iod->name);
+				return -EINVAL;
+			}
+			return mc->ops.start_dump_boot(mc);
+
+		case CP_BOOT_MODE_NORMAL_BL1:
+			mif_info("%s: bl1 boot mode\n", iod->name);
+			if (!mc->ops.start_normal_boot_bl1) {
+				mif_err("%s: start_normal_boot_bl1 is null\n", iod->name);
+				return -EINVAL;
+			}
+			return mc->ops.start_normal_boot_bl1(mc);
+
+		case CP_BOOT_MODE_NORMAL_BOOTLOADER:
+			mif_info("%s: bootloader boot mode\n", iod->name);
+			if (!mc->ops.start_normal_boot_bootloader) {
+				mif_err("%s: start_normal_boot_bootloader is null\n", iod->name);
+				return -EINVAL;
+			}
+			return mc->ops.start_normal_boot_bootloader(mc);
+
+		case CP_BOOT_MODE_DUMP_BL1:
+			mif_info("%s: bl1 dump boot mode\n", iod->name);
+			if (!mc->ops.start_dump_boot_bl1) {
+				mif_err("%s: start_dump_boot_bl1 is null\n", iod->name);
+				return -EINVAL;
+			}
+			return mc->ops.start_dump_boot_bl1(mc);
+
+		case CP_BOOT_MODE_DUMP_BOOTLOADER:
+			mif_info("%s: bootloader dump boot mode\n", iod->name);
+			if (!mc->ops.start_dump_boot_bootloader) {
+				mif_err("%s: start_dump_boot_bootloader is null\n", iod->name);
+				return -EINVAL;
+			}
+			return mc->ops.start_dump_boot_bootloader(mc);
+
+		case CP_BOOT_MODE_DUMP_PARTIAL:
+			mif_info("%s: partial dump boot mode\n", iod->name);
+			if (!mc->ops.start_dump_boot_partial) {
+				mif_err("%s: start_dump_boot_partial is null\n", iod->name);
+				return -EINVAL;
+			}
+			return mc->ops.start_dump_boot_partial(mc);
+
+		default:
+			mif_err("boot_mode is invalid:%d\n", mode.idx);
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	case IOCTL_COMPLETE_NORMAL_BOOTUP:
+		if (!mc->ops.complete_normal_boot) {
+			mif_err("%s: complete_normal_boot is null\n", iod->name);
+			return -EINVAL;
+		}
+
+		mif_info("%s: IOCTL_COMPLETE_NORMAL_BOOTUP\n", iod->name);
+		return mc->ops.complete_normal_boot(mc);
+
+	case IOCTL_GET_CP_STATUS:
+		mif_debug("%s: IOCTL_GET_CP_STATUS\n", iod->name);
+
+		p_state = mc->phone_state;
+
+		if (p_state != STATE_ONLINE) {
+			mif_debug("%s: IOCTL_GET_CP_STATUS (state %s)\n",
+				iod->name, cp_state_str(p_state));
+		}
+
+		switch (p_state) {
+		case STATE_NV_REBUILDING:
+			mc->phone_state = STATE_ONLINE;
+			break;
+		/* Do not return an internal state */
+		case STATE_RESET:
+			p_state = STATE_OFFLINE;
+			break;
+		default:
+			break;
+		}
+
+		return p_state;
+
+	case IOCTL_TRIGGER_CP_CRASH:
+	{
+		char *buff = ld->crash_reason.string;
+		void __user *user_buff = (void __user *)arg;
+
+		switch (ld->protocol) {
+		case PROTOCOL_SIPC:
+			if (arg)
+				ld->crash_reason.type = (u32)arg;
+			mif_err("%s: IOCTL_TRIGGER_CP_CRASH (%lu)\n",
+				iod->name, arg);
+			break;
+
+		case PROTOCOL_SIT:
+			ld->crash_reason.type =
+				CRASH_REASON_RIL_TRIGGER_CP_CRASH;
+
+			/* best-effort: fall back to a default reason string */
+			if (arg) {
+				if (copy_from_user(buff, user_buff, CP_CRASH_INFO_SIZE))
+					mif_info("No argument from USER\n");
+			} else
+				mif_info("No argument from USER\n");
+
+			mif_info("Crash Reason:%s\n", buff);
+			break;
+
+		default:
+			mif_err("ERR - unknown protocol\n");
+			break;
+		}
+
+		if (!mc->ops.trigger_cp_crash) {
+			mif_err("%s: trigger_cp_crash is null\n", iod->name);
+			return -EINVAL;
+		}
+
+		return mc->ops.trigger_cp_crash(mc);
+	}
+
+	case IOCTL_TRIGGER_KERNEL_PANIC:
+	{
+		char *buff = ld->crash_reason.string;
+		void __user *user_buff = (void __user *)arg;
+
+		mif_info("%s: IOCTL_TRIGGER_KERNEL_PANIC\n", iod->name);
+
+		strcpy(buff, CP_CRASH_TAG);
+		if (arg)
+			if (copy_from_user((void *)((unsigned long)buff +
+					strlen(CP_CRASH_TAG)), user_buff,
+					CP_CRASH_INFO_SIZE - strlen(CP_CRASH_TAG)))
+				return -EFAULT;
+		mif_info("Crash Reason: %s\n", buff);
+		panic("%s", buff);
+		return 0;
+	}
+
+	case IOCTL_GET_LOG_DUMP:
+		mif_info("%s: IOCTL_GET_LOG_DUMP\n", iod->name);
+
+		return cp_get_log_dump(iod, ld, arg);
+
+	case IOCTL_GET_CP_CRASH_REASON:
+		if (!ld->get_cp_crash_reason) {
+			mif_err("%s: get_cp_crash_reason is null\n", iod->name);
+			return -EINVAL;
+		}
+
+		mif_info("%s: IOCTL_GET_CP_CRASH_REASON\n", iod->name);
+		return ld->get_cp_crash_reason(ld, iod, arg);
+
+	case IOCTL_GET_CPIF_VERSION:
+		mif_info("%s: IOCTL_GET_CPIF_VERSION\n", iod->name);
+
+		/* Zero the struct first: strncpy() does not write the final
+		 * byte when the source fills the buffer, and uninitialized
+		 * bytes must not reach userspace (stack infoleak).
+		 */
+		memset(&version, 0, sizeof(version));
+		strncpy(version.string, get_cpif_driver_version(), sizeof(version.string) - 1);
+		ret = copy_to_user((void __user *)arg, &version, sizeof(version));
+		if (ret) {
+			mif_err("copy_to_user() error:%d\n", ret);
+			return -EFAULT;
+		}
+
+		return 0;
+
+	case IOCTL_HANDOVER_BLOCK_INFO:
+		if (!ld->handover_block_info) {
+			mif_err("%s: handover_block_info is null\n", iod->name);
+			return -EINVAL;
+		}
+		mif_info("%s: IOCTL_HANDOVER_BLOCK_INFO\n", iod->name);
+		return ld->handover_block_info(ld, arg);
+
+	case IOCTL_SET_SPI_BOOT_MODE:
+		mld = to_mem_link_device(ld);
+		if (!mld) {
+			mif_err("%s: mld is null\n", iod->name);
+			return -EINVAL;
+		}
+		if (mld->spi_bus_num < 0) {
+			mif_err("invalid cpboot_spi_bus_num\n");
+			return -ENODEV;
+		}
+		/* route subsequent image downloads over SPI instead of PCIe */
+		mld->attrs |= LINK_ATTR_XMIT_BTDLR_SPI;
+		mld->attrs &= (~LINK_ATTR_XMIT_BTDLR_PCIE);
+
+		ld->load_cp_image = cpboot_spi_load_cp_image;
+
+		mif_info("%s: IOCTL_SET_SPI_BOOT_MODE\n", iod->name);
+		return 0;
+
+	case IOCTL_GET_OPENED_STATUS:
+		mif_debug("%s: IOCTL_GET_OPENED_STATUS\n", iod->name);
+		value = atomic_read(&iod->opened);
+		ret = copy_to_user((void __user *)arg, &value, sizeof(value));
+		if (ret) {
+			mif_err("IOCTL_GET_OPENED_STATUS error: %d\n", ret);
+			return -EFAULT;
+		}
+		return 0;
+
+	default:
+		/* If you need to handle the ioctl for specific link device,
+		 * then assign the link ioctl handler to ld->ioctl
+		 * It will be call for specific link ioctl
+		 */
+		if (ld->ioctl)
+			return ld->ioctl(ld, iod, cmd, arg);
+
+		mif_info("%s: ERR! undefined cmd 0x%X\n", iod->name, cmd);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* write() handler: slice the user buffer into link frames, prepend the
+ * protocol header (SIPC5 or SIT) when the IO device requires one, pad to
+ * the link alignment, and hand each skb to the link device.
+ *
+ * Fix vs. original: the protocol-error path inside the header-building
+ * switch returned without freeing the freshly allocated skb (memory leak).
+ */
+static ssize_t bootdump_write(struct file *filp, const char __user *data,
+			size_t count, loff_t *fpos)
+{
+	struct io_device *iod = (struct io_device *)filp->private_data;
+	struct link_device *ld = get_current_link(iod);
+	struct modem_ctl *mc = iod->mc;
+	struct sk_buff *skb;
+	char *buff;
+	int ret;
+	u8 cfg = 0;
+	u16 cfg_sit = 0;
+	unsigned int headroom;
+	unsigned int tailroom;
+	unsigned int tx_bytes;
+	unsigned int copied = 0, tot_frame = 0, copied_frm = 0;
+	unsigned int remains;
+	unsigned int alloc_size;
+	/* 64bit prevent */
+	unsigned int cnt = (unsigned int)count;
+	struct timespec64 ts;
+
+	/* Record the timestamp */
+	ktime_get_ts64(&ts);
+
+	if (iod->format <= IPC_RFS && iod->ch == 0)
+		return -EINVAL;
+
+	if (unlikely(!cp_online(mc)) && ld->is_ipc_ch(iod->ch)) {
+		mif_debug("%s: ERR! %s->state == %s\n",
+			iod->name, mc->name, mc_state(mc));
+		return -EPERM;
+	}
+
+	if (iod->link_header) {
+		switch (ld->protocol) {
+		case PROTOCOL_SIPC:
+			cfg = sipc5_build_config(iod, ld, cnt);
+			headroom = sipc5_get_hdr_len(&cfg);
+			break;
+		case PROTOCOL_SIT:
+			cfg_sit = exynos_build_fr_config(iod, ld, cnt);
+			headroom = EXYNOS_HEADER_SIZE;
+			break;
+		default:
+			mif_err("protocol error %d\n", ld->protocol);
+			return -EINVAL;
+		}
+	} else {
+		cfg = 0;
+		cfg_sit = 0;
+		headroom = 0;
+	}
+
+	while (copied < cnt) {
+		remains = cnt - copied;
+
+		/* NOTE(review): on overflow the frame size silently falls back
+		 * to SZ_2K — confirm this fallback is intended.
+		 */
+		if (check_add_overflow(remains, headroom, &alloc_size))
+			alloc_size = SZ_2K;
+
+		if (iod->max_tx_size)
+			alloc_size = min_t(unsigned int, alloc_size,
+				iod->max_tx_size);
+
+		/* Calculate tailroom for padding size */
+		if (iod->link_header && ld->aligned)
+			tailroom = ld->calc_padding_size(alloc_size);
+		else
+			tailroom = 0;
+
+		alloc_size += tailroom;
+
+		skb = alloc_skb(alloc_size, GFP_KERNEL);
+		if (!skb) {
+			mif_info("%s: ERR! alloc_skb fail (alloc_size:%d)\n",
+				iod->name, alloc_size);
+			return -ENOMEM;
+		}
+
+		tx_bytes = alloc_size - headroom - tailroom;
+
+		/* Reserve the space for a link header */
+		skb_reserve(skb, headroom);
+
+		/* Copy an IPC message from the user space to the skb */
+		buff = skb_put(skb, tx_bytes);
+		if (copy_from_user(buff, data + copied, tx_bytes)) {
+			mif_err("%s->%s: ERR! copy_from_user fail(count %lu)\n",
+				iod->name, ld->name, (unsigned long)count);
+			dev_kfree_skb_any(skb);
+			return -EFAULT;
+		}
+
+		/* Update size of copied payload */
+		copied += tx_bytes;
+		/* Update size of total frame included hdr, pad size */
+		tot_frame += alloc_size;
+
+		/* Store the IO device, the link device, etc. */
+		skbpriv(skb)->iod = iod;
+		skbpriv(skb)->ld = ld;
+
+		skbpriv(skb)->lnk_hdr = iod->link_header;
+		skbpriv(skb)->sipc_ch = iod->ch;
+
+		/* Copy the timestamp to the skb */
+		skbpriv(skb)->ts = ts;
+#ifdef DEBUG_MODEM_IF_IODEV_TX
+		mif_pkt(iod->ch, "IOD-TX", skb);
+#endif
+
+		/* Build SIPC5 link header*/
+		if (cfg || cfg_sit) {
+			buff = skb_push(skb, headroom);
+
+			switch (ld->protocol) {
+			case PROTOCOL_SIPC:
+				sipc5_build_header(iod, buff, cfg,
+					tx_bytes, cnt - copied);
+				break;
+			case PROTOCOL_SIT:
+				exynos_build_header(iod, ld, buff, cfg_sit, 0, tx_bytes);
+				break;
+			default:
+				mif_err("protocol error %d\n", ld->protocol);
+				/* fix: free the skb allocated above before
+				 * bailing out, otherwise it is leaked
+				 */
+				dev_kfree_skb_any(skb);
+				return -EINVAL;
+			}
+		}
+
+		/* Apply padding */
+		if (tailroom)
+			skb_put(skb, tailroom);
+
+		/**
+		 * Send the skb with a link device
+		 */
+		ret = ld->send(ld, iod, skb);
+		if (ret < 0) {
+			mif_err("%s->%s: %s->send fail(%d, tx:%d len:%lu)\n",
+				iod->name, mc->name, ld->name,
+				ret, tx_bytes, (unsigned long)count);
+			dev_kfree_skb_any(skb);
+			return ret;
+		}
+		copied_frm += ret;
+	}
+
+	if (copied_frm != tot_frame) {
+		mif_info("%s->%s: WARN! %s->send ret:%d (len:%lu)\n",
+			iod->name, mc->name, ld->name,
+			copied_frm, (unsigned long)count);
+	}
+
+	return count;
+}
+
+/* read() handler: return up to @count bytes from the head skb of the RX
+ * queue, waiting at most 100 ms for data. A partially consumed skb is
+ * pushed back to the queue head so the remainder is delivered by the next
+ * read() call.
+ */
+static ssize_t bootdump_read(struct file *filp, char *buf, size_t count,
+		loff_t *fpos)
+{
+	struct io_device *iod = (struct io_device *)filp->private_data;
+	struct sk_buff_head *rxq = &iod->sk_rx_q;
+	struct sk_buff *skb;
+	int copied;
+
+	if (skb_queue_empty(rxq)) {
+		long tmo = msecs_to_jiffies(100);
+
+		wait_event_timeout(iod->wq, !skb_queue_empty(rxq), tmo);
+	}
+
+	skb = skb_dequeue(rxq);
+	if (unlikely(!skb)) {
+		mif_info("%s: NO data in RXQ\n", iod->name);
+		return 0;
+	}
+
+	copied = skb->len > count ? count : skb->len;
+
+	if (copy_to_user(buf, skb->data, copied)) {
+		mif_err("%s: ERR! copy_to_user fail\n", iod->name);
+		/* NOTE(review): the skb is dropped on a failed copy, so the
+		 * data is lost rather than re-queued — confirm intended.
+		 */
+		dev_kfree_skb_any(skb);
+		return -EFAULT;
+	}
+
+	/* CP log channel feeds netdev RX statistics */
+	if (iod->ch == SIPC_CH_ID_CPLOG1) {
+		struct net_device *ndev = iod->ndev;
+
+		if (!ndev) {
+			mif_err("%s: ERR! no iod->ndev\n", iod->name);
+		} else {
+			ndev->stats.rx_packets++;
+			ndev->stats.rx_bytes += copied;
+		}
+	}
+
+#ifdef DEBUG_MODEM_IF_IODEV_RX
+	mif_pkt(iod->ch, "IOD-RX", skb);
+#endif
+	mif_debug("%s: data:%d copied:%d qlen:%d\n",
+		iod->name, skb->len, copied, rxq->qlen);
+
+	/* keep the unread tail of the frame for the next read() */
+	if (skb->len > copied) {
+		skb_pull(skb, copied);
+		skb_queue_head(rxq, skb);
+	} else {
+		dev_consume_skb_any(skb);
+	}
+
+	return copied;
+}
+
+
+/* File operations for the boot/dump character device.
+ * NOTE(review): compat_ioctl reuses the native handler; confirm every ioctl
+ * payload struct is layout-identical for 32- and 64-bit userspace (otherwise
+ * compat_ptr_ioctl plus compat structs are required).
+ */
+const struct file_operations bootdump_io_fops = {
+	.owner = THIS_MODULE,
+	.open = bootdump_open,
+	.release = bootdump_release,
+	.poll = bootdump_poll,
+	.unlocked_ioctl = bootdump_ioctl,
+	.compat_ioctl = bootdump_ioctl,
+	.write = bootdump_write,
+	.read = bootdump_read,
+};
+
+/* Accessor so other translation units can register these fops. */
+const struct file_operations *get_bootdump_io_fops(void)
+{
+	return &bootdump_io_fops;
+}
+
diff --git a/cp_btl.c b/cp_btl.c
new file mode 100644
index 0000000..da91ec6
--- /dev/null
+++ b/cp_btl.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Samsung Electronics.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/of_reserved_mem.h>
+
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/memblock.h>
+
+#include <soc/google/exynos-smc.h>
+
+#include "modem_utils.h"
+#include "modem_ctrl.h"
+#include "cp_btl.h"
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+#include "s51xx_pcie.h"
+#endif
+
+#define BTL_READ_SIZE_MAX SZ_1M
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+#define BTL_MAP_SIZE SZ_1M /* per PCI BAR2 limit */
+#endif
+
+#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
+
+/* fops */
+/*
+ * open() handler for the BTL misc device.
+ * Resolves the cp_btl instance that owns this miscdevice and stores it in
+ * filep->private_data for the other fops.
+ */
+static int btl_open(struct inode *inode, struct file *filep)
+{
+	struct cp_btl *btl = container_of(filep->private_data, struct cp_btl, miscdev);
+
+	/* validate before publishing the pointer through private_data
+	 * (previously the assignment happened before the NULL check)
+	 */
+	if (!btl) {
+		mif_err("btl is null\n");
+		return -ENODEV;
+	}
+	filep->private_data = (void *)btl;
+
+	if ((btl->link_type == LINKDEV_SHMEM) && !btl->mem.v_base) {
+		mif_err("%s: v_base is null\n", btl->name);
+		return -ENOMEM;
+	}
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+	/* force a fresh ATU setup on the first read */
+	btl->last_pcie_atu_grp = -1;
+#endif
+
+	mif_info("%s opened by %s\n", btl->name, current->comm);
+	return 0;
+}
+
+/* release() handler for the BTL misc device. */
+static int btl_release(struct inode *inode, struct file *filep)
+{
+	struct cp_btl *btl = filep->private_data;
+
+	if (!btl) {
+		mif_err("btl is null\n");
+		return -ENODEV;
+	}
+
+	mif_info("%s closed by %s\n", btl->name, current->comm);
+	return 0;
+}
+
+/*
+ * read() handler for the BTL misc device.
+ *
+ * Copies up to BTL_READ_SIZE_MAX bytes of the CP BTL region per call and
+ * advances *pos.  Returns the byte count, 0 at EOF (rewinding *pos so the
+ * next dump restarts), or a negative errno.  For PCIe links the region is
+ * windowed through the outbound ATU in BTL_MAP_SIZE chunks and staged
+ * through a temporary kernel buffer.
+ */
+static ssize_t btl_read(struct file *filep, char __user *buf, size_t count, loff_t *pos)
+{
+	struct cp_btl *btl = NULL;
+	unsigned long remainder = 0;
+	int len = 0;
+	int ret = 0;
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+	struct link_device *ld;
+	struct modem_ctl *mc;
+	void *btl_buf;
+	u32 atu_pos;
+#endif
+
+	btl = (struct cp_btl *)filep->private_data;
+	if (!btl) {
+		mif_err("btl is null\n");
+		return -ENODEV;
+	}
+
+	if ((filep->f_flags & O_NONBLOCK) && !atomic_read(&btl->active))
+		return -EAGAIN;
+
+	if (*pos < 0 || *pos >= btl->mem.size) {
+		mif_err("Tried to read over %d:%lld\n", btl->mem.size, *pos);
+		return 0;
+	}
+
+	remainder = btl->mem.size - *pos;
+	if (remainder == 0) { /* EOF */
+		mif_info("%s: %lld bytes read\n", btl->name, *pos);
+		*pos = 0;
+		return 0;
+	}
+
+	len = min_t(size_t, count, BTL_READ_SIZE_MAX);
+	len = min_t(unsigned long, len, remainder);
+
+	switch (btl->link_type) {
+	case LINKDEV_SHMEM:
+		if (!btl->mem.v_base) {
+			mif_err("%s: v_base is null\n", btl->name);
+			ret = -ENOMEM;
+			break;
+		}
+
+		/* copy_to_user() returns the number of uncopied bytes; map a
+		 * failure to -EFAULT instead of returning that positive count
+		 * to the caller as if it were data read.
+		 */
+		if (copy_to_user(buf, btl->mem.v_base + *pos, len)) {
+			mif_err("%s: copy_to_user() error", btl->name);
+			ret = -EFAULT;
+		}
+		break;
+	case LINKDEV_PCIE:
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+		ld = &btl->mld->link_dev;
+		mc = ld->mc;
+
+		btl_buf = NULL;
+		btl->mem.v_base = NULL;
+
+		/* clamp the chunk to the current ATU window */
+		atu_pos = (*pos) % BTL_MAP_SIZE;
+		len = min_t(unsigned long, len, BTL_MAP_SIZE - atu_pos);
+
+		mutex_lock(&mc->pcie_check_lock);
+		/* assume that pci link is up after CP BTL trigger */
+		if (!mc->pcie_powered_on ||
+				(s51xx_check_pcie_link_status(mc->pcie_ch_num) == 0)) {
+			mif_err("pci link is not ready\n");
+			ret = -EWOULDBLOCK;
+			goto link_exit;
+		}
+
+		ret = s5100_set_outbound_atu(ld->mc, btl, pos, BTL_MAP_SIZE);
+		if (ret != 0) {
+			mif_err("%s: failed to set ATU error:%d\n", btl->name, ret);
+			goto link_exit;
+		}
+
+		btl->mem.v_base = devm_ioremap(ld->dev, btl->mem.p_base, BTL_MAP_SIZE);
+		if (IS_ERR_OR_NULL(btl->mem.v_base)) {
+			mif_err("%s: failed to map\n", btl->name);
+			ret = -EFAULT;
+			goto link_exit;
+		}
+
+		btl_buf = vzalloc(len);
+		if (!btl_buf) {
+			mif_err("%s: failed to alloc\n", btl->name);
+			ret = -ENOMEM;
+			goto link_exit;
+		}
+
+		/* stage through a kernel buffer: no copy_to_user() from iomem */
+		memcpy_fromio(btl_buf, btl->mem.v_base + atu_pos, len);
+		if (copy_to_user(buf, btl_buf, len)) {
+			mif_err("%s: copy_to_user() error", btl->name);
+			ret = -EFAULT;
+		}
+
+link_exit:
+		vfree(btl_buf); /* vfree(NULL) is a no-op */
+		if (!IS_ERR_OR_NULL(btl->mem.v_base)) {
+			devm_iounmap(ld->dev, btl->mem.v_base);
+			btl->mem.v_base = NULL;
+		}
+		mutex_unlock(&mc->pcie_check_lock);
+#endif
+		break;
+	default:
+		break;
+	}
+
+	if (ret) {
+		*pos = 0;
+		return ret;
+	}
+
+	*pos += len;
+	return len;
+}
+
+#define IOCTL_GET_BTL_SIZE _IO('o', 0x59)
+/*
+ * ioctl() handler.  Only IOCTL_GET_BTL_SIZE is supported: it copies the
+ * BTL region size (size_t) to the user pointer passed in @arg.
+ */
+static long btl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+	struct cp_btl *btl = NULL;
+	size_t btl_size;
+	int ret = 0;
+
+	btl = (struct cp_btl *)filep->private_data;
+	if (!btl) {
+		mif_err("btl is null\n");
+		return -ENODEV;
+	}
+	btl_size = btl->mem.size;
+
+	switch (cmd) {
+	case IOCTL_GET_BTL_SIZE:
+		mif_info("IOCTL_BTL_FULL_DUMP:%d 0x%08lx\n", btl->id, btl_size);
+		/* copy_to_user() returns the number of uncopied bytes, which
+		 * is not a valid error code: translate it to -EFAULT.
+		 */
+		if (copy_to_user((void __user *)arg, &btl_size, sizeof(btl_size))) {
+			mif_err("copy_to_user() error\n");
+			return -EFAULT;
+		}
+		break;
+
+	default:
+		mif_err("Invalid ioctl:0x%08x\n", cmd);
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+
+	return ret;
+}
+
+/* Command line parameter */
+static bool _is_enabled[MAX_BTL_ID] = {false, false};
+
+#if defined(MODULE)
+/* "ON"/"on" enables BTL for CP0; "DUAL_ON"/"dual_on" enables both CPs. */
+static int btl_set_enable(const char *str, const struct kernel_param *kp)
+{
+	if (!strcmp(str, "ON") || !strcmp(str, "on"))
+		_is_enabled[BTL_ID_0] = true;
+	if (!strcmp(str, "DUAL_ON") || !strcmp(str, "dual_on")) {
+		_is_enabled[BTL_ID_0] = true;
+		_is_enabled[BTL_ID_1] = true;
+	}
+	mif_info("%s enable:%d/%d\n", str, _is_enabled[BTL_ID_0], _is_enabled[BTL_ID_1]);
+	return 0;
+}
+static const struct kernel_param_ops cp_btl_param_ops = {
+	.set = &btl_set_enable,
+};
+module_param_cb(cp_btl, &cp_btl_param_ops, NULL, 0644);
+#else /* MODULE */
+/* Built-in variant: same ON/DUAL_ON keywords, parsed from the boot args. */
+static int btl_set_enable(char *str)
+{
+	if (!strcmp(str, "ON") || !strcmp(str, "on"))
+		_is_enabled[BTL_ID_0] = true;
+
+	if (!strcmp(str, "DUAL_ON") || !strcmp(str, "dual_on")) {
+		_is_enabled[BTL_ID_0] = true;
+		_is_enabled[BTL_ID_1] = true;
+	}
+
+	mif_info("%s enable:%d/%d\n", str, _is_enabled[BTL_ID_0], _is_enabled[BTL_ID_1]);
+
+	return 0;
+}
+
+/* Two boot-arg spellings are accepted for historical reasons. */
+static int __init btl_console_setup(char *str)
+{
+	return btl_set_enable(str);
+}
+__setup("androidboot.cp_reserved_mem=", btl_console_setup);
+
+static int __init btl_console_setup_alt(char *str)
+{
+	return btl_set_enable(str);
+}
+__setup("androidboot.cp_btl=", btl_console_setup_alt);
+#endif /* MODULE */
+
+/* Create */
+/* File operations for the BTL misc device node (read/ioctl only). */
+static const struct file_operations btl_file_ops = {
+	.open = btl_open,
+	.release = btl_release,
+	.read = btl_read,
+	.unlocked_ioctl = btl_ioctl,
+	.compat_ioctl = btl_ioctl,
+};
+
+/*
+ * Set up a CP BTL (modem log) device: parse DT, size and map the log
+ * memory for the configured link type, and register the misc device node.
+ *
+ * Returns 0 on success (also when BTL is disabled via cmdline, after
+ * releasing the reserved memory) or a negative errno.
+ */
+int cp_btl_create(struct cp_btl *btl, struct device *dev)
+{
+	struct modem_data *pdata = NULL;
+	int ret = 0;
+	struct sysinfo s;
+
+	if (!dev) {
+		mif_err("dev is null\n");
+		return -ENODEV;
+	}
+
+	pdata = dev->platform_data;
+	if (!pdata) {
+		mif_err("pdata is null\n");
+		return -ENODEV;
+	}
+
+	if (!btl) {
+		mif_err("btl is null\n");
+		return -ENOMEM;
+	}
+	atomic_set(&btl->active, 0);
+
+	mif_dt_read_string(dev->of_node, "cp_btl_node_name", btl->name);
+	mif_dt_read_u32_noerr(dev->of_node, "cp_btl_support_extension", btl->support_extension);
+	mif_dt_read_u32_noerr(dev->of_node, "cp_btl_extension_dram_size", btl->extension_dram_size);
+
+	if (btl->support_extension)
+		btl->extension_enabled = true;
+
+	btl->id = pdata->cp_num;
+	if (btl->id >= MAX_BTL_ID) {
+		mif_err("id is over max:%d\n", btl->id);
+		ret = -EINVAL;
+		goto create_exit;
+	}
+	/* disabled via the cp_btl cmdline/module parameter: not an error */
+	if (!_is_enabled[btl->id]) {
+		mif_err("CP BTL is disabled for %d\n", btl->id);
+		ret = 0;
+		goto create_exit;
+	}
+	btl->enabled = true;
+	btl->link_type = pdata->link_type;
+
+	mif_info("name:%s id:%d link:%d\n", btl->name, btl->id, btl->link_type);
+	switch (btl->link_type) {
+	case LINKDEV_SHMEM:
+		btl->mem.size = cp_shmem_get_size(btl->id, SHMEM_BTL);
+
+		if (btl->support_extension) {
+			si_meminfo(&s);
+			mif_info("total mem (%ld kb)\n", convert_to_kb(s.totalram));
+			/* DRAM size: over 8GB -> BTL size: 64MB */
+			/* DRAM size: under 8GB -> BTL size: 32MB */
+			if (convert_to_kb(s.totalram) > btl->extension_dram_size) {
+				btl->mem.size += cp_shmem_get_size(btl->id, SHMEM_BTL_EXT);
+			} else {
+				/* extension not needed: give the memory back */
+				cp_shmem_release_rmem(btl->id, SHMEM_BTL_EXT, 0);
+				btl->extension_enabled = false;
+			}
+		}
+
+		/* TODO: cached */
+		btl->mem.v_base = cp_shmem_get_nc_region(cp_shmem_get_base(btl->id, SHMEM_BTL),
+				btl->mem.size);
+		if (!btl->mem.v_base) {
+			mif_err("cp_shmem_get_region() error:v_base\n");
+			ret = -ENOMEM;
+			goto create_exit;
+		}
+
+		/* BAAW */
+		exynos_smc(SMC_ID_CLK, SSS_CLK_ENABLE, 0, 0);
+
+		ret = (int)exynos_smc(SMC_ID, CP_BOOT_REQ_CP_RAM_LOGGING, 0, 0);
+		if (ret) {
+			mif_err("exynos_smc() error:%d\n", ret);
+			goto create_exit;
+		}
+		exynos_smc(SMC_ID_CLK, SSS_CLK_DISABLE, 0, 0);
+		break;
+
+	case LINKDEV_PCIE:
+		/* fixed window; mapped on demand in btl_read() via the ATU */
+		btl->mem.v_base = NULL;
+		btl->mem.p_base = 0x14200000;
+		/* actual cp address is 0x47200000, but needs additional 0x40000000 */
+		btl->mem.cp_p_base = 0x87200000;
+		btl->mem.size = (SZ_32M - SZ_2M);
+		break;
+
+	default:
+		mif_err("link_type error:%d\n", btl->link_type);
+		ret = -EINVAL;
+		goto create_exit;
+	}
+
+	btl->mld = pdata->mld;
+
+	btl->miscdev.minor = MISC_DYNAMIC_MINOR;
+	btl->miscdev.name = btl->name;
+	btl->miscdev.fops = &btl_file_ops;
+	ret = misc_register(&btl->miscdev);
+	if (ret) {
+		mif_err("misc_register() error for %s:%d", btl->name, ret);
+		goto create_exit;
+	}
+
+	if (btl->mem.v_base)
+		memset(btl->mem.v_base, 0, btl->mem.size);
+	atomic_set(&btl->active, 1);
+
+	return 0;
+
+create_exit:
+	/* error (or disabled): drop the mapping and the reserved memory */
+	if (btl->mem.v_base)
+		vunmap(btl->mem.v_base);
+
+	cp_shmem_release_rmem(btl->id, SHMEM_BTL, 0);
+	if (btl->extension_enabled)
+		cp_shmem_release_rmem(btl->id, SHMEM_BTL_EXT, 0);
+
+	return ret;
+}
+
+/* Unmap the BTL region (if mapped) and unregister the misc device. */
+int cp_btl_destroy(struct cp_btl *btl)
+{
+	if (!btl) {
+		mif_err("btl is null\n");
+		return -ENODEV;
+	}
+
+	if (btl->mem.v_base)
+		vunmap(btl->mem.v_base);
+
+	misc_deregister(&btl->miscdev);
+
+	return 0;
+}
diff --git a/cp_btl.h b/cp_btl.h
new file mode 100644
index 0000000..09593d6
--- /dev/null
+++ b/cp_btl.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019, Samsung Electronics.
+ *
+ */
+
+#ifndef __CP_BTL_H__
+#define __CP_BTL_H__
+
+/* Index of the CP (modem core) a BTL instance belongs to. */
+enum cp_btl_id {
+	BTL_ID_0,
+	BTL_ID_1,
+	MAX_BTL_ID,
+};
+
+/* One contiguous BTL memory window. */
+struct cp_btl_mem_region {
+	unsigned long p_base;		/* AP-side physical base */
+	void __iomem *v_base;		/* AP virtual mapping, NULL until mapped */
+	unsigned long cp_p_base;	/* CP-side physical base */
+	u32 size;			/* window size in bytes */
+};
+
+/* Per-CP BTL device state, embedded in the modem control structure. */
+struct cp_btl {
+	char *name;
+	u32 id;
+
+	bool enabled;			/* set when cmdline enables this CP */
+	atomic_t active;		/* non-zero once create() completed */
+
+	u32 link_type;			/* LINKDEV_SHMEM or LINKDEV_PCIE */
+	struct cp_btl_mem_region mem;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+	int last_pcie_atu_grp;		/* -1 forces a fresh ATU setup */
+#endif
+
+	struct mem_link_device *mld;
+	struct miscdevice miscdev;
+
+	bool support_extension;
+	u32 extension_dram_size;	/* threshold (KB) for the extra region */
+	bool extension_enabled;
+};
+
+#if IS_ENABLED(CONFIG_CP_BTL)
+extern int cp_btl_create(struct cp_btl *btl, struct device *dev);
+extern int cp_btl_destroy(struct cp_btl *btl);
+#else
+static inline int cp_btl_create(struct cp_btl *btl, struct device *dev) { return 0; }
+static inline int cp_btl_destroy(struct cp_btl *btl) { return 0; }
+#endif
+
+#endif /* __CP_BTL_H__ */
diff --git a/cp_pmic.c b/cp_pmic.c
new file mode 100644
index 0000000..47a8c34
--- /dev/null
+++ b/cp_pmic.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CP PMIC (Power Management IC) driver.
+ *
+ * Copyright (c) 2023, Google LLC. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include "cp_pmic.h"
+
+/* One register write step of the warm reset sequence (parsed from DT). */
+struct reg_entry {
+	u32 reg;	/* register address */
+	u32 val;	/* value to write */
+	u32 delay_ms;	/* delay after the write; 0 means none */
+};
+
+/* An ordered array of register writes. */
+struct pmic_reg_sequence {
+	size_t num_entries;
+	struct reg_entry *reg_entries;
+};
+
+/* Per-device driver state, attached via spmi_device_set_drvdata(). */
+struct pmic_info {
+	struct spmi_device *sdev;
+	struct regmap *regmap;
+	struct pmic_reg_sequence warm_reset_seq;
+	u32 pmic_otp_reg;	/* OTP version register address from DT */
+};
+
+/* Writes are restricted to this small control window. */
+static const struct regmap_range pmic_wr_range[] = {
+	regmap_reg_range(0x0675, 0x067d),
+};
+
+static const struct regmap_access_table pmic_wr_table = {
+	.yes_ranges = pmic_wr_range,
+	.n_yes_ranges = ARRAY_SIZE(pmic_wr_range),
+};
+
+/* Reads are allowed over a wider window than writes. */
+static const struct regmap_range pmic_rd_range[] = {
+	regmap_reg_range(0x030E, 0x067d),
+};
+
+static const struct regmap_access_table pmic_rd_table = {
+	.yes_ranges = pmic_rd_range,
+	.n_yes_ranges = ARRAY_SIZE(pmic_rd_range),
+};
+
+/* 16-bit register addresses, 8-bit values (SPMI extended regmap). */
+static struct regmap_config pmic_regmap_config = {
+	.name = "modem_pmic",
+	.reg_bits = 16,
+	.val_bits = 8,
+	.val_format_endian = REGMAP_ENDIAN_NATIVE,
+	.max_register = 0x67d,
+	.wr_table = &pmic_wr_table,
+	.rd_table = &pmic_rd_table,
+};
+
+/*
+ * sysfs: read a PMIC register.  Input format: "<reg_addr (hex)>".
+ * The value is only reported to the kernel log, not returned to the reader.
+ */
+static ssize_t pmic_read_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pmic_info *info = dev_get_drvdata(dev);
+	u32 reg, val;
+	int ret;
+
+	if (!info) {
+		dev_err(dev, "pmic_info not available.\n");
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%x", &reg) != 1) {
+		dev_err(dev, "Invalid format. Use '<reg_addr (hex)>.'\n");
+		return -EINVAL;
+	}
+
+	ret = regmap_read(info->regmap, reg, &val);
+	if (ret) {
+		dev_err(dev, "Failed to read register 0x%08x: %d\n", reg, ret);
+		return ret;
+	}
+
+	/* add the missing trailing newline so the log line is terminated */
+	dev_info(dev, "Read PMIC register 0x%08x with value 0x%08x\n", reg, val);
+
+	return count;
+}
+static DEVICE_ATTR_WO(pmic_read);
+
+/* sysfs: write a PMIC register.  Input format: "<reg (hex)> <value (hex)>". */
+static ssize_t pmic_write_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct pmic_info *info = dev_get_drvdata(dev);
+	int err;
+	u32 reg;
+	u32 val;
+
+	if (!info) {
+		dev_err(dev, "pmic_info not available.\n");
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%x %x", &reg, &val) != 2) {
+		dev_err(dev, "Invalid format. Use '<reg_addr (hex)> <value (hex)>.'\n");
+		return -EINVAL;
+	}
+
+	err = regmap_write(info->regmap, reg, val);
+	if (err) {
+		dev_err(dev, "Failed to write register 0x%08x: %d\n", reg, err);
+		return err;
+	}
+
+	return count;
+}
+static DEVICE_ATTR_WO(pmic_write);
+
+/* sysfs attributes exposed under the "pmic" group.
+ * Declared non-const so the group can use them directly; the previous
+ * "const struct attribute *" array required casting the const away.
+ */
+static struct attribute *pmic_attrs[] = {
+	&dev_attr_pmic_read.attr,
+	&dev_attr_pmic_write.attr,
+	NULL,
+};
+
+static const struct attribute_group pmic_attr_group = {
+	.attrs = pmic_attrs,
+	.name = "pmic",
+};
+
+/*
+ * Replay the DT-provided warm reset register sequence on the PMIC.
+ * Each write is retried once; the sequence aborts on persistent failure.
+ */
+void pmic_warm_reset_sequence(struct device *dev)
+{
+	struct pmic_info *info = dev_get_drvdata(dev);
+	size_t i;
+	struct pmic_reg_sequence *seq;
+	struct reg_entry *entry;
+
+	/* guard against being called before probe populated drvdata */
+	if (!info) {
+		dev_err(dev, "pmic_info not available.\n");
+		return;
+	}
+
+	seq = &info->warm_reset_seq;
+	for (i = 0; i < seq->num_entries; i++) {
+		int retry;
+
+		entry = &seq->reg_entries[i];
+		for (retry = 0; retry < 2; retry++) {
+			if (regmap_write(info->regmap, entry->reg, entry->val) == 0)
+				break;
+			msleep(2);
+		}
+		if (retry == 2) {
+			/* a failed write is an error, not an info event */
+			dev_err(dev, "Failed to write register %#x\n", entry->reg);
+			return;
+		}
+		if (entry->delay_ms)
+			msleep(entry->delay_ms);
+	}
+	dev_info(dev, "Warm reset sequence completed.\n");
+}
+EXPORT_SYMBOL_GPL(pmic_warm_reset_sequence);
+
+/*
+ * Read the PMIC OTP version register (retried once).
+ * Returns the version value, or -1 when drvdata is missing or the read
+ * fails persistently.
+ */
+int pmic_get_otp(struct device *dev)
+{
+	struct pmic_info *info = dev_get_drvdata(dev);
+	unsigned int val; /* regmap_read() expects unsigned int, not int */
+	int retry;
+	int otp_version = -1;
+
+	if (!info)
+		return otp_version;
+
+	for (retry = 0; retry < 2; retry++) {
+		if (regmap_read(info->regmap, info->pmic_otp_reg, &val) == 0) {
+			otp_version = val;
+			break;
+		}
+		msleep(2);
+	}
+
+	return otp_version;
+}
+EXPORT_SYMBOL_GPL(pmic_get_otp);
+
+/*
+ * Parse DT properties: the OTP version register address ("pmic-otp-reg")
+ * and the warm reset write sequence ("warm_reset_seq": big-endian u32
+ * triples of <reg val delay_ms>).
+ */
+static int pmic_dt_init(struct device *dev, struct pmic_info *info)
+{
+	struct property *prop;
+	size_t num_entries, i;
+	u32 *seq_data;
+
+	if (!dev->of_node) {
+		dev_err(dev, "of_node not found.\n");
+		return -ENOENT;
+	}
+
+	/* read the pmic otp register */
+	if (of_property_read_u32(dev->of_node, "pmic-otp-reg", &info->pmic_otp_reg))
+		dev_info(dev, "PMIC otp reg not available!\n");
+
+	dev_info(dev, "PMIC otp reg: %#x\n", info->pmic_otp_reg);
+
+	/* read the pmic warm reset sequence */
+	prop = of_find_property(dev->of_node, "warm_reset_seq", NULL);
+	if (!prop) {
+		dev_err(dev, "Missing or invalid warm_reset_seq property in DT.\n");
+		return -ENODATA;
+	}
+
+	/* the property must contain whole <reg val delay_ms> triples */
+	num_entries = prop->length / sizeof(struct reg_entry);
+	if (!num_entries || prop->length % sizeof(struct reg_entry)) {
+		dev_err(dev, "Malformed warm_reset_seq property in DT.\n");
+		return -ENODATA;
+	}
+	info->warm_reset_seq.num_entries = num_entries;
+
+	/* was sized with sizeof(struct reg_sequence) — the wrong type; use
+	 * the element type actually stored and let devm_kmalloc_array()
+	 * check the multiplication for overflow
+	 */
+	info->warm_reset_seq.reg_entries = devm_kmalloc_array(dev, num_entries,
+			sizeof(struct reg_entry), GFP_KERNEL);
+	if (!info->warm_reset_seq.reg_entries)
+		return -ENOMEM;
+
+	/* DT cells are big-endian; convert each triple */
+	seq_data = (u32 *)prop->value;
+	for (i = 0; i < num_entries; i++) {
+		info->warm_reset_seq.reg_entries[i].reg = be32_to_cpup(seq_data++);
+		info->warm_reset_seq.reg_entries[i].val = be32_to_cpup(seq_data++);
+		info->warm_reset_seq.reg_entries[i].delay_ms = be32_to_cpup(seq_data++);
+	}
+
+	return 0;
+}
+
+/* SPMI probe: allocate state, set up the regmap, parse DT, expose sysfs. */
+static int pmic_probe(struct spmi_device *sdev)
+{
+	struct device *dev = &sdev->dev;
+	struct pmic_info *info;
+
+	info = devm_kzalloc(dev, sizeof(struct pmic_info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	spmi_device_set_drvdata(sdev, info);
+	info->sdev = sdev;
+
+	/* Initialize regmap for PMIC access.  Check the returned pointer
+	 * itself: the previous IS_ERR(&info->regmap) tested the address of
+	 * the struct member and could never detect a failure.
+	 */
+	info->regmap = devm_regmap_init_spmi_ext(sdev, &pmic_regmap_config);
+	if (IS_ERR(info->regmap)) {
+		dev_err(&sdev->dev, "Failed to initialize PMIC regmap\n");
+		return PTR_ERR(info->regmap);
+	}
+
+	/* DT and sysfs failures are logged but not fatal */
+	if (pmic_dt_init(dev, info))
+		dev_err(dev, "Failed to initialize PMIC DT data\n");
+
+	if (sysfs_create_group(&dev->kobj, &pmic_attr_group))
+		dev_err(dev, "Failed to create PMIC sysfs group\n");
+
+	return 0;
+}
+
+static const struct of_device_id pmic_of_match[] = {
+	{ .compatible = "google,cp-pmic-spmi", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, pmic_of_match);
+
+/* NOTE(review): no .remove — the sysfs group from probe is never torn down */
+static struct spmi_driver pmic_driver = {
+	.probe = pmic_probe,
+	.driver = {
+		.name = "cp_pmic_driver",
+		.of_match_table = pmic_of_match,
+	},
+};
+
+/* bus_find_device() matcher: true when @dev was created from OF @node. */
+static int of_dev_node_match(struct device *dev, const void *node)
+{
+	return dev->of_node == node;
+}
+
+/* Look up the device bound to @node on the bus this driver registered on. */
+struct device *pmic_get_device(struct device_node *node)
+{
+	struct bus_type *sbt = pmic_driver.driver.bus;
+
+	if (!sbt)
+		return NULL;
+
+	return bus_find_device(sbt, NULL, node, of_dev_node_match);
+}
+EXPORT_SYMBOL_GPL(pmic_get_device);
+
+/* As pmic_get_device(), but returns the containing spmi_device (or NULL). */
+struct spmi_device *pmic_get_spmi_device(struct device_node *node)
+{
+	struct device *dev = pmic_get_device(node);
+
+	return dev ? to_spmi_device(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(pmic_get_spmi_device);
+
+/* Module init: register the SPMI driver and propagate the result
+ * (the return value was previously discarded and 0 returned always).
+ */
+static int __init pmic_init(void)
+{
+	return spmi_driver_register(&pmic_driver);
+}
+module_init(pmic_init);
+
+/* Module exit: unregister the SPMI driver. */
+static void __exit pmic_exit(void)
+{
+	spmi_driver_unregister(&pmic_driver);
+}
+module_exit(pmic_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Google CP PMIC Driver");
+MODULE_AUTHOR("Salmax Chang <salmaxchang@google.com>");
+
diff --git a/cp_pmic.h b/cp_pmic.h
new file mode 100644
index 0000000..f572a63
--- /dev/null
+++ b/cp_pmic.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CP PMIC (Power Management IC) driver.
+ *
+ * Copyright (c) 2023, Google LLC. All rights reserved.
+ */
+
+#ifndef __CP_PMIC_H__
+#define __CP_PMIC_H__
+
+int pmic_get_otp(struct device *dev);
+void pmic_warm_reset_sequence(struct device *dev);
+struct device *pmic_get_device(struct device_node *node);
+struct spmi_device *pmic_get_spmi_device(struct device_node *node);
+
+#endif
+
diff --git a/cp_thermal_zone.c b/cp_thermal_zone.c
new file mode 100644
index 0000000..30ae70f
--- /dev/null
+++ b/cp_thermal_zone.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CP thermal zone driver.
+ *
+ * Copyright (c) 2020, Google LLC. All rights reserved.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/thermal.h>
+
+/* Maximum number of CP-reported temperature sensors supported. */
+#define MAX_CP_TEMP_SENSOR 9
+
+/* One CP temperature sensor mirrored into a thermal zone. */
+struct cp_temp_sensor {
+	struct thermal_zone_device *tzd;
+	bool valid;	/* true once a first reading has arrived */
+	int temp;	/* last reported temperature */
+};
+
+/* Driver state: sensor array, actual count from DT "num_sensors". */
+struct cp_temp_manager {
+	u32 num_sensors;
+	struct cp_temp_sensor sensor[MAX_CP_TEMP_SENSOR];
+};
+
+/*
+ * sysfs: update one CP temperature sensor.
+ * Input format: "<index> <trigger> <temp>"; a non-zero trigger forces a
+ * thermal zone update.  The first update for a sensor enables its zone.
+ */
+static ssize_t cp_temp_store(struct device *dev, struct device_attribute *attr, const char *buf,
+		size_t count)
+{
+	int ret = 0;
+	struct cp_temp_manager *temp_manager = dev_get_drvdata(dev);
+	u32 index;
+	int temp, trigger;
+
+	if (!temp_manager) {
+		dev_err(dev, "Could not get CP temperature manager");
+		return -EINVAL;
+	}
+
+	/* index is a u32: scan it with %u instead of %d */
+	ret = sscanf(buf, "%u %d %d", &index, &trigger, &temp);
+	if (ret != 3) {
+		dev_err(dev, "Invalid CP temperature update");
+		return -EINVAL;
+	}
+
+	if (index >= temp_manager->num_sensors) {
+		dev_err(dev, "Invalid CP temperature sensor index - %d", index);
+		return -EINVAL;
+	}
+
+	/* first valid reading: enable the corresponding thermal zone */
+	if (!temp_manager->sensor[index].valid && temp_manager->sensor[index].tzd)
+		thermal_zone_device_enable(temp_manager->sensor[index].tzd);
+
+	temp_manager->sensor[index].valid = true;
+	temp_manager->sensor[index].temp = temp;
+
+	if (trigger && temp_manager->sensor[index].tzd) {
+		dev_info_ratelimited(dev, "Update CP temperature sensor %d", index);
+		thermal_zone_device_update(temp_manager->sensor[index].tzd,
+				THERMAL_EVENT_UNSPECIFIED);
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR_WO(cp_temp);
+
+/* sysfs attributes for pushing CP temperature updates from user space. */
+static struct attribute *cp_temp_attrs[] = {
+	&dev_attr_cp_temp.attr,
+	NULL,
+};
+
+static const struct attribute_group cp_temp_group = {
+	.attrs = cp_temp_attrs,
+};
+
+/* thermal_zone get_temp callback: report the last pushed temperature. */
+static int cp_sensor_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+	struct cp_temp_sensor *s = tz->devdata;
+
+	if (!s || !s->valid)
+		return -EINVAL;
+
+	*temp = s->temp;
+	return 0;
+}
+
+static struct thermal_zone_device_ops cp_thermal_zone_ops = { .get_temp = cp_sensor_get_temp };
+
+/*
+ * Probe: read "num_sensors" from DT, expose the cp_temp sysfs attribute and
+ * register one thermal zone per sensor (disabled until the first update).
+ */
+static int cp_thermal_zone_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	unsigned int i;
+	struct cp_temp_manager *temp_manager;
+	struct thermal_zone_device *tzd;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = pdev->dev.of_node;
+
+	temp_manager = devm_kzalloc(dev, sizeof(struct cp_temp_manager), GFP_KERNEL);
+	/* the allocation was previously dereferenced without a check */
+	if (!temp_manager) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (of_property_read_u32(np, "num_sensors", &temp_manager->num_sensors)) {
+		dev_err(dev, "Cannot read number of CP sensors");
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (temp_manager->num_sensors > MAX_CP_TEMP_SENSOR) {
+		dev_err(dev, "Invalid number of CP temp sensor - %d", temp_manager->num_sensors);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (sysfs_create_group(&dev->kobj, &cp_temp_group)) {
+		dev_err(dev, "Error creating sysfs node for CP temperature");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	for (i = 0; i < temp_manager->num_sensors; ++i) {
+		dev_info(dev, "Registering CP temp sensor %d", i);
+		tzd = devm_thermal_of_zone_register(dev, i, &temp_manager->sensor[i],
+				&cp_thermal_zone_ops);
+		if (IS_ERR(tzd)) {
+			dev_err(dev, "Error registering CP temperature sensor %d", i);
+			continue;
+		}
+		temp_manager->sensor[i].tzd = tzd;
+		/* zones stay disabled until the first reading marks them valid */
+		thermal_zone_device_disable(tzd);
+	}
+	platform_set_drvdata(pdev, temp_manager);
+fail:
+	return ret;
+}
+
+/* Remove: tear down the sysfs group created in probe (thermal zones are
+ * devm-managed and released automatically); previously it was leaked.
+ */
+static int cp_thermal_zone_remove(struct platform_device *pdev)
+{
+	sysfs_remove_group(&pdev->dev.kobj, &cp_temp_group);
+	return 0;
+}
+
+static const struct of_device_id cp_thermal_zone_match[] = {
+	{
+		.compatible = "google,gs101-cp-thermal",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, cp_thermal_zone_match);
+
+static struct platform_driver cp_thermal_zone_driver = {
+	.driver = {
+		.name = "gs101-cp-thermal-zone",
+		/* NOTE(review): .owner is set by the platform core these days */
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(cp_thermal_zone_match),
+	},
+	.probe = cp_thermal_zone_probe,
+	.remove = cp_thermal_zone_remove,
+};
+module_platform_driver(cp_thermal_zone_driver);
+
+MODULE_DESCRIPTION("Google LLC CP Thermal Zone Driver");
+MODULE_AUTHOR("Eddie Tashjian <etashjian@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/cpif_netrx_mng.c b/cpif_netrx_mng.c
new file mode 100644
index 0000000..2657345
--- /dev/null
+++ b/cpif_netrx_mng.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/sock.h>
+
+#include "cpif_netrx_mng.h"
+
+#define NETRX_POOL_PAGE_SIZE 32768
+/*
+ * Create the RX buffer manager: two CP virtual address maps (descriptor
+ * ring and data buffers) plus a recycling page pool for fragments of
+ * @frag_size bytes.  The descriptor region is mapped immediately.
+ *
+ * Assumes frag_size > 0 (it is used as a divisor) — caller responsibility.
+ * Returns the manager or NULL on any failure (everything rolled back).
+ */
+struct cpif_netrx_mng *cpif_create_netrx_mng(struct cpif_addr_pair *desc_addr_pair,
+		u64 desc_size, u64 databuf_cp_pbase,
+		u64 frag_size, u64 num_packet)
+{
+	struct cpif_netrx_mng *cm;
+	u64 temp;
+	u64 num_packet_per_page;
+	u64 total_page_count;
+
+	if (!desc_addr_pair) {
+		mif_err("desc addr pair not given\n");
+		return NULL;
+	}
+
+	cm = kvzalloc(sizeof(struct cpif_netrx_mng), GFP_KERNEL);
+	if (cm == NULL)
+		goto fail_cm;
+
+	cm->frag_size = frag_size;
+	cm->num_packet = num_packet;
+
+	/* finding the least number of pages required for data map */
+	num_packet_per_page = NETRX_POOL_PAGE_SIZE / frag_size;
+	total_page_count = num_packet / num_packet_per_page + 1;
+	/**
+	 * total buffer size is calculated based on worst case. buffer
+	 * composed of 4KB pages only
+	 */
+	cm->total_buf_size = (num_packet + 100) * PAGE_SIZE;
+
+	cm->desc_map = cpif_vmap_create(desc_addr_pair->cp_addr, desc_size, desc_size);
+	if (!cm->desc_map)
+		goto fail_vmap;
+	cm->data_map = cpif_vmap_create(databuf_cp_pbase, cm->total_buf_size, frag_size);
+	if (!cm->data_map) {
+		cpif_vmap_free(cm->desc_map);
+		goto fail_vmap;
+	}
+
+	/* map descriptor region in advance */
+	temp = cpif_vmap_map_area(cm->desc_map, 0, 0, virt_to_phys(desc_addr_pair->ap_addr));
+	if (temp != desc_addr_pair->cp_addr)
+		goto fail;
+
+	/* create recycling page array */
+	cm->data_pool = cpif_page_pool_create(total_page_count, NETRX_POOL_PAGE_SIZE);
+	if (unlikely(!cm->data_pool))
+		goto fail;
+
+	spin_lock_init(&cm->lock);
+
+	/* initialize data address list */
+	INIT_LIST_HEAD(&cm->data_addr_list);
+
+	mif_info("netrx mng: num_packet: %llu frag_size: %llu total_buf_size: %llu\n",
+		cm->num_packet, cm->frag_size, cm->total_buf_size);
+	mif_info("desc vmap: va_start: 0x%llX va_end: 0x%llX va_size: %llu\n",
+		cm->desc_map->va_start, cm->desc_map->va_end, cm->desc_map->va_size);
+	mif_info("data vmap: va_start: 0x%llX va_end: 0x%llX va_size: %llu\n",
+		cm->data_map->va_start, cm->data_map->va_end, cm->data_map->va_size);
+	mif_info("data_pool: num_pages: %d\n", cm->data_pool->rpage_arr_len);
+
+	return cm;
+
+fail:
+	cpif_vmap_free(cm->desc_map);
+	cpif_vmap_free(cm->data_map);
+
+fail_vmap:
+	kvfree(cm);
+
+fail_cm:
+	return NULL;
+}
+EXPORT_SYMBOL(cpif_create_netrx_mng);
+
+/* Tear down an RX buffer manager: pool, vmaps, pending address pairs. */
+void cpif_exit_netrx_mng(struct cpif_netrx_mng *cm)
+{
+	struct cpif_addr_pair *pair, *next;
+
+	if (!cm)
+		return;
+
+	if (cm->data_pool)
+		cpif_page_pool_delete(cm->data_pool);
+
+	cpif_vmap_free(cm->desc_map);
+	cpif_vmap_free(cm->data_map);
+
+	list_for_each_entry_safe(pair, next, &cm->data_addr_list, addr_item) {
+		list_del(&pair->addr_item);
+		kfree(pair);
+	}
+	kvfree(cm);
+}
+EXPORT_SYMBOL(cpif_exit_netrx_mng);
+
+/* Reset the manager: unmap and free every outstanding RX buffer, then
+ * reinitialize the temporary page of the pool.
+ */
+void cpif_init_netrx_mng(struct cpif_netrx_mng *cm)
+{
+	struct cpif_addr_pair *pair, *next;
+
+	if (!cm || !cm->data_map || !cm->data_pool)
+		return;
+
+	list_for_each_entry_safe(pair, next, &cm->data_addr_list, addr_item)
+		cpif_unmap_rx_buf(cm, pair->cp_addr, true);
+
+	cpif_page_init_tmp_page(cm->data_pool);
+}
+EXPORT_SYMBOL(cpif_init_netrx_mng);
+
+/*
+ * Allocate one RX fragment from the recycling pool and map it into the CP
+ * address space.  The resulting pair is queued on data_addr_list in the
+ * order CP is expected to consume the buffers.
+ *
+ * Returns the new cpif_addr_pair, or NULL on failure.
+ */
+struct cpif_addr_pair *cpif_map_rx_buf(struct cpif_netrx_mng *cm)
+{
+	struct page *page;
+	void *data;
+	u64 page_size, cp_addr;
+	unsigned long flags;
+	struct cpif_addr_pair *ret = NULL;
+	bool used_tmp_alloc = false;
+
+	spin_lock_irqsave(&cm->lock, flags);
+
+	if (unlikely(!cm->data_map)) {
+		mif_err_limited("data map is not created yet\n");
+		goto done;
+	}
+
+	data = cpif_page_alloc(cm->data_pool, cm->frag_size, &used_tmp_alloc);
+	if (!data) {
+		mif_err_limited("failed to page alloc: return\n");
+		goto done;
+	}
+
+	page = cpif_get_cur_page(cm->data_pool, used_tmp_alloc);
+	page_size = cpif_cur_page_size(cm->data_pool, used_tmp_alloc);
+
+	/* NOTE(review): if this allocation fails the fragment obtained above
+	 * is not returned to the pool — presumably reclaimed by recycling;
+	 * verify against cpif_page_alloc() semantics.
+	 */
+	ret = kzalloc(sizeof(struct cpif_addr_pair), GFP_ATOMIC);
+	if (!ret) {
+		mif_err_limited("failed to kzalloc for addr_pair\n");
+		goto done;
+	}
+
+	cp_addr = cpif_vmap_map_area(cm->data_map, page_to_phys(page),
+			page_size, virt_to_phys(data));
+	if (!cp_addr) { /* cp_addr cannot be allocated */
+		mif_err_limited("failed to vmap and get cp_addr\n");
+		kfree(ret);
+		ret = NULL;
+		goto done;
+	}
+
+	/* returns addr that cp is allowed to write */
+	ret->cp_addr = cp_addr;
+	ret->ap_addr = data;
+	ret->page = page;
+	ret->page_order = get_order(page_size);
+	list_add_tail(&ret->addr_item, &cm->data_addr_list);
+
+done:
+	spin_unlock_irqrestore(&cm->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(cpif_map_rx_buf);
+
+/*
+ * Unmap the RX buffer that CP has finished with.  Buffers are expected to
+ * be retired strictly in FIFO order (the head of data_addr_list); any
+ * mismatch is reported and NULL returned.
+ *
+ * @free: when true the backing pages are freed and NULL is returned;
+ *        otherwise the AP virtual address is handed back to the caller.
+ */
+void *cpif_unmap_rx_buf(struct cpif_netrx_mng *cm, u64 cp_addr, bool free)
+{
+	unsigned long flags;
+	u64 ap_paddr = 0;
+	void *ap_addr = NULL;
+	struct cpif_addr_pair *apair;
+
+	spin_lock_irqsave(&cm->lock, flags);
+
+	if (unlikely(!cm->data_map)) {
+		mif_err_limited("data map does not exist\n");
+		goto done;
+	}
+
+	/* an address unmapped earlier but not yet delivered takes priority */
+	if (cm->already_retrieved) {
+		ap_addr = cm->already_retrieved;
+		cm->already_retrieved = NULL;
+		goto done;
+	}
+
+	ap_paddr = cpif_vmap_unmap_area(cm->data_map, cp_addr);
+	if (unlikely(ap_paddr == 0)) {
+		mif_err_limited("failed to receive ap_addr\n");
+		goto done;
+	}
+	ap_addr = phys_to_virt(ap_paddr);
+
+	/* enforce FIFO retirement order against the bookkeeping list */
+	apair = list_first_entry_or_null(&cm->data_addr_list, struct cpif_addr_pair, addr_item);
+	if (unlikely(!apair) || ap_addr != apair->ap_addr) {
+		mif_err_limited("ERR! ap_addr: %pK apair->ap_addr:%pK\n", ap_addr,
+			apair ? apair->ap_addr : 0);
+		ap_addr = NULL;
+		goto done;
+	}
+
+	if (ap_addr && free) {
+		__free_pages(apair->page, apair->page_order);
+		ap_addr = NULL;
+	}
+	list_del(&apair->addr_item);
+	kfree(apair);
+
+done:
+	spin_unlock_irqrestore(&cm->lock, flags);
+
+	return ap_addr; /* returns NULL or unmapped AP virtual address */
+}
+EXPORT_SYMBOL(cpif_unmap_rx_buf);
diff --git a/cpif_netrx_mng.h b/cpif_netrx_mng.h
new file mode 100644
index 0000000..2dda973
--- /dev/null
+++ b/cpif_netrx_mng.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#ifndef __CPIF_NETRX_MNG_H__
+#define __CPIF_NETRX_MNG_H__
+
+#include "modem_prj.h"
+#include "link_device_memory.h"
+#include "cpif_page.h"
+#include "cpif_vmapper.h"
+
+/* One CP-address / AP-address mapping for a single RX fragment. */
+struct cpif_addr_pair {
+	u64 cp_addr; /* cp address */
+	void *ap_addr; /* ap virtual address */
+
+	struct page *page; /* page holding the ap address */
+	u64 page_order;
+
+	struct list_head addr_item;
+};
+
+/* RX buffer manager: CP vmaps + recycling page pool + outstanding pairs. */
+struct cpif_netrx_mng {
+	u64 num_packet;
+	u64 frag_size;
+	u64 total_buf_size;
+
+	struct cpif_va_mapper *desc_map;	/* descriptor ring mapping */
+	struct cpif_va_mapper *data_map;	/* data buffer mapping */
+
+	struct cpif_page_pool *data_pool;
+	struct list_head data_addr_list;	/* FIFO of in-flight pairs */
+	spinlock_t lock;
+
+	/* contains pre-unmapped AP addr which couldn't be delivered to kernel yet */
+	void *already_retrieved;
+};
+
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_IOMMU)
+struct cpif_netrx_mng *cpif_create_netrx_mng(struct cpif_addr_pair *desc_addr_pair,
+					     u64 desc_size, u64 databuf_cp_pbase,
+					     u64 max_packet_size, u64 num_packet);
+void cpif_exit_netrx_mng(struct cpif_netrx_mng *cm);
+void cpif_init_netrx_mng(struct cpif_netrx_mng *cm);
+struct cpif_addr_pair *cpif_map_rx_buf(struct cpif_netrx_mng *cm);
+void *cpif_unmap_rx_buf(struct cpif_netrx_mng *cm,
+			u64 cp_data_paddr, bool free);
+#else
+/* Stubs used when the CPIF IOMMU support is compiled out. */
+static inline struct cpif_netrx_mng *cpif_create_netrx_mng(
+		struct cpif_addr_pair *desc_addr_pair,
+		u64 desc_size, u64 databuf_cp_pbase,
+		u64 max_packet_size, u64 num_packet) { return NULL; }
+static inline void cpif_exit_netrx_mng(struct cpif_netrx_mng *cm) { return; }
+static inline void cpif_init_netrx_mng(struct cpif_netrx_mng *cm) { return; }
+static inline struct cpif_addr_pair *cpif_map_rx_buf(struct cpif_netrx_mng *cm)
+{ return NULL; }
+static inline void *cpif_unmap_rx_buf(struct cpif_netrx_mng *cm,
+		u64 cp_data_paddr, bool free) {return NULL; }
+#endif
+#endif /* __CPIF_NETRX_MNG_H__ */
diff --git a/cpif_page.c b/cpif_page.c
new file mode 100644
index 0000000..3483931
--- /dev/null
+++ b/cpif_page.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#include <linux/slab.h>
+#include "cpif_page.h"
+
+/**
+ * cpif_page_pool_delete() - Release a page pool and every page it owns.
+ * @pool: pool created by cpif_page_pool_create(); may be NULL.
+ *
+ * Forces each page's refcount back to 1 via init_page_count() so that
+ * __free_pages() really releases it, then frees the bookkeeping structs
+ * and the pool itself.  Callers must ensure nobody still uses pages from
+ * this pool.
+ */
+void cpif_page_pool_delete(struct cpif_page_pool *pool)
+{
+	int i;
+	struct cpif_page **rpage_arr;
+	struct cpif_page *tmp_page;
+
+	if (!pool)	/* tolerate NULL like kvfree() does */
+		return;
+
+	rpage_arr = pool->recycling_page_arr;
+	tmp_page = pool->tmp_page;
+
+	if (rpage_arr) {
+		for (i = 0; i < pool->rpage_arr_len; i++) {
+			struct cpif_page *cur = rpage_arr[i];
+
+			if (!cur)
+				continue;
+			if (cur->page) {
+				/* drop any leftover external refs before freeing */
+				init_page_count(cur->page);
+				__free_pages(cur->page, pool->page_order);
+			}
+			kvfree(cur);
+		}
+		kvfree(rpage_arr);
+	}
+
+	if (tmp_page) {
+		if (tmp_page->page) {
+			init_page_count(tmp_page->page);
+			__free_pages(tmp_page->page, get_order(pool->tmp_page_size));
+		}
+		kvfree(tmp_page);
+	}
+
+	kvfree(pool);
+	/* removed "pool = NULL;": it only cleared the local parameter copy */
+}
+EXPORT_SYMBOL(cpif_page_pool_delete);
+
+/* Reset the temporary page slot, releasing any page it still holds. */
+void cpif_page_init_tmp_page(struct cpif_page_pool *pool)
+{
+	struct cpif_page *tmp = pool->tmp_page;
+
+	if (!tmp)
+		return;
+
+	tmp->usable = false;
+	tmp->offset = 0;
+
+	if (tmp->page) {
+		__free_pages(tmp->page, get_order(pool->tmp_page_size));
+		tmp->page = NULL;
+	}
+}
+EXPORT_SYMBOL(cpif_page_init_tmp_page);
+
+/**
+ * cpif_page_pool_create() - Allocate a pool of recyclable RX pages.
+ * @num_page:  minimum number of pages required (the pool reserves 2x).
+ * @page_size: size in bytes of each pool page.
+ *
+ * Returns the new pool, or NULL on failure.  On failure everything
+ * allocated so far is released via cpif_page_pool_delete().
+ */
+struct cpif_page_pool *cpif_page_pool_create(u64 num_page, u64 page_size)
+{
+	int i;
+	struct cpif_page_pool *pool;
+	struct cpif_page **rpage_arr;
+	struct cpif_page *tmp_page;
+
+	pool = kvzalloc(sizeof(struct cpif_page_pool), GFP_KERNEL);
+	if (unlikely(!pool)) {
+		mif_err("failed to create page pool\n");
+		return NULL;
+	}
+
+	num_page *= 2; /* reserve twice as large of the least required */
+	rpage_arr = kvzalloc(sizeof(struct cpif_page *) * num_page, GFP_KERNEL);
+	if (unlikely(!rpage_arr)) {
+		mif_err("failed to alloc recycling_page_arr\n");
+		goto fail;
+	}
+
+	/*
+	 * Publish the (zeroed) array into the pool immediately so the fail
+	 * path can free everything allocated so far.  Previously these
+	 * fields were only set on success, so a failure mid-loop leaked the
+	 * array, every page allocated up to that point, and the in-flight
+	 * cpif_page struct.
+	 */
+	pool->recycling_page_arr = rpage_arr;
+	pool->rpage_arr_len = num_page;
+
+	pool->page_size = page_size;
+	pool->page_order = get_order(page_size);
+
+	mif_info("num_page: %llu page_size: %llu page_order: %llu\n",
+		 num_page, page_size, pool->page_order);
+
+	for (i = 0; i < num_page; i++) {
+		struct cpif_page *cur = kvzalloc(sizeof(struct cpif_page), GFP_KERNEL);
+
+		if (unlikely(!cur)) {
+			mif_err("failed to alloc cpif_page\n");
+			goto fail;
+		}
+		/* link before allocating the page so 'cur' is freed on failure */
+		rpage_arr[i] = cur;
+		cur->page = __dev_alloc_pages(GFP_KERNEL | CPIF_GFP_MASK, pool->page_order);
+		if (unlikely(!cur->page)) {
+			mif_err("failed to get page\n");
+			cur->usable = false;
+			goto fail;
+		}
+		cur->usable = true;
+		cur->offset = 0;
+	}
+
+	tmp_page = kvzalloc(sizeof(struct cpif_page), GFP_KERNEL);
+	if (unlikely(!tmp_page)) {
+		mif_err("failed to allocate temporary page\n");
+		goto fail;
+	}
+	tmp_page->offset = 0;
+	tmp_page->usable = false;
+
+	pool->tmp_page = tmp_page;
+	pool->rpage_arr_idx = 0;
+
+	return pool;
+
+fail:
+	cpif_page_pool_delete(pool);
+	return NULL;
+}
+EXPORT_SYMBOL(cpif_page_pool_create);
+
+/* Page currently backing allocations: tmp page or current recycling page. */
+struct page *cpif_get_cur_page(struct cpif_page_pool *pool, bool used_tmp_alloc)
+{
+	return used_tmp_alloc ? pool->tmp_page->page :
+		pool->recycling_page_arr[pool->rpage_arr_idx]->page;
+}
+EXPORT_SYMBOL(cpif_get_cur_page);
+
+/* Size of the page the current allocation source uses. */
+u64 cpif_cur_page_size(struct cpif_page_pool *pool, bool used_tmp_alloc)
+{
+	return used_tmp_alloc ? pool->tmp_page_size : pool->page_size;
+}
+EXPORT_SYMBOL(cpif_cur_page_size);
+
+#define RECYCLING_MAX_TRIAL 100
+/*
+ * Carve alloc_size bytes out of the current recycling page.
+ *
+ * Allocations are carved downward from the end of each page (offset is
+ * the next carve position; a negative offset means the page is drained).
+ * A drained page becomes reusable again once its refcount drops back to
+ * 1, i.e. every consumer has released it.  The search walks the array,
+ * probing at most RECYCLING_MAX_TRIAL / (page_order + 1) pages, and
+ * returns NULL when no page is ready — the caller then falls back to
+ * cpif_alloc_tmp_page().
+ */
+static void *cpif_alloc_recycling_page(struct cpif_page_pool *pool, u64 alloc_size)
+{
+	u32 ret;
+	u32 idx = pool->rpage_arr_idx;
+	struct cpif_page *cur = pool->recycling_page_arr[idx];
+	int retry_count = RECYCLING_MAX_TRIAL / (pool->page_order + 1);
+
+	if (cur->offset < 0) { /* this page cannot handle next packet */
+		cur->usable = false;
+		cur->offset = 0;
+try_next_rpage:
+		/* advance rpage_arr_idx with wrap-around, then re-read it */
+		if (++idx == pool->rpage_arr_len)
+			pool->rpage_arr_idx = 0;
+		else
+			pool->rpage_arr_idx++;
+
+		idx = pool->rpage_arr_idx;
+		cur = pool->recycling_page_arr[idx];
+	}
+
+	if (page_ref_count(cur->page) == 1) { /* no one uses this page */
+		/* fresh page: start carving from the top end */
+		cur->offset = pool->page_size - alloc_size;
+		cur->usable = true;
+		goto assign_page;
+	}
+
+	if (cur->usable == true) /* page is in use, but still has some space left */
+		goto assign_page;
+
+	/* else, the page is not ready to be used, need to see next one */
+	if (retry_count > 0) {
+		retry_count--;
+		goto try_next_rpage;
+	}
+
+	return NULL;
+
+assign_page:
+	/* hand out the slice at the current offset and move the offset down;
+	 * a resulting negative offset is detected on the next call
+	 */
+	ret = cur->offset;
+	cur->offset -= alloc_size;
+	page_ref_inc(cur->page);
+
+	return page_to_virt(cur->page) + ret;
+}
+
+/*
+ * Fallback allocator: carve alloc_size bytes from a dedicated temporary
+ * page.  When the current tmp page is exhausted, a fresh page of order
+ * pool->page_order is allocated; under memory pressure a single
+ * PAGE_SIZE page is tried instead (only if the request fits).  Returns
+ * NULL only when no page can be allocated at all.
+ */
+static void *cpif_alloc_tmp_page(struct cpif_page_pool *pool, u64 alloc_size)
+{
+	struct cpif_page *tmp = pool->tmp_page;
+	int ret;
+
+	/* new page is required */
+	if (!tmp->usable) {
+		u64 page_order = pool->page_order;
+		struct page *new_pg = __dev_alloc_pages(GFP_ATOMIC | CPIF_GFP_MASK, page_order);
+
+		if (unlikely(!new_pg)) {
+			if (alloc_size > PAGE_SIZE) {
+				mif_err_limited("cannot alloc page for size: %llu\n",
+						alloc_size);
+				return NULL;
+			}
+			/* try PAGE_SIZE */
+			new_pg = __dev_alloc_pages(GFP_ATOMIC | CPIF_GFP_MASK, 0);
+			if (unlikely(!new_pg)) {
+				mif_err_limited("cannot alloc new page\n");
+				return NULL;
+			}
+			page_order = 0;
+		}
+
+		if (tmp->page) /* unref, or possibly free the page */
+			__free_pages(tmp->page, get_order(pool->tmp_page_size));
+		tmp->page = new_pg;
+		pool->tmp_page_size = PAGE_SIZE * (1 << page_order);
+		/* route subsequent cpif_page_alloc() calls here until drained */
+		pool->using_tmp_alloc = true;
+		tmp->usable = true;
+		tmp->offset = pool->tmp_page_size - alloc_size;
+	}
+
+	/* carve downward, like the recycling pages */
+	ret = tmp->offset;
+	tmp->offset -= alloc_size;
+	page_ref_inc(tmp->page);
+	if (tmp->offset < 0) { /* drained page, let pool try recycle page next time */
+		pool->using_tmp_alloc = false;
+		tmp->usable = false;
+	}
+
+	return page_to_virt(tmp->page) + ret;
+}
+
+/*
+ * Allocate @alloc_size bytes from the pool.  Recycling pages are tried
+ * first; a freshly allocated temporary page is the fallback.  On return,
+ * *@used_tmp_alloc records which source served the request.  Returns
+ * NULL when the request exceeds the pool page size or no memory is
+ * available.
+ */
+void *cpif_page_alloc(struct cpif_page_pool *pool, u64 alloc_size, bool *used_tmp_alloc)
+{
+	void *buf;
+
+	if (alloc_size > pool->page_size) {
+		mif_err_limited("requested size exceeds page size. r_size: %llu p_size: %llu\n",
+				alloc_size, pool->page_size);
+		return NULL;
+	}
+
+	if (!pool->using_tmp_alloc) {
+		buf = cpif_alloc_recycling_page(pool, alloc_size);
+		if (buf) {
+			*used_tmp_alloc = false;
+			return buf;
+		}
+	}
+
+	mif_err_limited("cannot recycle page, alloc new one\n");
+	buf = cpif_alloc_tmp_page(pool, alloc_size);
+	if (!buf) {
+		mif_err_limited("failed to tmp page alloc: return\n");
+		return NULL;
+	}
+	*used_tmp_alloc = true;
+
+	return buf;
+}
+EXPORT_SYMBOL(cpif_page_alloc);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung Page Recycling driver");
diff --git a/cpif_page.h b/cpif_page.h
new file mode 100644
index 0000000..996f616
--- /dev/null
+++ b/cpif_page.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#ifndef __CPIF_RX_PAGE_H__
+#define __CPIF_RX_PAGE_H__
+
+#include "modem_prj.h"
+
+#define CPIF_GFP_MASK (__GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
+
+/* One managed page plus its carve-down allocation state. */
+struct cpif_page {
+	struct page *page;	/* backing page (order per pool config) */
+	bool usable;		/* has room left / ready to serve allocations */
+	int offset;		/* next carve position from page start; <0 = drained */
+};
+
+/* Pool of recyclable RX pages with a temporary-page fallback. */
+struct cpif_page_pool {
+	u64 page_order;				/* order of each recycling page */
+	u64 page_size;				/* size of each recycling page */
+	u64 tmp_page_size;			/* size of the current tmp page */
+	struct cpif_page **recycling_page_arr;	/* fixed array of pool pages */
+	struct cpif_page *tmp_page;		/* fallback page slot */
+	u32 rpage_arr_idx;			/* current position in the array */
+	u32 rpage_arr_len;			/* entries in recycling_page_arr */
+	bool using_tmp_alloc;			/* serving from tmp page right now */
+};
+
+#if IS_ENABLED(CONFIG_CPIF_PAGE_RECYCLING)
+void cpif_page_pool_delete(struct cpif_page_pool *pool);
+void cpif_page_init_tmp_page(struct cpif_page_pool *pool);
+struct cpif_page_pool *cpif_page_pool_create(u64 num_page, u64 page_size);
+struct page *cpif_get_cur_page(struct cpif_page_pool *pool, bool used_tmp_alloc);
+u64 cpif_cur_page_size(struct cpif_page_pool *pool, bool used_tmp_alloc);
+void *cpif_page_alloc(struct cpif_page_pool *pool, u64 alloc_size, bool *used_tmp_alloc);
+#else
+static inline void cpif_page_pool_delete(struct cpif_page_pool *pool) { return; }
+static inline void cpif_page_init_tmp_page(struct cpif_page_pool *pool) { return; }
+static inline struct cpif_page_pool *cpif_page_pool_create(u64 num_page,
+ u64 page_size) { return NULL; }
+static inline struct page *cpif_get_cur_page(struct cpif_page_pool *pool,
+ bool used_tmp_alloc) { return NULL; }
+static inline u64 cpif_cur_page_size(struct cpif_page_pool *pool, bool used_tmp_alloc)
+ { return 0; }
+static inline void *cpif_page_alloc(struct cpif_page_pool *pool, u64 alloc_size,
+ bool *used_tmp_alloc) { return NULL; }
+#endif
+
+#endif /* __CPIF_RX_PAGE_H__ */
diff --git a/cpif_pcie_shim_exynos.h b/cpif_pcie_shim_exynos.h
new file mode 100644
index 0000000..3cc3e34
--- /dev/null
+++ b/cpif_pcie_shim_exynos.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * cpif shim layer for exynos SOC PCIE
+ *
+ * Copyright 2023, Google LLC
+ *
+ */
+
+#include <linux/exynos-pci-ctrl.h>
+#include <linux/exynos-pci-noti.h>
+
+typedef struct exynos_pcie_notify pcie_notify_t;
+typedef struct exynos_pcie_register_event pcie_register_event_t;
+
+extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg);
+extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg);
+extern void exynos_pcie_rc_register_dump(int ch_num);
+extern void exynos_pcie_rc_dump_all_status(int ch_num);
+extern void exynos_pcie_rc_print_msi_register(int ch_num);
+extern int exynos_pcie_rc_set_outbound_atu(int ch_num, u32 target_addr, u32 offset, u32 size);
+extern bool exynos_pcie_rc_get_cpl_timeout_state(int ch_num);
+extern void exynos_pcie_rc_set_cpl_timeout_state(int ch_num, bool recovery);
+extern bool exynos_pcie_rc_get_sudden_linkdown_state(int ch_num);
+extern void exynos_pcie_rc_set_sudden_linkdown_state(int ch_num, bool recovery);
+extern void exynos_pcie_rc_force_linkdown_work(int ch_num);
+extern int exynos_pcie_rc_chk_link_status(int ch_num);
+extern int exynos_pcie_rc_l1ss_ctrl(int enable, int id, int ch_num);
+extern int exynos_pcie_poweron(int ch_num, int spd, int width);
+extern int exynos_pcie_poweroff(int ch_num);
+extern int exynos_pcie_get_max_link_speed(int ch_num);
+extern int exynos_pcie_get_max_link_width(int ch_num);
+extern int exynos_pcie_rc_change_link_speed(int ch_num, int target_speed);
+extern void exynos_pcie_set_perst_gpio(int ch_num, bool on);
+extern void exynos_pcie_set_ready_cto_recovery(int ch_num);
+extern int register_separated_msi_vector(int ch_num, irq_handler_t handler,
+ void *context, int *irq_num);
+extern int exynos_pcie_set_msi_ctrl_addr(int num, u64 msi_ctrl_addr);
+
+#define pcie_register_event(event) exynos_pcie_register_event(event)
+#define pcie_deregister_event(event) exynos_pcie_deregister_event(event)
+#define pcie_register_dump(ch) exynos_pcie_rc_register_dump(ch)
+#define pcie_dump_all_status(ch) exynos_pcie_rc_dump_all_status(ch)
+#define pcie_print_rc_msi_register(ch) exynos_pcie_rc_print_msi_register(ch)
+#define pcie_set_outbound_atu(ch, target_addr, offset, size) \
+ exynos_pcie_rc_set_outbound_atu(ch, target_addr, offset, size)
+#define pcie_get_cpl_timeout_state(ch) exynos_pcie_rc_get_cpl_timeout_state(ch)
+#define pcie_set_cpl_timeout_state(ch, recovery) exynos_pcie_rc_set_cpl_timeout_state(ch, recovery)
+#define pcie_get_sudden_linkdown_state(ch) \
+ exynos_pcie_rc_get_sudden_linkdown_state(ch)
+#define pcie_set_sudden_linkdown_state(ch, recovery) \
+ exynos_pcie_rc_set_sudden_linkdown_state(ch, recovery)
+#define pcie_force_linkdown_work(ch) exynos_pcie_rc_force_linkdown_work(ch)
+#define pcie_check_link_status(ch) exynos_pcie_rc_chk_link_status(ch)
+#define pcie_l1ss_ctrl(enable, ch) exynos_pcie_rc_l1ss_ctrl(enable, PCIE_L1SS_CTRL_MODEM_IF, ch)
+#define pcie_poweron(ch, speed, width) exynos_pcie_poweron(ch, speed, width)
+#define pcie_poweroff(ch) exynos_pcie_poweroff(ch)
+#define pcie_get_max_link_speed(ch) exynos_pcie_get_max_link_speed(ch)
+#define pcie_get_max_link_width(ch) exynos_pcie_get_max_link_width(ch)
+#define pcie_change_link_speed(ch, spd) exynos_pcie_rc_change_link_speed(ch, spd)
+#define pcie_set_perst_gpio(ch, on) exynos_pcie_set_perst_gpio(ch, on)
+#define pcie_set_ready_cto_recovery(ch) exynos_pcie_set_ready_cto_recovery(ch)
+#define pcie_register_separated_msi_vector(ch, handler, context, irq) \
+ register_separated_msi_vector(ch, handler, context, irq)
+#define pcie_set_msi_ctrl_addr(num, msi_ctrl_addr) \
+ exynos_pcie_set_msi_ctrl_addr(num, msi_ctrl_addr)
diff --git a/cpif_qos_info.c b/cpif_qos_info.c
new file mode 100644
index 0000000..5d492dc
--- /dev/null
+++ b/cpif_qos_info.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2020, Samsung Electronics.
+ *
+ */
+#include <linux/slab.h>
+
+#include "cpif_qos_info.h"
+#include "modem_v1.h"
+
+static struct hiprio_uid_list g_hiprio_uid_list;
+
+/* Accessor for the module-wide high-priority UID hash table. */
+struct hiprio_uid_list *cpif_qos_get_list(void)
+{
+	return &g_hiprio_uid_list;
+}
+
+/* Look up @uid in the high-priority hash; returns NULL when absent. */
+struct hiprio_uid *cpif_qos_get_node(u32 uid)
+{
+	struct hiprio_uid *node;
+
+	/* walk only the bucket uid hashes into; compare to rule out collisions */
+	hash_for_each_possible(g_hiprio_uid_list.uid_map, node, h_node, uid) {
+		if (node->uid == uid)
+			return node;
+	}
+
+	return NULL;
+}
+
+/**
+ * cpif_qos_add_uid() - Add a UID to the high-priority list.
+ * @uid: UID to mark as high priority.
+ *
+ * Returns false when @uid is already present or allocation fails.
+ */
+bool cpif_qos_add_uid(u32 uid)
+{
+	struct hiprio_uid *new_uid;
+
+	if (cpif_qos_get_node(uid)) {
+		/* %u: uid is u32; %d was a signed/unsigned format mismatch */
+		mif_err("---- uid(%u) already exists in the list\n", uid);
+		return false;
+	}
+	new_uid = kzalloc(sizeof(struct hiprio_uid), GFP_ATOMIC);
+	if (!new_uid)
+		return false;
+
+	new_uid->uid = uid;
+	hash_add(g_hiprio_uid_list.uid_map, &new_uid->h_node, uid);
+
+	return true;
+}
+
+/**
+ * cpif_qos_remove_uid() - Remove a UID from the high-priority list.
+ * @uid: UID to remove.
+ *
+ * Returns false when @uid is not in the list.
+ */
+bool cpif_qos_remove_uid(u32 uid)
+{
+	struct hiprio_uid *node = cpif_qos_get_node(uid);
+
+	if (!node) {
+		/* %u: uid is u32; %d was a signed/unsigned format mismatch */
+		mif_err("---- uid(%u) does not exist in the list\n", uid);
+		return false;
+	}
+	hash_del(&node->h_node);
+	kfree(node);
+
+	return true;
+}
+
+/* sysfs */
+/*
+ * sysfs show handler: print every UID currently in the hiprio hash, one
+ * per line.  Returns bytes written, or 0 when the map is empty.
+ */
+static ssize_t hiprio_uid_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	struct hiprio_uid_list *hiprio_list;
+	struct hiprio_uid *node;
+	ssize_t count = 0;
+	int i = 0;
+
+	hiprio_list = cpif_qos_get_list();
+	if (!hiprio_list) {
+		mif_err("-- hiprio uid list does not exist\n");
+		return -EINVAL;
+	}
+
+	if (hash_empty(hiprio_list->uid_map)) {
+		mif_err("-- there is no hiprio uid\n");
+		return count;
+	}
+
+	mif_info("-- uid list --\n");
+	hash_for_each(hiprio_list->uid_map, i, node, h_node) {
+		/* %u: uid is u32; %d was a signed/unsigned format mismatch */
+		count += scnprintf(buf + count, PAGE_SIZE - count, "%u\n", node->uid);
+		mif_info("index %d: %u\n", i, node->uid);
+	}
+
+	return count;
+}
+
+/*
+ * sysfs store handler: parses "add <uid>" or "rm <uid>".
+ *
+ * NOTE(review): strstr() matches the keyword anywhere in the buffer but
+ * the numeric offsets (buf + 4 / buf + 3) assume the command starts at
+ * the beginning followed by a single separator — confirm inputs are
+ * always of that exact shape.  The parsed long is truncated to u32
+ * without a range check.
+ */
+static ssize_t hiprio_uid_store(struct kobject *kobj,
+		struct kobj_attribute *attr,
+		const char *buf, size_t count)
+{
+	long uid = 0;
+
+	mif_info("cpif_qos command input: %s\n", buf);
+
+	if (strstr(buf, "add")) {
+		if (kstrtol(buf + 4, 10, &uid)) {
+			mif_err("-- failed to parse uid\n");
+			return -EINVAL;
+		}
+		mif_info("-- user requires addition of uid: %ld\n", uid);
+		if (!cpif_qos_add_uid((u32)uid)) {
+			mif_err("-- Adding uid %ld to hiprio list failed\n", uid);
+			return -EINVAL;
+		}
+	} else if (strstr(buf, "rm")) {
+		if (kstrtol(buf + 3, 10, &uid)) {
+			mif_err("-- failed to parse uid\n");
+			return -EINVAL;
+		}
+		mif_info("-- user requires removal of uid: %ld\n", uid);
+		if (!cpif_qos_remove_uid((u32)uid)) {
+			mif_err("-- Removing uid %ld from hiprio list failed\n", uid);
+			return -EINVAL;
+		}
+	} else {
+		mif_err("-- command not valid\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static struct kobject *cpif_qos_kobject;
+static struct kobj_attribute hiprio_uid_attribute = {
+ .attr = {.name = "hiprio_uid", .mode = 0660},
+ .show = hiprio_uid_show,
+ .store = hiprio_uid_store,
+};
+static struct attribute *cpif_qos_attrs[] = {
+ &hiprio_uid_attribute.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(cpif_qos);
+
+/* Init */
+/**
+ * cpif_qos_init_list() - Create the /sys/kernel/cpif_qos node and init the map.
+ *
+ * Returns 0 on success, -EINVAL on kobject/sysfs failure.
+ */
+int cpif_qos_init_list(void)
+{
+	cpif_qos_kobject = kobject_create_and_add("cpif_qos", kernel_kobj);
+	if (!cpif_qos_kobject) {
+		mif_err("kobject_create_and_add() error\n");
+		return -EINVAL;
+	}
+
+	if (sysfs_create_groups(cpif_qos_kobject, cpif_qos_groups)) {
+		mif_err("sysfs_create_groups() error\n");
+		/* drop the reference taken by kobject_create_and_add();
+		 * it was previously leaked on this path
+		 */
+		kobject_put(cpif_qos_kobject);
+		cpif_qos_kobject = NULL;
+		return -EINVAL;
+	}
+
+	hash_init(g_hiprio_uid_list.uid_map);
+
+	return 0;
+}
diff --git a/cpif_qos_info.h b/cpif_qos_info.h
new file mode 100644
index 0000000..7469f18
--- /dev/null
+++ b/cpif_qos_info.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019, Samsung Electronics.
+ *
+ */
+
+#ifndef __CPIF_QOS_INFO_H__
+#define __CPIF_QOS_INFO_H__
+
+#include <linux/types.h>
+#include <linux/hashtable.h>
+
+struct hiprio_uid_list {
+ DECLARE_HASHTABLE(uid_map, 9);
+};
+
+struct hiprio_uid {
+ u32 uid;
+ struct hlist_node h_node;
+};
+
+int cpif_qos_init_list(void);
+struct hiprio_uid *cpif_qos_get_node(u32 uid);
+
+#endif /* __CPIF_QOS_INFO_H__ */
diff --git a/cpif_tp_monitor.c b/cpif_tp_monitor.c
new file mode 100644
index 0000000..aa06da1
--- /dev/null
+++ b/cpif_tp_monitor.c
@@ -0,0 +1,1773 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019-2020, Samsung Electronics.
+ *
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "modem_ctrl.h"
+#include "link_device_memory.h"
+#include "cpif_tp_monitor.h"
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+#include "s51xx_pcie.h"
+#include <dt-bindings/pci/pci.h>
+#endif
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+#include "dit.h"
+#endif
+#if IS_ENABLED(CONFIG_EXYNOS_BTS)
+#include <soc/google/bts.h>
+#endif
+
+static struct cpif_tpmon _tpmon;
+
+/*
+ * Get data
+ */
+/* RX speed */
+/* Current RX throughput in Mbps for the protocol this element monitors. */
+static u32 tpmon_get_rx_speed_mbps(struct tpmon_data *data)
+{
+	struct cpif_tpmon *tpmon;
+	unsigned long mbps;
+
+	if (!data->enable)
+		return 0;
+
+	tpmon = data->tpmon;
+
+	switch (data->proto) {
+	case TPMON_PROTO_TCP:
+		mbps = tpmon->rx_tcp.rx_mbps;
+		break;
+	case TPMON_PROTO_UDP:
+		mbps = tpmon->rx_udp.rx_mbps;
+		break;
+	case TPMON_PROTO_OTHERS:
+		mbps = tpmon->rx_others.rx_mbps;
+		break;
+	case TPMON_PROTO_ALL:
+	default:
+		mbps = tpmon->rx_total.rx_mbps;
+		break;
+	}
+
+	return (u32)mbps;
+}
+
+/*
+ * Convert the bytes accumulated in @rx_data since the previous call into
+ * Mbps (bytes * 8 / ms / 1000).  Returns -EIO when called again too soon
+ * (delta < trigger_msec_min), or — for live measurement only
+ * (@check_stat false) — when the gap is too long to be meaningful
+ * (delta > trigger_msec_max), in which case the speed is reset to 0.
+ * The byte counter is read-and-cleared under tpmon->lock.
+ */
+static int tpmon_calc_rx_speed_internal(
+		struct cpif_tpmon *tpmon, struct cpif_rx_data *rx_data, bool check_stat)
+{
+	u64 rx_bytes;
+	u64 delta_msec;
+	ktime_t curr_time;
+	unsigned long flags;
+
+	curr_time = ktime_get();
+
+	delta_msec = ktime_ms_delta(curr_time, rx_data->prev_time);
+	if (delta_msec < tpmon->trigger_msec_min)
+		return -EIO;
+
+	rx_data->prev_time = curr_time;
+
+	/* atomically consume the byte counter the RX path increments */
+	spin_lock_irqsave(&tpmon->lock, flags);
+	rx_bytes = rx_data->rx_bytes;
+	rx_data->rx_bytes = 0;
+	spin_unlock_irqrestore(&tpmon->lock, flags);
+
+	if (!check_stat && (delta_msec > tpmon->trigger_msec_max)) {
+		rx_data->rx_mbps = 0;
+		return -EIO;
+	}
+
+	rx_data->rx_mbps = rx_bytes * 8 / delta_msec / 1000;
+
+	return 0;
+}
+
+/* Refresh the "_stat" speed counters used for periodic reporting. */
+static void tpmon_stat_rx_speed(struct cpif_tpmon *tpmon)
+{
+	struct cpif_rx_data *stats[] = {
+		&tpmon->rx_total_stat, &tpmon->rx_tcp_stat,
+		&tpmon->rx_udp_stat, &tpmon->rx_others_stat,
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(stats); i++)
+		tpmon_calc_rx_speed_internal(tpmon, stats[i], true);
+}
+
+/*
+ * Recompute live RX speeds and, when PCIe dynamic link-speed control is
+ * enabled, pick a link speed from the measured total throughput:
+ * above (threshold + hysteresis) request the max link speed, below the
+ * threshold drop to GEN1, and inside the hysteresis zone keep whatever
+ * is current.
+ */
+static void tpmon_calc_rx_speed(struct cpif_tpmon *tpmon)
+{
+	struct modem_ctl *mc = tpmon->ld->mc;
+	u32 hysteresis = mc->tp_threshold + mc->tp_hysteresis;
+	int spd;
+	int ret = 0;
+
+	/* only the total-speed result gates the link-speed decision below */
+	ret = tpmon_calc_rx_speed_internal(tpmon, &tpmon->rx_total, false);
+	tpmon_calc_rx_speed_internal(tpmon, &tpmon->rx_tcp, false);
+	tpmon_calc_rx_speed_internal(tpmon, &tpmon->rx_udp, false);
+	tpmon_calc_rx_speed_internal(tpmon, &tpmon->rx_others, false);
+
+	if (tpmon->debug_print && tpmon->rx_total.rx_mbps && !ret) {
+		if (mc->pcie_dynamic_spd_enabled)
+			mif_info("%ldMbps(%ld/%ld/%ld), hysteresis_zone(%d, %d]\n",
+				 tpmon->rx_total.rx_mbps, tpmon->rx_tcp.rx_mbps,
+				 tpmon->rx_udp.rx_mbps, tpmon->rx_others.rx_mbps,
+				 mc->tp_threshold, hysteresis);
+		else
+			mif_info("%ldMbps(%ld/%ld/%ld)\n",
+				 tpmon->rx_total.rx_mbps, tpmon->rx_tcp.rx_mbps,
+				 tpmon->rx_udp.rx_mbps, tpmon->rx_others.rx_mbps);
+	}
+
+	/* keep the reporting counters moving while the monitor is idle */
+	if (!tpmon_check_active())
+		tpmon_stat_rx_speed(tpmon);
+
+	if (ret || !mc->pcie_dynamic_spd_enabled)
+		return;
+
+	// Skip speed change if it is in hysteresis zone
+	if (tpmon->rx_total.rx_mbps > mc->tp_threshold
+			&& tpmon->rx_total.rx_mbps <= hysteresis)
+		return;
+
+	if (tpmon->rx_total.rx_mbps > hysteresis)
+		spd = pcie_get_max_link_speed(mc->pcie_ch_num);
+	else
+		spd = LINK_SPEED_GEN1;
+
+	if (spd != tpmon->current_speed) {
+		mif_info("Change link from GEN%d to GEN%d (rx: %ldMbps)\n",
+			 tpmon->current_speed, spd, tpmon->rx_total.rx_mbps);
+		tpmon->current_speed = spd;
+	}
+	/* NOTE(review): invoked even when spd == current_speed — confirm the
+	 * redundant request is intentional (e.g. re-asserting after link events)
+	 */
+	pcie_change_link_speed(mc->pcie_ch_num, spd);
+}
+
+/* Queue status */
+/* Occupancy snapshot of the queue this element is configured to watch. */
+static u32 tpmon_get_q_status(struct tpmon_data *data)
+{
+	u32 occupancy = 0;
+
+	if (!data->enable)
+		return 0;
+
+	switch (data->measure) {
+	case TPMON_MEASURE_NETDEV_Q:
+		occupancy = data->tpmon->q_status_netdev_backlog;
+		break;
+	case TPMON_MEASURE_PKTPROC_DL_Q:
+		occupancy = data->tpmon->q_status_pktproc_dl;
+		break;
+	case TPMON_MEASURE_DIT_SRC_Q:
+		occupancy = data->tpmon->q_status_dit_src;
+		break;
+	default:
+		mif_err_limited("measure %d is not valid\n", data->measure);
+		break;
+	}
+
+	return occupancy;
+}
+
+/* Sum pending usage across all active pktproc DL queues. */
+static int tpmon_calc_q_status_pktproc_dl(struct cpif_tpmon *tpmon)
+{
+	struct mem_link_device *mld = ld_to_mem_link_device(tpmon->ld);
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	u32 usage = 0;
+	int i;
+
+	if (!pktproc_check_support(ppa))
+		return 0;
+
+	for (i = 0; i < ppa->num_queue; i++) {
+		int q_usage;
+
+		if (!pktproc_check_active(ppa, i))
+			continue;
+
+		/* read once; the original called the helper twice per queue */
+		q_usage = pktproc_get_usage_fore_rear(ppa->q[i]);
+		if (q_usage > 0)
+			usage += q_usage;
+	}
+
+	tpmon->q_status_pktproc_dl = usage;
+
+	return 0;
+}
+
+/*
+ * Snapshot the DIT RX source-queue occupancy (0 when DIT is not built
+ * in).  -EPERM from dit_get_src_usage() is tolerated; other errors are
+ * propagated without updating the snapshot.
+ */
+static int tpmon_calc_q_status_dit_src(struct cpif_tpmon *tpmon)
+{
+	u32 usage = 0;
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+	int ret = dit_get_src_usage(DIT_DIR_RX, &usage);
+
+	if (ret && (ret != -EPERM)) {
+		mif_err_limited("dit_get_src_usage() error:%d\n", ret);
+		return ret;
+	}
+#endif
+
+	tpmon->q_status_dit_src = usage;
+
+	return 0;
+}
+
+/*
+ * Sum the per-cpu netdev backlog (input_queue_tail - input_queue_head).
+ *
+ * Iterate with for_each_possible_cpu() instead of a hard-coded count
+ * (CONFIG_VENDOR_NR_CPUS or 8): the fixed loop could touch per-cpu data
+ * of CPU ids that do not exist on this system, or miss CPUs beyond the
+ * constant.
+ */
+static int tpmon_calc_q_status_netdev_backlog(struct cpif_tpmon *tpmon)
+{
+	struct softnet_data *sd;
+	u32 usage = 0;
+	int i;
+
+	for_each_possible_cpu(i) {
+		sd = &per_cpu(softnet_data, i);
+		if (sd->input_queue_tail > sd->input_queue_head)
+			usage += sd->input_queue_tail - sd->input_queue_head;
+	}
+
+	tpmon->q_status_netdev_backlog = usage;
+
+	return 0;
+}
+
+/* Refresh every queue-occupancy snapshot used as a boost trigger. */
+static void tpmon_calc_q_status(struct cpif_tpmon *tpmon)
+{
+	tpmon_calc_q_status_pktproc_dl(tpmon);
+	tpmon_calc_q_status_dit_src(tpmon);
+	tpmon_calc_q_status_netdev_backlog(tpmon);
+}
+
+/* Information */
+/*
+ * Rate-limited one-line status dump: per-protocol DL speed, queue
+ * occupancies, legacy packet count (cleared after printing) and the
+ * system tcp_rmem tuning of init_net.
+ */
+static void tpmon_print_stat(struct cpif_tpmon *tpmon)
+{
+	mif_info_limited("DL:%ldMbps(%ld/%ld/%ld) Q:%d/%d/%d/%d tcp_rmem:%d/%d/%d\n",
+		tpmon->rx_total_stat.rx_mbps,
+		tpmon->rx_tcp_stat.rx_mbps,
+		tpmon->rx_udp_stat.rx_mbps,
+		tpmon->rx_others_stat.rx_mbps,
+		tpmon->q_status_pktproc_dl,
+		tpmon->q_status_dit_src,
+		tpmon->q_status_netdev_backlog,
+		tpmon->legacy_packet_count,
+		init_net.ipv4.sysctl_tcp_rmem[0], init_net.ipv4.sysctl_tcp_rmem[1],
+		init_net.ipv4.sysctl_tcp_rmem[2]);
+
+	/* counter is cumulative since the last print */
+	tpmon->legacy_packet_count = 0;
+}
+
+/* Check boost/unboost */
+/*
+ * Decide whether @data should move to a higher boost level.
+ *
+ * The usage source (data->get_data) is compared against the threshold
+ * table; the first threshold not exceeded determines the candidate
+ * level.  Boosting is refused when another element acting on the same
+ * target (but a different measure) already sits at a higher level, so
+ * measures for one target cannot fight each other.  On success the
+ * level/threshold positions are advanced, need_boost is set, and true
+ * is returned.
+ */
+static bool tpmon_check_to_boost(struct tpmon_data *data)
+{
+	int usage = 0;
+	int i;
+	struct cpif_tpmon *tpmon = data->tpmon;
+	struct tpmon_data *all_data = NULL;
+
+	if (!data->enable)
+		return false;
+
+	if (!data->get_data) {
+		mif_err_limited("get_data is null:%s\n", data->name);
+		return false;
+	}
+
+	/* yield to a sibling measure already boosted higher for this target */
+	list_for_each_entry(all_data, &tpmon->all_data_list, data_node) {
+		if ((data->target == all_data->target) &&
+				(data->measure != all_data->measure) &&
+				(data->curr_level_pos < all_data->curr_level_pos)) {
+			return false;
+		}
+	}
+
+	usage = data->get_data(data);
+	if (usage < 0) {
+		mif_err_limited("get_data(%s) error:%d\n", data->name, usage);
+		return false;
+	}
+
+	/* i = index of first threshold above usage = candidate level */
+	for (i = 0; i < data->num_threshold; i++)
+		if (usage < data->threshold[i])
+			break;
+
+	/* only ever boost upward here; unboost is handled separately */
+	if (i <= data->curr_level_pos)
+		return false;
+
+	if (i >= data->num_level) {
+		mif_err_limited("Invalid level:%s %d %d\n",
+			data->name, i, data->num_level);
+		return false;
+	}
+
+	data->prev_level_pos = data->curr_level_pos;
+	data->curr_level_pos = i;
+
+	/* threshold position trails the level by one (0 at level 0) */
+	data->prev_threshold_pos = data->curr_threshold_pos;
+	if (data->curr_level_pos)
+		data->curr_threshold_pos = data->curr_level_pos - 1;
+	else
+		data->curr_threshold_pos = 0;
+
+	data->need_boost = true;
+
+	mif_info("%s %d->%d (usage:%d unboost@%dMbps)\n",
+		data->name, data->prev_level_pos, data->curr_level_pos,
+		usage, data->unboost_threshold_mbps[data->curr_threshold_pos]);
+
+	return true;
+}
+
+/*
+ * Decide whether @data should step one level down.
+ *
+ * The element unboosts only after the monitored RX speed has stayed
+ * below the current unboost threshold for boost_hold_msec continuously;
+ * any sample at or above the threshold restarts the hold timer
+ * (prev_unboost_time).  Returns true when a level was dropped.
+ */
+static bool tpmon_check_to_unboost(struct tpmon_data *data)
+{
+	ktime_t curr_time;
+	u64 delta_msec;
+
+	if (!data->enable)
+		return false;
+
+	/* already at the base level: nothing to drop */
+	if (!data->curr_level_pos)
+		return false;
+
+	curr_time = ktime_get();
+	if (!data->prev_unboost_time) {
+		/* first low sample: arm the hold timer */
+		data->prev_unboost_time = curr_time;
+		return false;
+	}
+
+	if ((tpmon_get_rx_speed_mbps(data) >=
+			data->unboost_threshold_mbps[data->curr_threshold_pos])) {
+		/* speed recovered: restart the hold period */
+		data->prev_unboost_time = curr_time;
+		return false;
+	}
+
+	delta_msec = ktime_ms_delta(curr_time, data->prev_unboost_time);
+	if (delta_msec < data->tpmon->boost_hold_msec)
+		return false;
+
+	data->prev_level_pos = data->curr_level_pos;
+	if (data->curr_level_pos > 0)
+		data->curr_level_pos--;
+
+	data->prev_threshold_pos = data->curr_threshold_pos;
+	if (data->curr_threshold_pos > 0)
+		data->curr_threshold_pos--;
+
+	mif_info("%s %d->%d (%ldMbps < %dMbps)\n",
+		data->name, data->prev_level_pos, data->curr_level_pos,
+		data->tpmon->rx_total.rx_mbps,
+		data->unboost_threshold_mbps[data->prev_threshold_pos]);
+
+	data->prev_unboost_time = 0;
+
+	return true;
+}
+
+/*
+ * Distribute the set bits of @mask across @q_num queue slots round-robin.
+ *
+ * With @get_mask true each q[] entry accumulates a CPU bitmask; with
+ * false it receives a CPU number (bit position).  When the mask has
+ * fewer set bits than queues it is rescanned from the start so every
+ * queue gets an assignment; once each queue has one, the extra rescan
+ * stops (masks_lt_qnum).  A zero mask leaves q[] untouched.
+ */
+static void tpmon_get_cpu_per_queue(u32 mask, u32 *q, unsigned int q_num,
+		bool get_mask)
+{
+	u32 cur_mask = mask, bit_pos = 0;
+	bool masks_lt_qnum = false;
+	unsigned int idx = 0;
+
+	if (!mask)
+		return;
+
+	while (cur_mask || idx < q_num) {
+		u32 bit_mask;
+
+		if (!cur_mask) {
+			/* consumed all set bits: wrap and scan the mask again */
+			cur_mask = mask;
+			bit_pos = 0;
+
+			if (idx < q_num)
+				masks_lt_qnum = true;
+		}
+
+		bit_mask = (u32)BIT(bit_pos);
+
+		if (bit_mask & cur_mask) {
+			cur_mask &= ~bit_mask;
+			if (get_mask)
+				q[idx % q_num] |= bit_mask;
+			else
+				q[idx % q_num] = bit_pos;
+			idx++;
+		}
+
+		/* every queue served during the wrap pass: stop */
+		if (masks_lt_qnum && idx == q_num)
+			cur_mask = 0;
+
+		bit_pos++;
+	}
+}
+
+/*
+ * Target
+ */
+/* RPS */
+#if IS_ENABLED(CONFIG_RPS)
+/* From net/core/net-sysfs.c */
+/*
+ * Parse a hex CPU bitmap in @buf and install it as the RPS map of
+ * @queue, mirroring store_rps_map() in net/core/net-sysfs.c: build the
+ * new map from online CPUs only, publish it with rcu_assign_pointer()
+ * under a local mutex, adjust the rps_needed static branch, and free the
+ * old map after a grace period.  Returns @len on success or a negative
+ * errno.
+ */
+static ssize_t tpmon_store_rps_map(struct netdev_rx_queue *queue,
+		const char *buf, ssize_t len)
+{
+	struct rps_map *old_map, *map;
+	cpumask_var_t mask;
+	int err, cpu, i;
+	static DEFINE_MUTEX(rps_map_mutex);
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+	if (err) {
+		free_cpumask_var(mask);
+		return err;
+	}
+
+	map = kzalloc(max_t(unsigned int,
+			RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
+			GFP_KERNEL);
+	if (!map) {
+		free_cpumask_var(mask);
+		return -ENOMEM;
+	}
+
+	/* only CPUs that are both requested and online enter the map */
+	i = 0;
+	for_each_cpu_and(cpu, mask, cpu_online_mask)
+		map->cpus[i++] = cpu;
+
+	if (i) {
+		map->len = i;
+	} else {
+		/* empty map means "disable RPS" — publish NULL instead */
+		kfree(map);
+		map = NULL;
+	}
+
+	mutex_lock(&rps_map_mutex);
+	old_map = rcu_dereference_protected(queue->rps_map,
+			mutex_is_locked(&rps_map_mutex));
+	rcu_assign_pointer(queue->rps_map, map);
+
+	if (map)
+		static_branch_inc(&rps_needed);
+	if (old_map)
+		static_branch_dec(&rps_needed);
+
+	mutex_unlock(&rps_map_mutex);
+
+	if (old_map)
+		kfree_rcu(old_map, rcu);
+
+	free_cpumask_var(mask);
+	return len;
+}
+
+/*
+ * Apply the current level's CPU mask as the RPS map of every CPIF net
+ * device (and of any attached CLAT device).  With exclusive pktproc
+ * IRQs the mask is split across the RX queues via
+ * tpmon_get_cpu_per_queue(); otherwise all bits go to queue 0.  The
+ * CLAT netdev is pinned with dev_hold() under clat_lock before its map
+ * is written.
+ */
+static void tpmon_set_rps(struct tpmon_data *data)
+{
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	struct mem_link_device *mld = container_of(data->tpmon->ld,
+			struct mem_link_device, link_dev);
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+#endif
+	struct io_device *iod;
+	unsigned int num_queue = 1;
+	unsigned int i, len;
+	u32 val, *rxq_mask;
+
+	if (!data->enable)
+		return;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	if (ppa->use_exclusive_irq)
+		num_queue = ppa->num_queue;
+#endif
+
+	rxq_mask = kzalloc(sizeof(u32) * num_queue, GFP_KERNEL);
+	if (!rxq_mask)
+		return;
+
+	val = tpmon_get_curr_level(data);
+	tpmon_get_cpu_per_queue(val, rxq_mask, num_queue, true);
+
+	list_for_each_entry(iod, &data->tpmon->net_node_list, node_all_ndev) {
+		char mask[MAX_RPS_STRING] = {};
+		unsigned long flags;
+		int ret;
+
+		if (!iod->name)
+			continue;
+
+		if (!iod->ndev)
+			continue;
+
+		/* per-queue share of the level mask, rendered as hex */
+		for (i = 0; i < num_queue; i++) {
+			len = scnprintf(mask, MAX_RPS_STRING, "%x",
+					rxq_mask[i]);
+
+			ret = (int)tpmon_store_rps_map(&iod->ndev->_rx[i],
+					mask, len);
+			if (ret < 0) {
+				mif_err("tpmon_store_rps_map() error:%d\n", ret);
+				goto out;
+			}
+		}
+
+		spin_lock_irqsave(&iod->clat_lock, flags);
+		if (!iod->clat_ndev) {
+			spin_unlock_irqrestore(&iod->clat_lock, flags);
+			continue;
+		}
+
+		/* keep the CLAT device alive while writing outside the lock */
+		dev_hold(iod->clat_ndev);
+		spin_unlock_irqrestore(&iod->clat_lock, flags);
+
+		/* CLAT gets the whole level mask on its single RX queue */
+		len = scnprintf(mask, MAX_RPS_STRING, "%x", val);
+		ret = (int)tpmon_store_rps_map(&(iod->clat_ndev->_rx[0]),
+				mask, len);
+		dev_put(iod->clat_ndev);
+
+		if (ret < 0) {
+			mif_err("tpmon_store_rps_map() clat error:%d\n", ret);
+			break;
+		}
+	}
+
+	for (i = 0; i < num_queue; i++)
+		mif_info("%s (rxq[%u] mask:0x%02x)\n", data->name, i, rxq_mask[i]);
+
+out:
+	kfree(rxq_mask);
+}
+#endif
+
+/* GRO flush timeout */
+/*
+ * Apply the current level as the GRO flush timeout (ns) on whichever
+ * netdev does RX aggregation: each exclusive-IRQ pktproc queue netdev,
+ * or the shared dummy netdev otherwise.  When DIT provides the RX
+ * netdev, the timeout moves there and the dummy netdev's is cleared.
+ */
+static void tpmon_set_gro(struct tpmon_data *data)
+{
+	struct mem_link_device *mld = container_of(data->tpmon->ld,
+			struct mem_link_device, link_dev);
+	long timeout;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	int i;
+#endif
+
+	if (!data->enable)
+		return;
+
+	timeout = tpmon_get_curr_level(data);
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	if (ppa->use_exclusive_irq) {
+		for (i = 0; i < ppa->num_queue; i++) {
+			struct pktproc_queue *q = ppa->q[i];
+
+			q->netdev.gro_flush_timeout = timeout;
+		}
+	} else {
+		mld->dummy_net.gro_flush_timeout = timeout;
+	}
+#else
+	mld->dummy_net.gro_flush_timeout = timeout;
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+	if (dit_get_netdev()) {
+		/* DIT owns RX: aggregate there, not on the dummy netdev */
+		dit_get_netdev()->gro_flush_timeout = timeout;
+		mld->dummy_net.gro_flush_timeout = 0;
+	}
+#endif
+
+	mif_info("%s (flush timeout:%ld)\n", data->name, timeout);
+}
+
+/* IRQ affinity */
+#if IS_ENABLED(CONFIG_MCU_IPC)
+/*
+ * Pin the mailbox RX interrupt(s) to CPUs chosen from the current level
+ * mask — one CPU per pktproc queue with exclusive IRQs, otherwise one
+ * for the default queue.
+ *
+ * Fixes: a stray closing brace after the kzalloc NULL-check closed the
+ * function body early, breaking the build.  The per-queue log now also
+ * reports the CPU actually assigned (q_cpu[i]) instead of the raw level
+ * mask, matching the pcie/dit variants.
+ */
+static void tpmon_set_irq_affinity_mbox(struct tpmon_data *data)
+{
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	struct mem_link_device *mld = ld_to_mem_link_device(data->tpmon->ld);
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+#endif
+	unsigned int num_queue = 1;
+	unsigned int i;
+	u32 val, *q_cpu;
+
+	if (!data->enable)
+		return;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	if (ppa->use_exclusive_irq)
+		num_queue = ppa->num_queue;
+#endif
+
+	q_cpu = kzalloc(sizeof(u32) * num_queue, GFP_KERNEL);
+	if (!q_cpu)
+		return;
+
+	val = tpmon_get_curr_level(data);
+	tpmon_get_cpu_per_queue(val, q_cpu, num_queue, false);
+
+	for (i = 0; i < num_queue; i++) {
+		int irq_idx = data->extra_idx;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+		if (ppa->use_exclusive_irq)
+			irq_idx = ppa->q[i]->irq_idx;
+#endif
+
+		if (cp_mbox_get_affinity(irq_idx) == q_cpu[i]) {
+			mif_info("skip to set same cpu_num for %s (CPU:%u)\n",
+				 data->name, q_cpu[i]);
+			continue;
+		}
+
+		mif_info("%s (CPU:%u)\n", data->name, q_cpu[i]);
+		cp_mbox_set_affinity(irq_idx, q_cpu[i]);
+	}
+
+	kfree(q_cpu);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+/*
+ * Record the desired CPU for each PCIe MSI queue interrupt from the
+ * current level mask and apply it via s5100_set_pcie_irq_affinity().
+ * The base MSI interrupt is always pinned to data->extra_idx; without
+ * exclusive pktproc IRQs the queue CPUs also fall back to extra_idx.
+ */
+static void tpmon_set_irq_affinity_pcie(struct tpmon_data *data)
+{
+	struct mem_link_device *mld = ld_to_mem_link_device(data->tpmon->ld);
+	struct modem_ctl *mc = data->tpmon->ld->mc;
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	unsigned int num_queue = 1;
+	unsigned int i;
+	u32 val, *q_cpu;
+#endif
+
+	if (!data->enable)
+		return;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	if (ppa->use_exclusive_irq)
+		num_queue = ppa->num_queue;
+
+	q_cpu = kzalloc(sizeof(u32) * num_queue, GFP_KERNEL);
+	if (!q_cpu)
+		return;
+
+	val = tpmon_get_curr_level(data);
+	tpmon_get_cpu_per_queue(val, q_cpu, num_queue, false);
+
+	for (i = 0; i < num_queue; i++) {
+		/* stop at the first queue without a registered IRQ */
+		if (!ppa->q[i]->irq)
+			break;
+
+		if (!ppa->use_exclusive_irq)
+			q_cpu[i] = data->extra_idx;
+
+		mif_info("%s (q[%u] cpu:%u)\n", data->name, i, q_cpu[i]);
+		mld->msi_irq_q_cpu[i] = q_cpu[i];
+	}
+
+	kfree(q_cpu);
+#endif
+
+	/* The affinity of msi_irq_base is fixed, use the extra_idx */
+	mld->msi_irq_base_cpu = data->extra_idx;
+	s5100_set_pcie_irq_affinity(mc);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+/* Steer the DIT interrupt to the CPU selected by the current tpmon level. */
+static void tpmon_set_irq_affinity_dit(struct tpmon_data *data)
+{
+	u32 level, target_cpu[1];
+
+	if (!data->enable)
+		return;
+
+	level = tpmon_get_curr_level(data);
+	tpmon_get_cpu_per_queue(level, target_cpu, 1, false);
+
+	/* nothing to do when the affinity already matches */
+	if (dit_get_irq_affinity() == target_cpu[0]) {
+		mif_info("skip to set same cpu_num for %s (CPU:%u)\n",
+			 data->name, target_cpu[0]);
+		return;
+	}
+
+	mif_info("%s (CPU:%u)\n", data->name, target_cpu[0]);
+	dit_set_irq_affinity(target_cpu[0]);
+}
+#endif
+
+/* Frequency */
+#if IS_ENABLED(CONFIG_EXYNOS_PM_QOS)
+/* Request the current level as an exynos PM QoS value (MIF/INT frequency). */
+static void tpmon_set_exynos_pm_qos(struct tpmon_data *data)
+{
+	u32 val;
+
+	if (!data->enable)
+		return;
+
+	/* extra_data holds the exynos_pm_qos_request chosen in tpmon_set_target() */
+	if (!data->extra_data)
+		return;
+
+	val = tpmon_get_curr_level(data);
+
+	mif_info("%s (freq:%d)\n", data->name, val);
+
+	exynos_pm_qos_update_request(data->extra_data, val);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_CPU_FREQ)
+/* Request the current level as a cpufreq min/max QoS value. */
+static void tpmon_set_cpu_freq(struct tpmon_data *data)
+{
+	u32 val;
+
+	if (!data->enable)
+		return;
+
+	/* extra_data holds the freq_qos_request chosen in tpmon_set_cpufreq() */
+	if (!data->extra_data)
+		return;
+
+	val = tpmon_get_curr_level(data);
+
+	mif_info("%s (freq:%d)\n", data->name, val);
+
+	freq_qos_update_request((struct freq_qos_request *)data->extra_data, val);
+}
+
+/*
+ * cpufreq policy notifier: when a policy is created after probe (e.g. a
+ * cluster comes up late), add the freq QoS requests that
+ * tpmon_set_cpufreq() could not register at parse time.
+ */
+static int tpmon_cpufreq_nb(struct notifier_block *nb,
+		unsigned long event, void *arg)
+{
+	struct cpufreq_policy *policy = arg;
+	struct cpif_tpmon *tpmon = &_tpmon;
+	struct tpmon_data *data;
+
+	if (event != CPUFREQ_CREATE_POLICY)
+		return NOTIFY_OK;
+
+	list_for_each_entry(data, &tpmon->all_data_list, data_node) {
+		switch (data->target) {
+		case TPMON_TARGET_CPU_CL0:
+		case TPMON_TARGET_CPU_CL1:
+		case TPMON_TARGET_CPU_CL2:
+			/* extra_idx is the first CPU of the cluster this data drives */
+			if (policy->cpu == data->extra_idx) {
+				mif_info("freq_qos_add_request for cpu%d min\n", policy->cpu);
+#if IS_ENABLED(CONFIG_ARM_FREQ_QOS_TRACER)
+				freq_qos_tracer_add_request(&policy->constraints,
+					data->extra_data, FREQ_QOS_MIN, PM_QOS_DEFAULT_VALUE);
+#else
+				freq_qos_add_request(&policy->constraints,
+					data->extra_data, FREQ_QOS_MIN, PM_QOS_DEFAULT_VALUE);
+#endif
+			}
+			break;
+		case TPMON_TARGET_CPU_CL0_MAX:
+		case TPMON_TARGET_CPU_CL1_MAX:
+		case TPMON_TARGET_CPU_CL2_MAX:
+			if (policy->cpu == data->extra_idx) {
+				mif_info("freq_qos_add_request for cpu%d max\n", policy->cpu);
+#if IS_ENABLED(CONFIG_ARM_FREQ_QOS_TRACER)
+				freq_qos_tracer_add_request(&policy->constraints,
+					data->extra_data, FREQ_QOS_MAX, PM_QOS_DEFAULT_VALUE);
+#else
+				freq_qos_add_request(&policy->constraints,
+					data->extra_data, FREQ_QOS_MAX, PM_QOS_DEFAULT_VALUE);
+#endif
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	return NOTIFY_OK;
+}
+#endif
+
+/* PCIe power */
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+/*
+ * Enable/disable PCIe L1ss low-power states according to the current level.
+ * Skipped while the link is down; pcie_check_lock serializes against
+ * link power transitions.
+ */
+static void tpmon_set_pci_low_power(struct tpmon_data *data)
+{
+	struct modem_ctl *mc = data->tpmon->ld->mc;
+	u32 val;
+
+	if (!data->enable)
+		return;
+
+	mutex_lock(&mc->pcie_check_lock);
+	if (!mc->pcie_powered_on || s51xx_check_pcie_link_status(mc->pcie_ch_num) == 0)
+		goto out;
+
+	val = tpmon_get_curr_level(data);
+	mif_info("%s (enable:%u)\n", data->name, val);
+	s51xx_pcie_l1ss_ctrl((int)val, mc->pcie_ch_num);
+
+out:
+	mutex_unlock(&mc->pcie_check_lock);
+}
+#endif
+
+/* Bus */
+#if IS_ENABLED(CONFIG_EXYNOS_BTS)
+/* Toggle the "cp_throughput" BTS scenario: level != 0 adds it, 0 removes it. */
+static void tpmon_set_bts(struct tpmon_data *data)
+{
+	u32 val;
+
+	if (!data->enable)
+		return;
+
+	val = tpmon_get_curr_level(data);
+
+	mif_info("%s (val:%d)\n", data->name, val);
+
+	if (val)
+		bts_add_scenario(data->tpmon->bts_scen_index);
+	else
+		bts_del_scenario(data->tpmon->bts_scen_index);
+}
+#endif
+
+/*
+ * Work
+ */
+/* Monitor work */
+/*
+ * Periodic monitor: refreshes rx/queue statistics, applies unboost (or
+ * initial levels when need_init is set) and re-queues itself until the
+ * throughput has stayed below monitor_stop_mbps for monitor_hold_msec.
+ */
+static void tpmon_monitor_work(struct work_struct *ws)
+{
+	struct cpif_tpmon *tpmon = container_of(ws,
+		struct cpif_tpmon, monitor_dwork.work);
+	struct tpmon_data *data;
+	ktime_t curr_time;
+	u64 delta_msec;
+
+	if (tpmon_check_active()) {
+		tpmon_stat_rx_speed(tpmon);
+		tpmon_calc_q_status(tpmon);
+		tpmon_print_stat(tpmon);
+	}
+
+	/* user-controlled levels: never auto-unboost, just keep monitoring */
+	if (tpmon->use_user_level)
+		goto run_again;
+
+	list_for_each_entry(data, &tpmon->all_data_list, data_node) {
+		/* need_init: re-apply every data's (reset) level once, then stop */
+		if (atomic_read(&tpmon->need_init)) {
+			data->set_data(data);
+			continue;
+		}
+
+		if (tpmon_check_to_unboost(data))
+			data->set_data(data);
+	}
+
+	if (atomic_read(&tpmon->need_init)) {
+		atomic_set(&tpmon->need_init, 0);
+		return;
+	}
+
+	curr_time = ktime_get();
+	if (!tpmon->prev_monitor_time)
+		tpmon->prev_monitor_time = curr_time;
+
+	/* traffic still high: restart the low-speed hold timer */
+	if (tpmon->rx_total_stat.rx_mbps >= tpmon->monitor_stop_mbps) {
+		tpmon->prev_monitor_time = 0;
+		goto run_again;
+	}
+
+	delta_msec = ktime_ms_delta(curr_time, tpmon->prev_monitor_time);
+	if (delta_msec < tpmon->monitor_hold_msec)
+		goto run_again;
+
+	if (tpmon_check_active())
+		tpmon_stop();
+
+	mif_info("monitor is stopped\n");
+
+	return;
+
+run_again:
+	queue_delayed_work(tpmon->monitor_wq, &tpmon->monitor_dwork,
+		msecs_to_jiffies(tpmon->monitor_interval_msec));
+}
+
+/* Boost work */
+/*
+ * Apply all pending boosts flagged by tpmon_check_to_boost() and make sure
+ * the monitor work is running to unboost them later.
+ */
+static void tpmon_boost_work(struct work_struct *ws)
+{
+	struct cpif_tpmon *tpmon = container_of(ws,
+		struct cpif_tpmon, boost_dwork.work);
+	struct tpmon_data *data;
+
+	list_for_each_entry(data, &tpmon->all_data_list, data_node) {
+		if (!data->set_data) {
+			mif_err_limited("set_data is null:%s\n", data->name);
+			continue;
+		}
+
+		if (data->need_boost) {
+			mif_info("set data name:%s\n", data->name);
+			data->set_data(data);
+			data->need_boost = false;
+		}
+	}
+
+	if (!tpmon_check_active()) {
+		mif_info("start monitor\n");
+		atomic_set(&tpmon->active, 1);
+		queue_delayed_work(tpmon->monitor_wq, &tpmon->monitor_dwork, 0);
+	}
+}
+
+/*
+ * Control
+ */
+/*
+ * Account one received skb into the total and per-protocol (TCP/UDP/other)
+ * byte counters. Called on the rx data path; counters are protected by
+ * tpmon->lock.
+ */
+void tpmon_add_rx_bytes(struct sk_buff *skb)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+	u16 proto = 0;
+	unsigned long flags;
+
+	switch (ip_hdr(skb)->version) {
+	case 4:
+		proto = ip_hdr(skb)->protocol;
+		break;
+	case 6:
+		proto = ipv6_hdr(skb)->nexthdr;
+		/* for a fragment, the real protocol is in the fragment header */
+		if (proto == IPPROTO_FRAGMENT)
+			proto = skb->data[sizeof(struct ipv6hdr)];
+		break;
+	default:
+		/* still counted below (proto 0 falls into "others") */
+		mif_err_limited("Non IPv4/IPv6 packet:0x%x\n",
+			ip_hdr(skb)->version);
+		break;
+	}
+
+	spin_lock_irqsave(&tpmon->lock, flags);
+	tpmon->rx_total.rx_bytes += skb->len;
+	tpmon->rx_total_stat.rx_bytes += skb->len;
+	switch (proto) {
+	case IPPROTO_TCP:
+		tpmon->rx_tcp.rx_bytes += skb->len;
+		tpmon->rx_tcp_stat.rx_bytes += skb->len;
+		break;
+	case IPPROTO_UDP:
+		tpmon->rx_udp.rx_bytes += skb->len;
+		tpmon->rx_udp_stat.rx_bytes += skb->len;
+		break;
+	default:
+		tpmon->rx_others.rx_bytes += skb->len;
+		tpmon->rx_others_stat.rx_bytes += skb->len;
+		break;
+	}
+	spin_unlock_irqrestore(&tpmon->lock, flags);
+}
+EXPORT_SYMBOL(tpmon_add_rx_bytes);
+
+/* Account packets received through the legacy (non-pktproc) buffers. */
+void tpmon_add_legacy_packet_count(u32 count)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+
+	/* NOTE(review): increment is not under tpmon->lock; racy if callers
+	 * run concurrently — confirm single-context callers.
+	 */
+	tpmon->legacy_packet_count += count;
+}
+EXPORT_SYMBOL(tpmon_add_legacy_packet_count);
+
+/* Register a net device node so its backlog queue can be monitored. */
+void tpmon_add_net_node(struct list_head *node)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tpmon->lock, flags);
+
+	list_add_tail(node, &tpmon->net_node_list);
+
+	spin_unlock_irqrestore(&tpmon->lock, flags);
+}
+EXPORT_SYMBOL(tpmon_add_net_node);
+
+/*
+ * Re-apply the current level of the first tpmon data whose name starts
+ * with @name (prefix match via strncmp/strlen).
+ */
+void tpmon_reset_data(char *name)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+	struct tpmon_data *data;
+
+	list_for_each_entry(data, &tpmon->all_data_list, data_node) {
+		if (strncmp(data->name, name, strlen(name)) == 0) {
+			/* set_data may be unset if target setup failed; see boost work */
+			if (data->set_data)
+				data->set_data(data);
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL(tpmon_reset_data);
+
+/* Init */
+/* Clear all rx counters, queue status and per-data boost bookkeeping. */
+static int tpmon_init_params(struct cpif_tpmon *tpmon)
+{
+	struct tpmon_data *data;
+
+	memset(&tpmon->rx_total, 0, sizeof(struct cpif_rx_data));
+	memset(&tpmon->rx_tcp, 0, sizeof(struct cpif_rx_data));
+	memset(&tpmon->rx_udp, 0, sizeof(struct cpif_rx_data));
+	memset(&tpmon->rx_others, 0, sizeof(struct cpif_rx_data));
+
+	memset(&tpmon->rx_total_stat, 0, sizeof(struct cpif_rx_data));
+	memset(&tpmon->rx_tcp_stat, 0, sizeof(struct cpif_rx_data));
+	memset(&tpmon->rx_udp_stat, 0, sizeof(struct cpif_rx_data));
+	memset(&tpmon->rx_others_stat, 0, sizeof(struct cpif_rx_data));
+
+	tpmon->q_status_pktproc_dl = 0;
+	tpmon->q_status_netdev_backlog = 0;
+	tpmon->q_status_dit_src = 0;
+	tpmon->legacy_packet_count = 0;
+	tpmon->current_speed = LINK_SPEED_GEN1;
+
+	tpmon->prev_monitor_time = 0;
+
+	list_for_each_entry(data, &tpmon->all_data_list, data_node) {
+		data->curr_threshold_pos = 0;
+		data->prev_threshold_pos = 0;
+		data->curr_level_pos = 0;
+		data->prev_level_pos = 0;
+		data->prev_unboost_time = 0;
+		data->need_boost = false;
+	}
+
+	return 0;
+}
+
+/* Queue boost work if any queue-status-driven data crossed its threshold. */
+static void tpmon_check_q_status(struct cpif_tpmon *tpmon)
+{
+	struct tpmon_data *data;
+	bool run_work = false;
+
+	list_for_each_entry(data, &tpmon->q_status_list, q_status_node) {
+		/* already flagged; boost work will pick it up */
+		if (data->need_boost)
+			continue;
+
+		if (!tpmon_check_to_boost(data))
+			continue;
+
+		mif_debug("need to run work for %s\n", data->name);
+		run_work = true;
+	}
+
+	if (run_work)
+		queue_delayed_work(tpmon->boost_wq, &tpmon->boost_dwork, 0);
+}
+
+/* Queue boost work if any throughput-driven data crossed its threshold. */
+static void tpmon_check_tp_status(struct cpif_tpmon *tpmon)
+{
+	struct tpmon_data *data;
+	bool run_work = false;
+
+	list_for_each_entry(data, &tpmon->tp_node_list, tp_node) {
+		if (data->need_boost)
+			continue;
+
+		if (!tpmon_check_to_boost(data))
+			continue;
+
+		mif_debug("need to run work for %s\n", data->name);
+		run_work = true;
+	}
+
+	if (run_work)
+		queue_delayed_work(tpmon->boost_wq, &tpmon->boost_dwork, 0);
+}
+
+/*
+ * Evaluate queue status and rx speed once and trigger boosts as needed.
+ * Called from the rx path; a no-op while user levels are in force.
+ */
+int tpmon_start(void)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+
+	if (tpmon->use_user_level)
+		return 0;
+
+	tpmon_calc_q_status(tpmon);
+	tpmon_check_q_status(tpmon);
+
+	tpmon_calc_rx_speed(tpmon);
+	tpmon_check_tp_status(tpmon);
+
+	return 0;
+}
+EXPORT_SYMBOL(tpmon_start);
+
+/*
+ * Stop monitoring and reset all state. If any data is still boosted
+ * (curr_level_pos != 0), schedule one monitor pass with need_init set so
+ * each data is restored to its base level.
+ */
+int tpmon_stop(void)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+	struct tpmon_data *data;
+
+	if (!tpmon_check_active())
+		return 0;
+
+	cancel_delayed_work(&tpmon->boost_dwork);
+	cancel_delayed_work(&tpmon->monitor_dwork);
+
+	atomic_set(&tpmon->active, 0);
+
+	list_for_each_entry(data, &tpmon->all_data_list, data_node) {
+		if (data->curr_level_pos != 0) {
+			atomic_set(&tpmon->need_init, 1);
+			break;
+		}
+	}
+
+	tpmon_init_params(tpmon);
+
+	if (atomic_read(&tpmon->need_init))
+		queue_delayed_work(tpmon->monitor_wq, &tpmon->monitor_dwork, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(tpmon_stop);
+
+/* Full reinit (e.g. on modem reset): drop user levels and restore defaults. */
+int tpmon_init(void)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+
+	if (tpmon->use_user_level) {
+		mif_info("enable use_user_level again if you want to set user level\n");
+		tpmon->use_user_level = 0;
+	}
+
+	if (tpmon_check_active())
+		tpmon_stop();
+
+	tpmon_init_params(tpmon);
+
+	mif_info("set initial level\n");
+	atomic_set(&tpmon->need_init, 1);
+	queue_delayed_work(tpmon->monitor_wq, &tpmon->monitor_dwork, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(tpmon_init);
+
+/* Returns non-zero while the periodic monitor is running. */
+int tpmon_check_active(void)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+
+	return atomic_read(&tpmon->active);
+}
+EXPORT_SYMBOL(tpmon_check_active);
+
+/*
+ * sysfs
+ */
+/* sysfs: dump the DT-provided thresholds and levels of every tpmon data. */
+static ssize_t dt_level_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+	struct tpmon_data *data;
+	ssize_t len = 0;
+	int i = 0;
+
+	list_for_each_entry(data, &tpmon->all_data_list, data_node) {
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+			"%s threshold: ", data->name);
+
+		for (i = 0; i < data->num_threshold; i++)
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+				"%d ", data->threshold[i]);
+
+		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+			"%s level: ", data->name);
+
+		for (i = 0; i < data->num_level; i++)
+			len += scnprintf(buf + len, PAGE_SIZE - len, "0x%x(%d) ",
+				data->level[i], data->level[i]);
+
+		len += scnprintf(buf + len, PAGE_SIZE - len, "\n\n");
+	}
+
+	return len;
+}
+static DEVICE_ATTR_RO(dt_level);
+
+/* sysfs: dump monitor tunables and each data's full configuration/state. */
+static ssize_t curr_level_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+	struct tpmon_data *data;
+	ssize_t len = 0;
+	int i;
+
+	len += scnprintf(buf + len, PAGE_SIZE - len,
+		"trigger min:%dmsec max:%dmsec\n",
+		tpmon->trigger_msec_min, tpmon->trigger_msec_max);
+	len += scnprintf(buf + len, PAGE_SIZE - len,
+		"monitor interval:%dmsec hold:%dmsec stop:%dMbps\n",
+		tpmon->monitor_interval_msec,
+		tpmon->monitor_hold_msec,
+		tpmon->monitor_stop_mbps);
+	len += scnprintf(buf + len, PAGE_SIZE - len,
+		"boost hold:%dmsec\n", tpmon->boost_hold_msec);
+
+	list_for_each_entry(data, &tpmon->all_data_list, data_node) {
+		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+			"name:%s measure:%d target:%d enable:%d extra_idx:%d proto:%d\n",
+			data->name, data->measure,
+			data->target, data->enable, data->extra_idx, data->proto);
+
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+			"num_threshold:%d\n", data->num_threshold);
+		for (i = 0; i < data->num_threshold; i++)
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+				"%d(unboost@%dMbps) ",
+				data->threshold[i], data->unboost_threshold_mbps[i]);
+		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+			"num_level:%d\n", data->num_level);
+		for (i = 0; i < data->num_level; i++)
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+				"%d ", data->level[i]);
+		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+			"curr_level_pos:%d user_level:%d\n",
+			data->curr_level_pos, data->user_level);
+	}
+
+	return len;
+}
+static DEVICE_ATTR_RO(curr_level);
+
+/* sysfs: dump live rx statistics, queue status and per-data boost state. */
+static ssize_t status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+	struct tpmon_data *data;
+	ssize_t len = 0;
+	u32 val = 0;
+
+	/* rx_bytes/rx_mbps are unsigned long: %lu, not %ld */
+	len += scnprintf(buf + len, PAGE_SIZE - len,
+		"rx_total %lubytes %luMbps\n",
+		tpmon->rx_total.rx_bytes, tpmon->rx_total.rx_mbps);
+	len += scnprintf(buf + len, PAGE_SIZE - len,
+		"rx_tcp %lubytes %luMbps\n",
+		tpmon->rx_tcp.rx_bytes, tpmon->rx_tcp.rx_mbps);
+	len += scnprintf(buf + len, PAGE_SIZE - len,
+		"rx_udp %lubytes %luMbps\n",
+		tpmon->rx_udp.rx_bytes, tpmon->rx_udp.rx_mbps);
+	len += scnprintf(buf + len, PAGE_SIZE - len,
+		"rx_others %lubytes %luMbps\n",
+		tpmon->rx_others.rx_bytes, tpmon->rx_others.rx_mbps);
+
+	/* queue/packet counters are u32: %u */
+	len += scnprintf(buf + len, PAGE_SIZE - len,
+		"queue status pktproc:%u dit:%u netdev:%u legacy:%u\n",
+		tpmon->q_status_pktproc_dl,
+		tpmon->q_status_dit_src,
+		tpmon->q_status_netdev_backlog,
+		tpmon->legacy_packet_count);
+	len += scnprintf(buf + len, PAGE_SIZE - len,
+		"use_user_level:%u debug_print:%u\n",
+		tpmon->use_user_level,
+		tpmon->debug_print);
+
+	list_for_each_entry(data, &tpmon->all_data_list, data_node) {
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s: enable:%d",
+			data->name, data->enable);
+
+		if (!data->enable) {
+			len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+			continue;
+		}
+
+		val = tpmon_get_curr_level(data);
+		len += scnprintf(buf + len, PAGE_SIZE - len, " val:%u(0x%x)",
+			val, val);
+
+		if (tpmon->use_user_level)
+			len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+		else
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+				" pos:%d unboost@%dMbps\n",
+				data->curr_level_pos,
+				data->curr_level_pos ?
+				data->unboost_threshold_mbps[data->curr_threshold_pos] : 0);
+	}
+
+	return len;
+}
+static DEVICE_ATTR_RO(status);
+
+/* sysfs: report whether user-controlled levels are in force. */
+static ssize_t use_user_level_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+
+	return sysfs_emit(buf, "use_user_level:%d\n", tpmon->use_user_level);
+}
+
+/*
+ * sysfs: enable/disable user-controlled levels. On enable, each data's
+ * user_level is seeded from its current level.
+ */
+static ssize_t use_user_level_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+	struct tpmon_data *data;
+	int ret;
+	int level = 0;	/* kstrtoint() leaves it untouched on error */
+
+	ret = kstrtoint(buf, 0, &level);
+	if (ret != 0) {
+		mif_err("invalid level:%d with %d\n", level, ret);
+		return -EINVAL;
+	}
+
+	tpmon->use_user_level = level;
+	mif_info("use_user_level:%d\n", tpmon->use_user_level);
+
+	list_for_each_entry(data, &tpmon->all_data_list, data_node) {
+		if (data->set_data) {
+			data->user_level = data->level[data->curr_level_pos];
+		}
+	}
+
+	return count;
+}
+static DEVICE_ATTR_RW(use_user_level);
+
+/* sysfs: report whether verbose tpmon statistics printing is enabled. */
+static ssize_t debug_print_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+
+	return sysfs_emit(buf, "debug print enable:%d\n", tpmon->debug_print);
+}
+
+/* sysfs: enable/disable verbose tpmon statistics printing. */
+static ssize_t debug_print_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+	int ret;
+	int level = 0;	/* kstrtoint() leaves it untouched on error */
+
+	ret = kstrtoint(buf, 0, &level);
+	if (ret != 0) {
+		mif_err("invalid level:%d with %d\n", level, ret);
+		return -EINVAL;
+	}
+
+	tpmon->debug_print = level;
+
+	mif_info("debug print enable:%d\n", tpmon->debug_print);
+
+	return count;
+}
+static DEVICE_ATTR_RW(debug_print);
+
+/*
+ * sysfs: "<name> <level>" — set the user level of every data whose name
+ * matches exactly, and apply it immediately. Only valid while
+ * use_user_level is enabled.
+ */
+static ssize_t set_user_level_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct cpif_tpmon *tpmon = &_tpmon;
+	struct tpmon_data *data;
+	char name[20];
+	int level;
+	int ret;
+
+	if (!tpmon->use_user_level) {
+		mif_info("use_user_level is not set\n");
+		return count;
+	}
+
+	/* both fields are required; with ret == 1, level would be uninitialized */
+	ret = sscanf(buf, "%19s %i", name, &level);
+	if (ret != 2)
+		return -EINVAL;
+
+	mif_info("Change %s to %d(0x%x)\n", name, level, level);
+
+	list_for_each_entry(data, &tpmon->all_data_list, data_node) {
+		if (strcmp(data->name, name) == 0) {
+			data->user_level = level;
+			if (data->set_data)
+				data->set_data(data);
+		}
+	}
+
+	return count;
+}
+static DEVICE_ATTR_WO(set_user_level);
+
+/* sysfs attributes exposed under the device's "tpmon" group. */
+static struct attribute *tpmon_attrs[] = {
+	&dev_attr_dt_level.attr,
+	&dev_attr_curr_level.attr,
+	&dev_attr_status.attr,
+	&dev_attr_use_user_level.attr,
+	&dev_attr_debug_print.attr,
+	&dev_attr_set_user_level.attr,
+	NULL,
+};
+
+static const struct attribute_group tpmon_group = {
+	.attrs = tpmon_attrs,
+	.name = "tpmon",
+};
+
+/*
+ * Init
+ */
+/*
+ * Bind a CPU-cluster tpmon data to its freq QoS request and register the
+ * request against the cluster's cpufreq policy. If no policy exists yet,
+ * registration is deferred to tpmon_cpufreq_nb().
+ */
+static int tpmon_set_cpufreq(struct tpmon_data *data)
+{
+#if IS_ENABLED(CONFIG_CPU_FREQ)
+	struct cpif_tpmon *tpmon = data->tpmon;
+	struct cpufreq_policy *policy;
+	int qos_type;
+
+	switch (data->target) {
+	case TPMON_TARGET_CPU_CL0:
+		data->extra_data = (void *)&tpmon->qos_req_cpu_cl0;
+		data->set_data = tpmon_set_cpu_freq;
+		qos_type = FREQ_QOS_MIN;
+		break;
+	case TPMON_TARGET_CPU_CL0_MAX:
+		data->extra_data = (void *)&tpmon->qos_req_cpu_cl0_max;
+		data->set_data = tpmon_set_cpu_freq;
+		qos_type = FREQ_QOS_MAX;
+		break;
+	case TPMON_TARGET_CPU_CL1:
+		data->extra_data = (void *)&tpmon->qos_req_cpu_cl1;
+		data->set_data = tpmon_set_cpu_freq;
+		qos_type = FREQ_QOS_MIN;
+		break;
+	case TPMON_TARGET_CPU_CL1_MAX:
+		data->extra_data = (void *)&tpmon->qos_req_cpu_cl1_max;
+		data->set_data = tpmon_set_cpu_freq;
+		qos_type = FREQ_QOS_MAX;
+		break;
+	case TPMON_TARGET_CPU_CL2:
+		data->extra_data = (void *)&tpmon->qos_req_cpu_cl2;
+		data->set_data = tpmon_set_cpu_freq;
+		qos_type = FREQ_QOS_MIN;
+		break;
+	case TPMON_TARGET_CPU_CL2_MAX:
+		data->extra_data = (void *)&tpmon->qos_req_cpu_cl2_max;
+		data->set_data = tpmon_set_cpu_freq;
+		qos_type = FREQ_QOS_MAX;
+		break;
+	default:
+		mif_err_limited("no target\n");
+		return -EINVAL;
+	}
+
+	/* notifier already registered: it will add the QoS requests instead */
+	if (tpmon->cpufreq_nb.notifier_call) {
+		mif_info("notifier_call is registered\n");
+		return 0;
+	}
+
+	policy = cpufreq_cpu_get(data->extra_idx);
+	if (!policy) {
+		mif_err_limited("cpufreq_cpu_get() error\n");
+		return -EINVAL;
+	}
+
+	if (policy->cpu == data->extra_idx) {
+		mif_info("freq_qos_add_request for cpu%d %d\n",
+			policy->cpu, qos_type);
+#if IS_ENABLED(CONFIG_ARM_FREQ_QOS_TRACER)
+		freq_qos_tracer_add_request(&policy->constraints,
+			data->extra_data, qos_type, PM_QOS_DEFAULT_VALUE);
+#else
+		freq_qos_add_request(&policy->constraints,
+			data->extra_data, qos_type, PM_QOS_DEFAULT_VALUE);
+#endif
+	}
+
+	/* drop the reference taken by cpufreq_cpu_get() */
+	cpufreq_cpu_put(policy);
+#endif /* CONFIG_CPU_FREQ */
+
+	return 0;
+}
+
+/*
+ * Bind a tpmon data to its boost mechanism by selecting the set_data()
+ * callback (and extra_data) that matches data->target from DT.
+ * Returns -EINVAL for targets not built into this kernel config.
+ */
+static int tpmon_set_target(struct tpmon_data *data)
+{
+#if IS_ENABLED(CONFIG_EXYNOS_PM_QOS)
+	struct cpif_tpmon *tpmon = data->tpmon;
+#endif
+	int ret = 0;
+
+	switch (data->target) {
+#if IS_ENABLED(CONFIG_RPS)
+	case TPMON_TARGET_RPS:
+		data->set_data = tpmon_set_rps;
+		break;
+#endif
+
+	case TPMON_TARGET_GRO:
+		data->set_data = tpmon_set_gro;
+		break;
+
+#if IS_ENABLED(CONFIG_EXYNOS_PM_QOS)
+	case TPMON_TARGET_MIF:
+		data->extra_data = (void *)&tpmon->qos_req_mif;
+		data->set_data = tpmon_set_exynos_pm_qos;
+		break;
+	case TPMON_TARGET_MIF_MAX:
+		data->extra_data = (void *)&tpmon->qos_req_mif_max;
+		data->set_data = tpmon_set_exynos_pm_qos;
+		break;
+	case TPMON_TARGET_INT_FREQ:
+		data->extra_data = (void *)&tpmon->qos_req_int;
+		data->set_data = tpmon_set_exynos_pm_qos;
+		break;
+	case TPMON_TARGET_INT_FREQ_MAX:
+		data->extra_data = (void *)&tpmon->qos_req_int_max;
+		data->set_data = tpmon_set_exynos_pm_qos;
+		break;
+#endif
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+	case TPMON_TARGET_PCIE_LOW_POWER:
+		data->set_data = tpmon_set_pci_low_power;
+		break;
+	case TPMON_TARGET_IRQ_PCIE:
+		data->set_data = tpmon_set_irq_affinity_pcie;
+		break;
+#endif
+
+#if IS_ENABLED(CONFIG_MCU_IPC)
+	case TPMON_TARGET_IRQ_MBOX:
+		data->set_data = tpmon_set_irq_affinity_mbox;
+		break;
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+	case TPMON_TARGET_IRQ_DIT:
+		data->set_data = tpmon_set_irq_affinity_dit;
+		break;
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_BTS)
+	case TPMON_TARGET_BTS:
+		data->set_data = tpmon_set_bts;
+		break;
+#endif
+
+#if IS_ENABLED(CONFIG_CPU_FREQ)
+	case TPMON_TARGET_CPU_CL0:
+	case TPMON_TARGET_CPU_CL0_MAX:
+	case TPMON_TARGET_CPU_CL1:
+	case TPMON_TARGET_CPU_CL1_MAX:
+	case TPMON_TARGET_CPU_CL2:
+	case TPMON_TARGET_CPU_CL2_MAX:
+		/* CPU targets also need the freq QoS request wiring */
+		ret = tpmon_set_cpufreq(data);
+		if (ret) {
+			mif_err("tpmon_set_cpufreq() error:%d\n", ret);
+			return ret;
+		}
+		break;
+#endif
+
+	default:
+		mif_err("%s target error:%d\n", data->name, data->target);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Parse the "cpif_tpmon" DT node: global monitor tunables, then one
+ * tpmon_data per enabled boost child (common fields are inherited from
+ * the parent child_np and copied into tpmon->data[]).
+ *
+ * NOTE(review): of_get_child_by_name() takes a node reference that is
+ * never dropped, and the early -EINVAL returns leave the
+ * for_each_child_of_node() iterator references held — confirm whether
+ * of_node_put() cleanup is needed here (probe-once path, leak is one-shot).
+ */
+static int tpmon_parse_dt(struct device_node *np, struct cpif_tpmon *tpmon)
+{
+	struct device_node *tpmon_np = NULL;
+	struct device_node *child_np = NULL;
+	struct device_node *boost_np = NULL;
+	struct tpmon_data *data = NULL;
+	int ret = 0;
+	u32 count = 0;
+	unsigned long flags;
+
+	tpmon_np = of_get_child_by_name(np, "cpif_tpmon");
+	if (!tpmon_np) {
+		mif_err("tpmon_np is null\n");
+		return -ENODEV;
+	}
+
+	/* mif_dt_read_* macros bail out of this function on missing properties */
+	mif_dt_read_u32(tpmon_np, "trigger_msec_min", tpmon->trigger_msec_min);
+	mif_dt_read_u32(tpmon_np, "trigger_msec_max", tpmon->trigger_msec_max);
+	mif_info("trigger min:%dmsec max:%dmsec\n",
+		tpmon->trigger_msec_min, tpmon->trigger_msec_max);
+
+	mif_dt_read_u32(tpmon_np, "monitor_interval_msec",
+		tpmon->monitor_interval_msec);
+	mif_dt_read_u32(tpmon_np, "monitor_hold_msec",
+		tpmon->monitor_hold_msec);
+	mif_dt_read_u32(tpmon_np, "monitor_stop_mbps",
+		tpmon->monitor_stop_mbps);
+	mif_info("monitor interval:%dmsec hold:%dmsec stop:%dmbps\n",
+		tpmon->monitor_interval_msec, tpmon->monitor_hold_msec,
+		tpmon->monitor_stop_mbps);
+
+	mif_dt_read_u32(tpmon_np, "boost_hold_msec", tpmon->boost_hold_msec);
+	mif_info("boost hold:%dmsec\n", tpmon->boost_hold_msec);
+
+	for_each_child_of_node(tpmon_np, child_np) {
+		/* template shared by all boost entries under this child */
+		struct tpmon_data child_data = {};
+
+		mif_dt_read_string(child_np, "boost_name", child_data.name);
+		mif_dt_read_u32(child_np, "target", child_data.target);
+		mif_dt_read_u32(child_np, "extra_idx", child_data.extra_idx);
+		mif_dt_count_u32_elems(child_np, "level", child_data.num_level);
+		mif_dt_count_u32_array(child_np, "level",
+			child_data.level, child_data.num_level);
+
+		/* boost */
+		for_each_child_of_node(child_np, boost_np) {
+			if (count >= MAX_TPMON_DATA) {
+				mif_err("count is full:%d\n", count);
+				return -EINVAL;
+			}
+
+			data = &tpmon->data[count];
+			memcpy(data, &child_data, sizeof(child_data));
+			data->tpmon = tpmon;
+
+			/* check enabled */
+			mif_dt_read_u32(boost_np, "enable", data->enable);
+			if (!data->enable)
+				continue;
+
+			/* threshold */
+			mif_dt_count_u32_elems(boost_np, "boost_threshold",
+				data->num_threshold);
+			mif_dt_count_u32_array(boost_np, "boost_threshold",
+				data->threshold, data->num_threshold);
+			mif_dt_count_u32_array(boost_np, "unboost_threshold_mbps",
+				data->unboost_threshold_mbps, data->num_threshold);
+
+			/* target */
+			ret = tpmon_set_target(data);
+			if (ret) {
+				mif_err("tpmon_set_target() error:%d\n", ret);
+				continue;
+			}
+
+			/* measure */
+			mif_dt_read_u32(boost_np, "proto", data->proto);
+			mif_dt_read_u32(boost_np, "measure", data->measure);
+			spin_lock_irqsave(&tpmon->lock, flags);
+			switch (data->measure) {
+			case TPMON_MEASURE_TP:
+				data->get_data = tpmon_get_rx_speed_mbps;
+				list_add_tail(&data->tp_node, &tpmon->tp_node_list);
+				break;
+			case TPMON_MEASURE_NETDEV_Q:
+			case TPMON_MEASURE_PKTPROC_DL_Q:
+			case TPMON_MEASURE_DIT_SRC_Q:
+				data->get_data = tpmon_get_q_status;
+				list_add_tail(&data->q_status_node, &tpmon->q_status_list);
+				break;
+			default:
+				mif_err("%s measure error:%d %d\n",
+					data->name, count, data->measure);
+				spin_unlock_irqrestore(&tpmon->lock, flags);
+				return -EINVAL;
+			}
+			list_add_tail(&data->data_node, &tpmon->all_data_list);
+			spin_unlock_irqrestore(&tpmon->lock, flags);
+
+			mif_info("name:%s measure:%d target:%d extra_idx:%d level:%d/%d proto:%d\n",
+				data->name, data->measure, data->target, data->extra_idx,
+				data->num_threshold, data->num_level, data->proto);
+
+			count++;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Probe-time setup of the throughput monitor: state/lists/locks, cpufreq
+ * notifier (if no policy exists yet), PM QoS requests, DT parsing,
+ * workqueues and the sysfs "tpmon" group.
+ *
+ * NOTE(review): error paths do not undo earlier steps (monitor_wq is not
+ * destroyed when boost_wq allocation fails; notifier/PM QoS requests are
+ * not removed) — confirm whether a failed probe can recover or only panic.
+ */
+int tpmon_create(struct platform_device *pdev, struct link_device *ld)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct cpif_tpmon *tpmon = &_tpmon;
+	struct mem_link_device *mld = ld_to_mem_link_device(ld);
+	int ret = 0;
+#if IS_ENABLED(CONFIG_CPU_FREQ)
+	struct cpufreq_policy pol;
+#endif
+
+	if (!np) {
+		mif_err("np is null\n");
+		ret = -EINVAL;
+		goto create_error;
+	}
+	if (!ld) {
+		mif_err("ld is null\n");
+		ret = -EINVAL;
+		goto create_error;
+	}
+
+	tpmon->ld = ld;
+	tpmon->use_user_level = 0;
+	tpmon->debug_print = 0;
+	mld->tpmon = &_tpmon;
+
+	spin_lock_init(&tpmon->lock);
+	atomic_set(&tpmon->active, 0);
+
+	INIT_LIST_HEAD(&tpmon->all_data_list);
+	INIT_LIST_HEAD(&tpmon->tp_node_list);
+	INIT_LIST_HEAD(&tpmon->q_status_list);
+	INIT_LIST_HEAD(&tpmon->net_node_list);
+
+#if IS_ENABLED(CONFIG_CPU_FREQ)
+	/* no policy for cpu0 yet: defer freq QoS setup to the notifier */
+	if (cpufreq_get_policy(&pol, 0) != 0) {
+		mif_info("register cpufreq notifier\n");
+		tpmon->cpufreq_nb.notifier_call = tpmon_cpufreq_nb;
+		cpufreq_register_notifier(&tpmon->cpufreq_nb, CPUFREQ_POLICY_NOTIFIER);
+	}
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_PM_QOS)
+	exynos_pm_qos_add_request(&tpmon->qos_req_mif,
+		PM_QOS_BUS_THROUGHPUT, 0);
+	exynos_pm_qos_add_request(&tpmon->qos_req_mif_max,
+		PM_QOS_BUS_THROUGHPUT_MAX,
+		PM_QOS_BUS_THROUGHPUT_MAX_DEFAULT_VALUE);
+	exynos_pm_qos_add_request(&tpmon->qos_req_int,
+		PM_QOS_DEVICE_THROUGHPUT, 0);
+	exynos_pm_qos_add_request(&tpmon->qos_req_int_max,
+		PM_QOS_DEVICE_THROUGHPUT_MAX,
+		PM_QOS_DEVICE_THROUGHPUT_MAX_DEFAULT_VALUE);
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_BTS)
+	tpmon->bts_scen_index = bts_get_scenindex("cp_throughput");
+#endif
+
+	ret = tpmon_parse_dt(np, tpmon);
+	if (ret) {
+		mif_err("tpmon_parse_dt() error:%d\n", ret);
+		goto create_error;
+	}
+
+	tpmon->monitor_wq = alloc_workqueue("cpif_tpmon_monitor_wq",
+		__WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+	if (!tpmon->monitor_wq) {
+		mif_err("create_workqueue() monitor_wq error\n");
+		ret = -EINVAL;
+		goto create_error;
+	}
+	INIT_DELAYED_WORK(&tpmon->monitor_dwork, tpmon_monitor_work);
+
+	tpmon->boost_wq = alloc_workqueue("cpif_tpmon_boost_wq",
+		__WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+	if (!tpmon->boost_wq) {
+		mif_err("create_workqueue() boost_wq error\n");
+		ret = -EINVAL;
+		goto create_error;
+	}
+	INIT_DELAYED_WORK(&tpmon->boost_dwork, tpmon_boost_work);
+
+	/* function-pointer interface used by the link device layer */
+	tpmon->start = tpmon_start;
+	tpmon->stop = tpmon_stop;
+	tpmon->add_rx_bytes = tpmon_add_rx_bytes;
+	tpmon->check_active = tpmon_check_active;
+	tpmon->reset_data = tpmon_reset_data;
+
+	if (sysfs_create_group(&pdev->dev.kobj, &tpmon_group))
+		mif_err("failed to create cpif tpmon groups node\n");
+
+	return ret;
+
+create_error:
+	mif_err("Error:%d\n", ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(tpmon_create);
diff --git a/cpif_tp_monitor.h b/cpif_tp_monitor.h
new file mode 100644
index 0000000..a69d775
--- /dev/null
+++ b/cpif_tp_monitor.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019-2020, Samsung Electronics.
+ *
+ */
+
+#ifndef __CPIF_TP_MONITOR_H__
+#define __CPIF_TP_MONITOR_H__
+
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+#if IS_ENABLED(CONFIG_CPU_FREQ)
+#include <linux/cpufreq.h>
+#endif
+#if IS_ENABLED(CONFIG_EXYNOS_PM_QOS)
+#include <soc/google/exynos_pm_qos.h>
+#endif
+#if IS_ENABLED(CONFIG_ARM_FREQ_QOS_TRACER)
+#include <soc/google/freq-qos-tracer.h>
+#endif
+
+#define MAX_TPMON_DATA 16		/* max boost entries parsed from DT */
+#define MAX_TPMON_THRESHOLD 10		/* max boost thresholds per entry */
+#define MAX_TPMON_LEVEL (MAX_TPMON_THRESHOLD+1)	/* one level per threshold + base */
+#define MAX_RPS_STRING 8
+#define MAX_IRQ_AFFINITY_DATA 5
+#define MAX_IRQ_AFFINITY_STRING 8
+#define MAX_RX_BYTES_COUNT 1000
+
+/* One DT-configured boost entry: what to measure and what to set. */
+struct tpmon_data {
+	struct cpif_tpmon *tpmon;	/* owning monitor */
+
+	struct list_head data_node;	/* on cpif_tpmon.all_data_list */
+	struct list_head tp_node;	/* on tp_node_list (TP-measured only) */
+	struct list_head q_status_node;	/* on q_status_list (queue-measured only) */
+
+	char *name;			/* DT "boost_name" */
+	u32 target;			/* TPMON_TARGET_* selector */
+
+	/* level table applied by set_data(); pos 0 is the unboosted base */
+	u32 num_level;
+	u32 level[MAX_TPMON_LEVEL];
+	u32 curr_level_pos;
+	u32 prev_level_pos;
+	u32 user_level;			/* override when use_user_level is set */
+
+	u32 enable;
+	u32 measure;			/* TPMON_MEASURE_* source selector */
+	u32 proto;			/* protocol filter for TP measurement */
+	u32 extra_idx;			/* target-specific index (CPU, IRQ, ...) */
+
+	/* boost thresholds on the measured value */
+	u32 num_threshold;
+	u32 threshold[MAX_TPMON_THRESHOLD];
+	u32 curr_threshold_pos;
+	u32 prev_threshold_pos;
+
+	/* per-threshold rx speed below which the boost is released */
+	u32 unboost_threshold_mbps[MAX_TPMON_THRESHOLD];
+	ktime_t prev_unboost_time;
+
+	bool need_boost;		/* set by check, consumed by boost work */
+
+	void *extra_data;		/* target-specific request object */
+
+	u32 (*get_data)(struct tpmon_data *data);	/* read measured value */
+	void (*set_data)(struct tpmon_data *data);	/* apply current level */
+};
+
+/* A byte counter with its derived rate and last sample time. */
+struct cpif_rx_data {
+	unsigned long rx_bytes;
+	unsigned long rx_mbps;
+
+	ktime_t prev_time;
+};
+
+/* Singleton state of the CP interface throughput monitor. */
+struct cpif_tpmon {
+	struct link_device *ld;
+
+	atomic_t need_init;	/* reset levels on next monitor pass */
+	atomic_t active;	/* monitor work is running */
+	spinlock_t lock;	/* protects rx counters and the lists below */
+
+	u32 trigger_msec_min;
+	u32 trigger_msec_max;
+
+	u32 monitor_interval_msec;
+	u32 monitor_hold_msec;	/* low-speed time before the monitor stops */
+	u32 monitor_stop_mbps;
+	u32 current_speed;	/* PCIe link speed (LINK_SPEED_GENx) */
+
+	u32 boost_hold_msec;
+
+	struct list_head all_data_list;
+	struct list_head tp_node_list;
+	struct list_head q_status_list;
+	struct list_head net_node_list;
+
+	ktime_t prev_monitor_time;
+	struct workqueue_struct *monitor_wq;
+	struct delayed_work monitor_dwork;
+
+	atomic_t boost_active;
+	struct workqueue_struct *boost_wq;
+	struct delayed_work boost_dwork;
+
+	/* rolling counters for boost/unboost decisions */
+	struct cpif_rx_data rx_total;
+	struct cpif_rx_data rx_tcp;
+	struct cpif_rx_data rx_udp;
+	struct cpif_rx_data rx_others;
+
+	/* counters used for periodic statistics reporting */
+	struct cpif_rx_data rx_total_stat;
+	struct cpif_rx_data rx_tcp_stat;
+	struct cpif_rx_data rx_udp_stat;
+	struct cpif_rx_data rx_others_stat;
+
+	u32 q_status_pktproc_dl;
+	u32 q_status_netdev_backlog;
+	u32 q_status_dit_src;
+	u32 legacy_packet_count;
+
+	u32 use_user_level;	/* sysfs-controlled levels instead of auto */
+	u32 debug_print;
+
+	struct tpmon_data data[MAX_TPMON_DATA];
+
+#if IS_ENABLED(CONFIG_EXYNOS_PM_QOS)
+	struct exynos_pm_qos_request qos_req_mif;
+	struct exynos_pm_qos_request qos_req_mif_max;
+	struct exynos_pm_qos_request qos_req_int;
+	struct exynos_pm_qos_request qos_req_int_max;
+#endif
+
+#if IS_ENABLED(CONFIG_CPU_FREQ)
+	struct notifier_block cpufreq_nb;
+	struct freq_qos_request qos_req_cpu_cl0;
+	struct freq_qos_request qos_req_cpu_cl0_max;
+	struct freq_qos_request qos_req_cpu_cl1;
+	struct freq_qos_request qos_req_cpu_cl1_max;
+	struct freq_qos_request qos_req_cpu_cl2;
+	struct freq_qos_request qos_req_cpu_cl2_max;
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_BTS)
+	int bts_scen_index;
+#endif
+
+	/* Func */
+	int (*start)(void);
+	int (*stop)(void);
+	void (*add_rx_bytes)(struct sk_buff *skb);
+	int (*check_active)(void);
+	void (*reset_data)(char *name);
+};
+
+/* Current level of @data: 0 when disabled, the user override when
+ * use_user_level is set, otherwise the table entry at curr_level_pos.
+ */
+static inline u32 tpmon_get_curr_level(struct tpmon_data *data)
+{
+	if (!data->enable)
+		return 0;
+
+	if (data->tpmon->use_user_level)
+		return data->user_level;
+
+	return data->level[data->curr_level_pos];
+}
+
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+extern int tpmon_create(struct platform_device *pdev, struct link_device *ld);
+extern int tpmon_start(void);
+extern int tpmon_stop(void);
+extern int tpmon_init(void);
+extern void tpmon_add_rx_bytes(struct sk_buff *skb);
+extern void tpmon_add_legacy_packet_count(u32 count);
+extern void tpmon_add_net_node(struct list_head *node);
+extern int tpmon_check_active(void);
+#else
+/* No-op stubs; signatures must match the real prototypes above. */
+static inline int tpmon_create(struct platform_device *pdev, struct link_device *ld) { return 0; }
+static inline int tpmon_start(void) { return 0; }
+static inline int tpmon_stop(void) { return 0; }
+static inline int tpmon_init(void) { return 0; }
+static inline void tpmon_add_rx_bytes(struct sk_buff *skb) { return; }
+static inline void tpmon_add_legacy_packet_count(u32 count) { return; }
+static inline void tpmon_add_net_node(struct list_head *node) { return; }
+static inline int tpmon_check_active(void) { return 0; }
+#endif
+
+#endif /* __CPIF_TP_MONITOR_H__ */
diff --git a/cpif_version.h b/cpif_version.h
new file mode 100644
index 0000000..40cd02b
--- /dev/null
+++ b/cpif_version.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019, Samsung Electronics.
+ *
+ */
+
+#ifndef __CPIF_VERSION_H__
+#define __CPIF_VERSION_H__
+
+/* Should not exceed CPIF_VERSION_SIZE */
+static const char cpif_driver_version[] = "CPIF-20220408R1";
+#endif /* __CPIF_VERSION_H__ */
diff --git a/cpif_vmapper.c b/cpif_vmapper.c
new file mode 100644
index 0000000..786288a
--- /dev/null
+++ b/cpif_vmapper.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#include "cpif_vmapper.h"
+#include <linux/dma-direction.h>
+#include <soc/samsung/exynos-cpif-iommu.h>
+#include "modem_v1.h"
+
+/* Allocate and initialize a CP virtual-address mapper covering
+ * [@va_start, @va_start + @va_size), carved into @instance_size units.
+ * Enables the CP sysmmu with IO-coherency. Returns NULL on allocation
+ * failure. Caller frees with cpif_vmap_free().
+ */
+struct cpif_va_mapper *cpif_vmap_create(u64 va_start, u64 va_size, u64 instance_size)
+{
+	struct cpif_va_mapper *vmap;
+
+	vmap = kzalloc(sizeof(struct cpif_va_mapper), GFP_ATOMIC);
+	if (vmap == NULL)
+		return NULL;
+
+	vmap->va_start = va_start;
+	vmap->va_size = va_size;
+	vmap->va_end = va_start + va_size;
+	vmap->instance_size = instance_size;
+
+	/* Fix: initialize item_list unconditionally. It used to be skipped for
+	 * the single-mapping case (va_size == instance_size), but
+	 * cpif_vmap_free() walks item_list whenever its early-return path is
+	 * not taken (e.g. a single-mapping vmap that was never mapped, so
+	 * vmap->out == NULL), which traversed a zeroed list head and
+	 * dereferenced NULL.
+	 */
+	INIT_LIST_HEAD(&vmap->item_list);
+
+	cpif_sysmmu_set_use_iocc();
+	cpif_sysmmu_enable();
+
+	return vmap;
+}
+EXPORT_SYMBOL(cpif_vmap_create);
+
+/* Tear down a vmapper: remove the single whole-range iommu mapping, or the
+ * "in"/"out" items plus everything still queued on item_list, then release
+ * the vmap itself.
+ * NOTE(review): cpif_iommu_unmap() appears to return the unmapped size
+ * (0 == failure), hence the "err == 0" checks below — confirm against the
+ * exynos cpif-iommu API.
+ */
+void cpif_vmap_free(struct cpif_va_mapper *vmap)
+{
+ struct cpif_vmap_item *temp, *temp2;
+ int err;
+
+ if (unlikely(!vmap)) {
+ mif_err("no vmap to free\n");
+ return;
+ }
+
+ if (vmap->va_size == vmap->instance_size && vmap->out) {
+ /* when va and pa is mapped at once */
+ err = cpif_iommu_unmap(vmap->va_start, vmap->va_size);
+ if (unlikely(err == 0))
+ mif_err("failed to perform iommu unmapping\n");
+ kfree(vmap->out);
+ vmap->out = NULL;
+ kfree(vmap);
+ vmap = NULL;
+ return;
+ }
+
+ if (vmap->in) {
+ err = cpif_iommu_unmap(vmap->in->vaddr_base, vmap->in->item_size);
+ if (err == 0)
+ mif_err("failed to unmap\n");
+ kfree(vmap->in);
+ vmap->in = NULL;
+ }
+
+ if (vmap->out) {
+ err = cpif_iommu_unmap(vmap->out->vaddr_base, vmap->out->item_size);
+ if (err == 0)
+ mif_err("failed to unmap\n");
+ kfree(vmap->out);
+ vmap->out = NULL;
+ }
+
+ /* items between "out" and "in" are still queued on item_list */
+ list_for_each_entry_safe(temp, temp2, &vmap->item_list, item) {
+ err = cpif_iommu_unmap(temp->vaddr_base, temp->item_size);
+ if (err == 0)
+ mif_err("failed to unmap\n");
+ list_del(&temp->item);
+ kfree(temp);
+ }
+
+ kfree(vmap);
+ vmap = NULL;
+}
+EXPORT_SYMBOL(cpif_vmap_free);
+
+/* Map @item_size bytes of physical memory starting at @item_paddr into the
+ * mapper's CP virtual range, and return the CP virtual address corresponding
+ * to @instance_paddr inside that item (0 on failure).
+ *
+ * Single-mapping vmaps (va_size == instance_size) map the whole range once.
+ * Otherwise items are mapped sequentially; consecutive calls with the same
+ * item_paddr only take an extra reference on the current "in" item.
+ *
+ * Fix: on kzalloc() failure after a successful cpif_iommu_map(), the iommu
+ * mapping used to be leaked; it is now unmapped before returning 0.
+ */
+u64 cpif_vmap_map_area(struct cpif_va_mapper *vmap, u64 item_paddr, u64 item_size,
+			u64 instance_paddr)
+{
+	int err;
+	struct cpif_vmap_item *temp;
+
+	if (vmap->va_size == vmap->instance_size) { /* when va and pa is mapped at once */
+		if (vmap->out) {
+			mif_err("whole range mapping is done already\n");
+			return 0;
+		}
+
+		err = cpif_iommu_map(vmap->va_start, instance_paddr, vmap->va_size,
+				     DMA_BIDIRECTIONAL);
+		if (unlikely(err)) {
+			mif_err("failed to perform iommu mapping\n");
+			return 0;
+		}
+		temp = kzalloc(sizeof(struct cpif_vmap_item), GFP_ATOMIC);
+		if (!temp) {
+			/* do not leak the iommu mapping on alloc failure */
+			cpif_iommu_unmap(vmap->va_start, vmap->va_size);
+			return 0;
+		}
+		temp->vaddr_base = vmap->va_start;
+		temp->paddr_base = instance_paddr;
+		atomic_set(&temp->ref, 1);
+		vmap->out = temp; /* need to be positioned at out for easy unmap */
+
+		return vmap->out->vaddr_base;
+	}
+
+	if (!vmap->in) { /* first time to map */
+		err = cpif_iommu_map(vmap->va_start, item_paddr, item_size,
+				     DMA_BIDIRECTIONAL);
+		if (unlikely(err)) {
+			mif_err_limited("failed to perform iommu mapping\n");
+			return 0;
+		}
+		temp = kzalloc(sizeof(struct cpif_vmap_item), GFP_ATOMIC);
+		if (!temp) {
+			/* do not leak the iommu mapping on alloc failure */
+			cpif_iommu_unmap(vmap->va_start, item_size);
+			return 0;
+		}
+		temp->vaddr_base = vmap->va_start;
+		temp->paddr_base = item_paddr;
+		temp->item_size = item_size;
+		atomic_set(&temp->ref, 1);
+		vmap->in = temp;
+	} else if (vmap->in->paddr_base != item_paddr) {
+		/* normal case
+		 * if in's vmap item is fully mapped, enqueue that item to
+		 * item_list and create new item
+		 */
+		u64 next_vaddr_base = vmap->in->vaddr_base + vmap->in->item_size;
+
+		if ((next_vaddr_base + item_size) >= vmap->va_end) /* back to va start */
+			next_vaddr_base = vmap->va_start;
+
+		err = cpif_iommu_map(next_vaddr_base, item_paddr, item_size,
+				     DMA_BIDIRECTIONAL);
+		if (unlikely(err)) {
+			mif_err_limited("failed to perform iommu mapping\n");
+			return 0;
+		}
+		temp = kzalloc(sizeof(struct cpif_vmap_item), GFP_ATOMIC);
+		if (!temp) {
+			/* do not leak the iommu mapping on alloc failure */
+			cpif_iommu_unmap(next_vaddr_base, item_size);
+			return 0;
+		}
+		temp->vaddr_base = next_vaddr_base;
+		temp->paddr_base = item_paddr;
+		temp->item_size = item_size;
+		atomic_set(&temp->ref, 1);
+
+		list_add_tail(&vmap->in->item, &vmap->item_list);
+		vmap->in = temp;
+	} else /* item "in" still has room to use, no need to iommu this time */
+		atomic_inc(&vmap->in->ref);
+
+	return vmap->in->vaddr_base + (instance_paddr - item_paddr);
+}
+EXPORT_SYMBOL(cpif_vmap_map_area);
+
+/* Translate CP virtual @vaddr back to its AP physical address and drop one
+ * reference on the owning item; the item's iommu mapping is removed once its
+ * refcount reaches zero. Items are unmapped in the same order they were
+ * mapped ("out" tracks the oldest live item). Returns 0 on error.
+ *
+ * Fix: the bounds check accepted the one-past-the-end address
+ * (vaddr == vaddr_base + item_size); valid addresses are
+ * [vaddr_base, vaddr_base + item_size), so ">" became ">=".
+ */
+u64 cpif_vmap_unmap_area(struct cpif_va_mapper *vmap, u64 vaddr)
+{
+	int err = 0;
+	u64 ret = 0;
+	struct cpif_vmap_item *temp;
+	struct cpif_vmap_item *target;
+
+	if (vmap->va_size == vmap->instance_size) { /* when va and pa is mapped at once */
+		err = cpif_iommu_unmap(vmap->va_start, vmap->va_size);
+		if (unlikely(err == 0)) {
+			mif_err_limited("failed to perform iommu unmapping\n");
+			return 0;
+		}
+		kfree(vmap->out);
+		vmap->out = NULL;
+
+		return vmap->va_start;
+	}
+
+	if (unlikely(!vmap->out)) { /* first time to unmap */
+		temp = list_first_entry_or_null(&vmap->item_list,
+						struct cpif_vmap_item, item);
+		if (unlikely(!temp)) {
+			mif_err_limited("failed to get item from list\n");
+			return 0;
+		}
+		vmap->out = temp;
+		list_del(&temp->item);
+	}
+
+	target = vmap->out;
+
+	if (unlikely(vaddr < target->vaddr_base || vaddr >= target->vaddr_base +
+				target->item_size)) {
+		mif_err("invalid vaddr 0x%llX vbase: 0x%llX vend: 0x%llX\n",
+			vaddr, target->vaddr_base,
+			target->vaddr_base + target->item_size);
+		return 0;
+	}
+
+	atomic_dec(&target->ref);
+
+	ret = target->paddr_base + (vaddr - target->vaddr_base);
+
+	/* unmap this item when ref count goes to 0 */
+	if (atomic_read(&target->ref) == 0) {
+		err = cpif_iommu_unmap(target->vaddr_base, target->item_size);
+		if (err == 0) {
+			mif_err_limited("failed to unmap\n");
+			return 0;
+		}
+		kfree(vmap->out);
+		/* update vmap->out to the next item to be unmapped */
+		temp = list_first_entry_or_null(&vmap->item_list,
+						struct cpif_vmap_item, item);
+		if (unlikely(!temp)) {
+			mif_err_limited("item list is empty\n");
+			if (vmap->in) {
+				/* drain out rest, next map will start from beginning */
+				mif_info("drain out vmap->in\n");
+				vmap->out = vmap->in;
+				vmap->in = NULL;
+			} else /* last of last, initialize vmap->out */
+				vmap->out = NULL;
+			return ret;
+		}
+		vmap->out = temp;
+		list_del(&temp->item);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(cpif_vmap_unmap_area);
diff --git a/cpif_vmapper.h b/cpif_vmapper.h
new file mode 100644
index 0000000..cbe0961
--- /dev/null
+++ b/cpif_vmapper.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#ifndef __CPIF_VMAPPER_H__
+#define __CPIF_VMAPPER_H__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+
+/* One contiguous iommu mapping inside a cpif_va_mapper. */
+struct cpif_vmap_item {
+ u64 vaddr_base; /* cp address */
+ u64 paddr_base; /* ap physical address */
+ u64 item_size; /* bytes covered by this mapping */
+ struct list_head item; /* link into cpif_va_mapper.item_list */
+ atomic_t ref; /* if zero, this item will be unmapped */
+};
+
+/* A vmapper exposes a linear virtual (CP-side) address range backed by
+ * possibly sparse physical memory. It holds several items; each item holds
+ * several fixed-size instances, while item sizes may differ due to page
+ * allocation.
+ */
+struct cpif_va_mapper {
+ u64 va_start; /* va = cp address */
+ u64 va_size;
+ u64 va_end;
+ u64 instance_size; /* size of instance in the item */
+
+ /* vmap table guaranteed to be mapped/unmapped sequentially */
+ struct list_head item_list;
+ struct cpif_vmap_item *out; /* item to be unmapped, after list out */
+ struct cpif_vmap_item *in; /* item mapped recently, before list in */
+};
+
+struct cpif_va_mapper *cpif_vmap_create(u64 va_start, u64 va_size, u64 instance_size);
+void cpif_vmap_free(struct cpif_va_mapper *vmap);
+u64 cpif_vmap_map_area(struct cpif_va_mapper *vmap, u64 item_paddr, u64 item_size,
+ u64 instance_paddr);
+u64 cpif_vmap_unmap_area(struct cpif_va_mapper *vmap, u64 vaddr);
+#endif /* __CPIF_VMAPPER_H__ */
diff --git a/direct_dm.c b/direct_dm.c
new file mode 100644
index 0000000..4695371
--- /dev/null
+++ b/direct_dm.c
@@ -0,0 +1,1139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb/f_dm.h>
+
+#include "direct_dm.h"
+#include "modem_utils.h"
+
+static struct direct_dm_ctrl *_dc;
+
+static void direct_dm_rx_func(unsigned long arg);
+
+/* RX timer */
+/* Arm the RX retry timer (when use_rx_timer is set and the timer is not
+ * already queued) so direct_dm_rx_func() runs again after
+ * rx_timer_period_msec; used to retry delivery after a failed USB request.
+ */
+static inline void direct_dm_start_rx_timer(struct direct_dm_ctrl *dc,
+ struct hrtimer *timer)
+{
+ unsigned long flags;
+
+ if (!dc) {
+ mif_err_limited("dc is null\n");
+ return;
+ }
+
+ if (!dc->use_rx_timer) {
+ mif_err_limited("use_rx_timer is not set\n");
+ return;
+ }
+
+ spin_lock_irqsave(&dc->rx_timer_lock, flags);
+ if (!hrtimer_is_queued(timer)) {
+ ktime_t ktime = ktime_set(0, dc->rx_timer_period_msec * NSEC_PER_MSEC);
+
+ dc->stat.rx_timer_req++;
+ hrtimer_start(timer, ktime, HRTIMER_MODE_REL);
+ }
+ spin_unlock_irqrestore(&dc->rx_timer_lock, flags);
+}
+
+/* One-shot retry timer callback: rerun the RX path, either via the tasklet
+ * or directly, depending on use_rx_task. Never restarts itself; it is
+ * re-armed by direct_dm_start_rx_timer() when needed.
+ */
+static enum hrtimer_restart direct_dm_rx_timer(struct hrtimer *timer)
+{
+ mif_info("run rx func by timer\n");
+
+ _dc->stat.rx_timer_expire++;
+
+ if (_dc->use_rx_task)
+ tasklet_hi_schedule(&_dc->rx_task);
+ else
+ direct_dm_rx_func((unsigned long)_dc);
+
+ return HRTIMER_NORESTART;
+}
+
+/* RX func */
+/* Copy the current descriptor's payload into a fresh skb and hand it to the
+ * mem link device's demux (silent-log path). On success both the descriptor
+ * position and the done position advance, and the DONE bit is cleared.
+ * Returns 0 on success, -ENOMEM/-EINVAL on failure (positions unchanged).
+ *
+ * Fix: the channel id is now resolved BEFORE allocating the skb; previously
+ * an unknown protocol returned -EINVAL after dev_alloc_skb(), leaking the skb.
+ */
+static int direct_dm_send_to_upper_layer(struct direct_dm_ctrl *dc,
+		struct direct_dm_desc *curr_desc, void *addr)
+{
+	struct sk_buff *skb = NULL;
+	struct mem_link_device *mld;
+	int ch_id = 0;
+
+	if (!dc || !curr_desc || !addr) {
+		mif_err_limited("null addr\n");
+		return -ENOMEM;
+	}
+
+	switch (dc->ld->protocol) {
+	case PROTOCOL_SIPC:
+		ch_id = SIPC_CH_ID_CPLOG1;
+		break;
+	case PROTOCOL_SIT:
+		ch_id = EXYNOS_CH_ID_CPLOG;
+		break;
+	default:
+		mif_err_limited("protocol error:%d\n", dc->ld->protocol);
+		return -EINVAL;
+	}
+
+	dc->stat.upper_layer_req_cnt++;
+	skb = dev_alloc_skb(curr_desc->length);
+	if (unlikely(!skb)) {
+		mif_err_limited("mem_alloc_skb() error pos:%d\n",
+				dc->curr_desc_pos);
+		dc->stat.err_upper_layer_req++;
+		return -ENOMEM;
+	}
+	skb_put(skb, curr_desc->length);
+	skb_copy_to_linear_data(skb, addr, curr_desc->length);
+
+	skbpriv(skb)->lnk_hdr = 0;
+	skbpriv(skb)->sipc_ch = ch_id;
+	skbpriv(skb)->iod = link_get_iod_with_channel(dc->ld, ch_id);
+	skbpriv(skb)->ld = dc->ld;
+	skbpriv(skb)->napi = NULL;
+
+	mld = to_mem_link_device(dc->ld);
+	mld->pass_skb_to_demux(mld, skb);
+
+	dc->desc_rgn[dc->curr_desc_pos].status &= ~BIT(DDM_DESC_S_DONE);
+
+	dc->curr_desc_pos = circ_new_ptr(dc->num_desc,
+			dc->curr_desc_pos, 1);
+	dc->curr_done_pos = circ_new_ptr(dc->num_desc,
+			dc->curr_done_pos, 1);
+
+	return 0;
+}
+
+/* Queue the current descriptor's payload to the USB DM gadget. On success
+ * only curr_desc_pos advances (curr_done_pos advances later in the USB
+ * completion callback). On failure the position is NOT advanced, so the same
+ * packet will be retried — optionally via the RX retry timer.
+ */
+static int direct_dm_send_to_usb(struct direct_dm_ctrl *dc,
+ struct direct_dm_desc *curr_desc, void *addr)
+{
+ int ret = 0;
+
+ if (!dc || !curr_desc || !addr) {
+ mif_err_limited("null addr\n");
+ return -ENOMEM;
+ }
+
+ dc->stat.usb_req_cnt++;
+ ret = usb_dm_request(addr, curr_desc->length);
+ if (ret) {
+ mif_info_limited("usb_dm_request() ret:%d pos:%d\n",
+ ret, dc->curr_desc_pos);
+
+ dc->usb_req_failed = true;
+ dc->stat.err_usb_req++;
+ if (dc->use_rx_timer) {
+ if (unlikely(dc->enable_debug))
+ mif_info("start timer\n");
+
+ direct_dm_start_rx_timer(dc, &dc->rx_timer);
+ }
+
+ return ret;
+ }
+ dc->usb_req_failed = false;
+
+ dc->curr_desc_pos = circ_new_ptr(dc->num_desc,
+ dc->curr_desc_pos, 1);
+
+ return 0;
+}
+
+/* Main RX drain loop (runs under rx_lock, from IRQ, tasklet or timer):
+ * walk the descriptor ring from curr_desc_pos, validate each DONE
+ * descriptor, translate its CP buffer address to an AP virtual address, and
+ * forward the payload either to the upper layer (silent-log mode) or to the
+ * USB DM gadget. Stops at the first not-done descriptor, on any error, or —
+ * in the USB path — when it catches up with the oldest not-yet-completed
+ * request (curr_desc_pos == curr_done_pos).
+ */
+static void direct_dm_rx_func(unsigned long arg)
+{
+ struct direct_dm_ctrl *dc = (struct direct_dm_ctrl *)arg;
+ unsigned long paddr;
+ void *addr;
+ int ret;
+ int i;
+ bool upper_layer_req = false;
+ int rcvd = 0;
+ unsigned long flags;
+
+ if (!dc) {
+ mif_err_limited("dc is null\n");
+ return;
+ }
+
+ spin_lock_irqsave(&dc->rx_lock, flags);
+
+ upper_layer_req = dc->info_rgn->silent_log;
+ if (!upper_layer_req && !dc->usb_active) {
+ mif_info_limited("usb is not activated\n");
+ spin_unlock_irqrestore(&dc->rx_lock, flags);
+ return;
+ }
+
+ for (i = 0; i < dc->num_desc; i++) {
+ struct direct_dm_desc curr_desc = dc->desc_rgn[dc->curr_desc_pos];
+
+ if (!(curr_desc.status & BIT(DDM_DESC_S_DONE))) {
+ if (unlikely(dc->enable_debug))
+ mif_info("DDM_DESC_S_DONE is not set %d 0x%llx\n",
+ dc->curr_desc_pos, curr_desc.cp_buff_paddr);
+
+ break;
+ }
+
+ /* timeout is only logged/counted; the packet is still delivered */
+ if (curr_desc.status & BIT(DDM_DESC_S_TOUT)) {
+ mif_err_limited("DDM_DESC_S_TOUT is set %d 0x%llx\n",
+ dc->curr_desc_pos, curr_desc.cp_buff_paddr);
+ dc->stat.err_desc_tout++;
+ }
+
+ /* TODO: support compressed log of DM log mover H/W */
+ if (curr_desc.status & BIT(DDM_DESC_S_COMPRESSED))
+ mif_err_limited("DDM_DESC_S_COMPRESSED is set %d 0x%llx\n",
+ dc->curr_desc_pos, curr_desc.cp_buff_paddr);
+
+ if (!curr_desc.length || (curr_desc.length > dc->max_packet_size)) {
+ mif_err_limited("length error:%d\n", curr_desc.length);
+ dc->stat.err_length++;
+ break;
+ }
+
+ /* translate the CP-side buffer address to an AP physical address */
+ paddr = curr_desc.cp_buff_paddr -
+ dc->buff_rgn_offset - dc->cp_ddm_pbase + dc->buff_pbase;
+ addr = phys_to_virt(paddr);
+
+ if (dc->buff_rgn_cached && !dc->hw_iocc)
+ dma_sync_single_for_cpu(dc->dev, paddr,
+ dc->max_packet_size, DMA_FROM_DEVICE);
+
+ if (unlikely(dc->enable_debug))
+ mif_info("pos:%d len:%d a:%pK/0x%lx/0x%llx upper:%d done:%d\n",
+ dc->curr_desc_pos, curr_desc.length,
+ addr, paddr, curr_desc.cp_buff_paddr,
+ upper_layer_req, dc->desc_rgn[dc->curr_desc_pos].status);
+
+ if (unlikely(upper_layer_req)) {
+ ret = direct_dm_send_to_upper_layer(dc, &curr_desc, addr);
+ if (ret)
+ break;
+
+ rcvd++;
+ } else {
+ ret = direct_dm_send_to_usb(dc, &curr_desc, addr);
+ if (ret)
+ break;
+
+ rcvd++;
+
+ if (dc->curr_desc_pos == dc->curr_done_pos) {
+ if (unlikely(dc->enable_debug))
+ mif_info("prev desc is enqueued:%d\n",
+ dc->curr_done_pos);
+ break;
+ }
+ }
+ }
+
+ if (unlikely(dc->enable_debug))
+ mif_info("rcvd:%d\n", rcvd);
+
+ spin_unlock_irqrestore(&dc->rx_lock, flags);
+}
+
+/* Kick the RX path: cancel any pending retry timer, then run the RX drain
+ * either in the high-priority tasklet or synchronously in this context.
+ */
+static void direct_dm_run_rx_func(struct direct_dm_ctrl *dc)
+{
+	if (!dc) {
+		mif_err_limited("dc is null\n");
+		return;
+	}
+
+	/* Drop any pending retry timer before (re)running RX. */
+	if (dc->use_rx_timer && hrtimer_active(&dc->rx_timer))
+		hrtimer_cancel(&dc->rx_timer);
+
+	if (!dc->use_rx_task) {
+		direct_dm_rx_func((unsigned long)dc);
+		return;
+	}
+
+	tasklet_hi_schedule(&dc->rx_task);
+}
+
+/* IRQ handler */
+/* Mailbox IRQ handler: CP signalled new DM descriptors; run the RX path. */
+static irqreturn_t direct_dm_irq_handler(int irq, void *arg)
+{
+	struct direct_dm_ctrl *dc = arg;
+
+	if (dc)
+		direct_dm_run_rx_func(dc);
+	else
+		mif_err_limited("dc is null\n");
+
+	return IRQ_HANDLED;
+}
+
+/* Callback from USB driver */
+/* USB gadget callback: DM function became active. Mark usb_active under
+ * rx_lock, then — if a previous USB request failed — rerun the RX path to
+ * flush descriptors that accumulated while USB was down.
+ */
+static void direct_dm_usb_active_noti(void *arg)
+{
+ struct direct_dm_ctrl *dc = (struct direct_dm_ctrl *)arg;
+ unsigned long flags;
+
+ if (!dc) {
+ mif_err_limited("dc is null\n");
+ return;
+ }
+
+ spin_lock_irqsave(&dc->rx_lock, flags);
+
+ mif_info("usb is activated\n");
+ dc->usb_active = true;
+
+ spin_unlock_irqrestore(&dc->rx_lock, flags);
+
+ if (dc->usb_req_failed) {
+ mif_info("run rx func\n");
+ direct_dm_run_rx_func(dc);
+ }
+}
+
+/* USB gadget callback: DM function was disabled. Clear usb_active and set
+ * usb_req_failed (so reactivation reruns RX), then cancel any pending retry
+ * timer — there is no point retrying while USB is down.
+ */
+static void direct_dm_usb_disable_noti(void *arg)
+{
+ struct direct_dm_ctrl *dc = (struct direct_dm_ctrl *)arg;
+ unsigned long flags;
+
+ if (!dc) {
+ mif_err_limited("dc is null\n");
+ return;
+ }
+
+ spin_lock_irqsave(&dc->rx_lock, flags);
+
+ mif_info("usb is deactivated\n");
+ dc->usb_active = false;
+ dc->usb_req_failed = true;
+
+ spin_unlock_irqrestore(&dc->rx_lock, flags);
+
+ if (dc->use_rx_timer && hrtimer_active(&dc->rx_timer)) {
+ mif_info("cancel rx timer\n");
+ hrtimer_cancel(&dc->rx_timer);
+ }
+}
+
+/* USB gadget callback: a DM request for @addr/@length completed. Validate
+ * that @addr lies inside the DM buffer region, derive the descriptor index
+ * from its offset, clear that descriptor's DONE bit and advance
+ * curr_done_pos. A mismatch with curr_done_pos indicates out-of-order
+ * completion and is only logged.
+ */
+static void direct_dm_usb_completion_noti(void *addr, int length, void *arg)
+{
+ struct direct_dm_ctrl *dc = (struct direct_dm_ctrl *)arg;
+ unsigned long paddr;
+ u32 pos;
+ unsigned long flags;
+
+ if (!dc) {
+ mif_err_limited("dc is null\n");
+ return;
+ }
+
+ dc->stat.usb_complete_cnt++;
+
+ paddr = virt_to_phys(addr);
+ if ((paddr < dc->buff_pbase) ||
+ (paddr >= (dc->buff_pbase + dc->buff_rgn_size))) {
+ mif_err("addr error:%pK 0x%lx 0x%lx 0x%x\n",
+ addr, paddr, dc->buff_pbase, dc->buff_rgn_offset);
+ dc->stat.err_usb_complete++;
+ return;
+ }
+
+ /* hand the buffer slot back to the device before CP reuses it */
+ if (dc->buff_rgn_cached && !dc->hw_iocc)
+ dma_sync_single_for_device(dc->dev, paddr,
+ dc->max_packet_size, DMA_FROM_DEVICE);
+
+ spin_lock_irqsave(&dc->rx_lock, flags);
+ pos = (paddr - dc->buff_pbase) / dc->max_packet_size;
+ dc->desc_rgn[pos].status &= ~BIT(DDM_DESC_S_DONE);
+
+ if (dc->curr_done_pos != pos)
+ mif_err("pos error! pos:%d done:%d len:%d a:%pK/0x%lx\n",
+ pos, dc->curr_done_pos, length, addr, paddr);
+
+ if ((length <= 0) || (length > dc->max_packet_size)) {
+ mif_err_limited("length error:%d\n", length);
+ dc->usb_req_failed = true;
+ }
+
+ if (unlikely(dc->enable_debug))
+ mif_info("pos:%d done:%d len:%d a:%pK/0x%lx\n",
+ pos, dc->curr_done_pos, length, addr, paddr);
+
+ dc->curr_done_pos = circ_new_ptr(dc->num_desc,
+ dc->curr_done_pos, 1);
+
+ spin_unlock_irqrestore(&dc->rx_lock, flags);
+}
+
+/* sysfs */
+/* sysfs read: dump the controller configuration (DT-derived settings and
+ * region layout) plus current ring positions and runtime flags.
+ */
+static ssize_t ctrl_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct direct_dm_ctrl *dc = _dc;
+ ssize_t count = 0;
+
+ if (!dc) {
+ mif_err("dc is null\n");
+ return count;
+ }
+
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "version:%d shm_rgn_index:%d\n",
+ dc->version, dc->shm_rgn_index);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "hw_iocc:%d info_desc_rgn_cached:%d buff_rgn_cached:%d\n",
+ dc->hw_iocc, dc->info_desc_rgn_cached, dc->buff_rgn_cached);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "info_rgn_offset:0x%08x info_rgn_size:0x%08x\n",
+ dc->info_rgn_offset, dc->info_rgn_size);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "desc_rgn_offset:0x%08x desc_rgn_size:0x%08x num_desc:%d\n",
+ dc->desc_rgn_offset, dc->desc_rgn_size, dc->num_desc);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "buff_rgn_offset:0x%08x buff_rgn_size:0x%08x\n",
+ dc->buff_rgn_offset, dc->buff_rgn_size);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "max_packet_size:%d usb_req_num:%d irq_index:%d\n",
+ dc->max_packet_size, dc->usb_req_num, dc->irq_index);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "cp_ddm_pbase:0x%08x\n", dc->cp_ddm_pbase);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "curr_desc_pos:%d\n", dc->curr_desc_pos);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "curr_done_pos:%d\n", dc->curr_done_pos);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "use_rx_task:%d\n", dc->use_rx_task);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "use_rx_timer:%d\n", dc->use_rx_timer);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "rx_timer_period_msec:%d\n", dc->rx_timer_period_msec);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "enable_debug:%d\n", dc->enable_debug);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "usb_req_failed:%d\n", dc->usb_req_failed);
+
+ return count;
+}
+
+/* sysfs read: dump runtime counters (errors, USB request/completion counts,
+ * upper-layer deliveries, retry-timer activity).
+ */
+static ssize_t stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct direct_dm_ctrl *dc = _dc;
+ ssize_t count = 0;
+
+ if (!dc) {
+ mif_err("dc is null\n");
+ return count;
+ }
+
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "err_desc_tout:%lld\n", dc->stat.err_desc_tout);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "err_length:%lld\n", dc->stat.err_length);
+
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "\n");
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "usb_req_cnt:%lld\n", dc->stat.usb_req_cnt);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "err_usb_req:%lld\n", dc->stat.err_usb_req);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "usb_complete_cnt:%lld\n", dc->stat.usb_complete_cnt);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "err_usb_complete:%lld\n", dc->stat.err_usb_complete);
+
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "\n");
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "upper_layer_req_cnt:%lld\n", dc->stat.upper_layer_req_cnt);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "err_upper_layer_req:%lld\n", dc->stat.err_upper_layer_req);
+
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "\n");
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "rx_timer_req:%lld\n", dc->stat.rx_timer_req);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "rx_timer_expire:%lld\n", dc->stat.rx_timer_expire);
+
+ return count;
+}
+
+/* sysfs read: dump the shared info region (values visible to the CP). */
+static ssize_t info_rgn_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct direct_dm_ctrl *dc = _dc;
+ ssize_t count = 0;
+
+ if (!dc) {
+ mif_err("dc is null\n");
+ return count;
+ }
+
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "version:%d max_packet_size:%d\n",
+ dc->info_rgn->version, dc->info_rgn->max_packet_size);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "silent_log:%d\n",
+ dc->info_rgn->silent_log);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "cp_desc_pbase:0x%llx cp_buff_pbase:0x%llx\n",
+ dc->info_rgn->cp_desc_pbase, dc->info_rgn->cp_buff_pbase);
+
+ return count;
+}
+
+/* sysfs read: dump ring positions and every descriptor entry.
+ * NOTE(review): with large num_desc the output can exceed PAGE_SIZE;
+ * scnprintf() truncates safely but later entries are then dropped.
+ */
+static ssize_t desc_rgn_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct direct_dm_ctrl *dc = _dc;
+ ssize_t count = 0;
+ int i;
+
+ if (!dc) {
+ mif_err("dc is null\n");
+ return count;
+ }
+
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "curr_desc_pos:%d\n", dc->curr_desc_pos);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "curr_done_pos:%d\n", dc->curr_done_pos);
+
+ for (i = 0; i < dc->num_desc; i++) {
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "%d 0x%llx 0x%x 0x%x %d\n",
+ i, dc->desc_rgn[i].cp_buff_paddr, dc->desc_rgn[i].control,
+ dc->desc_rgn[i].status, dc->desc_rgn[i].length);
+ }
+
+ return count;
+}
+
+/* sysfs write: any non-zero integer enables verbose debug logging. */
+static ssize_t enable_debug_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct direct_dm_ctrl *dc = _dc;
+	int input;
+	int err;
+
+	if (!dc) {
+		mif_err("dc is null\n");
+		return count;
+	}
+
+	err = kstrtoint(buf, 0, &input);
+	if (err) {
+		mif_err("kstrtoint() error:%d\n", err);
+		return err;
+	}
+
+	dc->enable_debug = input ? true : false;
+	mif_info("enable_debug:%d\n", dc->enable_debug);
+
+	return count;
+}
+
+/* sysfs read: report the current debug-logging flag. */
+static ssize_t enable_debug_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct direct_dm_ctrl *dc = _dc;
+	ssize_t len = 0;
+
+	if (!dc) {
+		mif_err("dc is null\n");
+		return len;
+	}
+
+	len += scnprintf(&buf[len], PAGE_SIZE - len, "enable_debug:%d\n",
+			 dc->enable_debug);
+
+	return len;
+}
+
+/* sysfs write: any non-zero integer routes RX processing through the
+ * high-priority tasklet instead of running it inline.
+ */
+static ssize_t use_rx_task_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct direct_dm_ctrl *dc = _dc;
+	int input;
+	int err;
+
+	if (!dc) {
+		mif_err("dc is null\n");
+		return count;
+	}
+
+	err = kstrtoint(buf, 0, &input);
+	if (err) {
+		mif_err("kstrtoint() error:%d\n", err);
+		return err;
+	}
+
+	dc->use_rx_task = input ? true : false;
+	mif_info("use_rx_task:%d\n", dc->use_rx_task);
+
+	return count;
+}
+
+/* sysfs read: report whether RX runs in the tasklet. */
+static ssize_t use_rx_task_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct direct_dm_ctrl *dc = _dc;
+	ssize_t len = 0;
+
+	if (!dc) {
+		mif_err("dc is null\n");
+		return len;
+	}
+
+	len += scnprintf(&buf[len], PAGE_SIZE - len, "use_rx_task:%d\n",
+			 dc->use_rx_task);
+
+	return len;
+}
+
+/* sysfs write: any non-zero integer enables the RX retry timer. */
+static ssize_t use_rx_timer_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct direct_dm_ctrl *dc = _dc;
+	int input;
+	int err;
+
+	if (!dc) {
+		mif_err("dc is null\n");
+		return count;
+	}
+
+	err = kstrtoint(buf, 0, &input);
+	if (err) {
+		mif_err("kstrtoint() error:%d\n", err);
+		return err;
+	}
+
+	dc->use_rx_timer = input ? true : false;
+	mif_info("use_rx_timer:%d\n", dc->use_rx_timer);
+
+	return count;
+}
+
+/* sysfs read: report whether the RX retry timer is enabled. */
+static ssize_t use_rx_timer_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct direct_dm_ctrl *dc = _dc;
+	ssize_t len = 0;
+
+	if (!dc) {
+		mif_err("dc is null\n");
+		return len;
+	}
+
+	len += scnprintf(&buf[len], PAGE_SIZE - len, "use_rx_timer:%d\n",
+			 dc->use_rx_timer);
+
+	return len;
+}
+
+/* sysfs write: set the RX retry timer period in milliseconds. Rejected
+ * (silently, returning count) when use_rx_timer is not enabled.
+ */
+static ssize_t rx_timer_period_msec_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	struct direct_dm_ctrl *dc = _dc;
+	int period;
+	int err;
+
+	if (!dc) {
+		mif_err("dc is null\n");
+		return count;
+	}
+
+	if (!dc->use_rx_timer) {
+		mif_err("use_rx_timer is not set\n");
+		return count;
+	}
+
+	err = kstrtoint(buf, 0, &period);
+	if (err) {
+		mif_err("kstrtoint() error:%d\n", err);
+		return err;
+	}
+
+	dc->rx_timer_period_msec = period;
+	mif_info("rx_timer_period_msec:%d\n", dc->rx_timer_period_msec);
+
+	return count;
+}
+
+/* sysfs read: report the RX retry timer period (only when the timer is
+ * enabled; otherwise an empty read).
+ */
+static ssize_t rx_timer_period_msec_show(struct device *dev,
+					 struct device_attribute *attr, char *buf)
+{
+	struct direct_dm_ctrl *dc = _dc;
+	ssize_t len = 0;
+
+	if (!dc) {
+		mif_err("dc is null\n");
+		return len;
+	}
+
+	if (!dc->use_rx_timer) {
+		mif_err("use_rx_timer is not set\n");
+		return len;
+	}
+
+	len += scnprintf(&buf[len], PAGE_SIZE - len, "rx_timer_period_msec:%d\n",
+			 dc->rx_timer_period_msec);
+
+	return len;
+}
+
+/* Ring position used by the test_dm attribute between writes. */
+static u32 _test_dm_desc_pos;
+/* sysfs write (test hook): fabricate @val DM packets by filling descriptor
+ * buffers with a pattern, marking them DONE, then invoking the IRQ handler
+ * as if the CP had produced them.
+ */
+static ssize_t test_dm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct direct_dm_ctrl *dc = _dc;
+ void *addr;
+ int val;
+ int ret;
+ int i;
+
+ if (!dc) {
+ mif_err("dc is null\n");
+ return count;
+ }
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret) {
+ mif_err("kstrtoint() error:%d\n", ret);
+ return ret;
+ }
+
+ if (!val || (val > dc->num_desc)) {
+ mif_err("val error:%d\n", val);
+ return count;
+ }
+
+ for (i = 0; i < val; i++) {
+ /* translate the CP buffer address back to an AP virtual address;
+ * NOTE(review): relies on GCC void-pointer arithmetic on buff_vbase
+ */
+ addr = dc->desc_rgn[_test_dm_desc_pos].cp_buff_paddr -
+ dc->buff_rgn_offset - dc->cp_ddm_pbase + dc->buff_vbase;
+
+ mif_info("pos:%d a:%pK/0x%llx\n",
+ _test_dm_desc_pos, addr,
+ dc->desc_rgn[_test_dm_desc_pos].cp_buff_paddr);
+
+ if (dc->desc_rgn[_test_dm_desc_pos].status & BIT(DDM_DESC_S_DONE)) {
+ mif_err("DDM_DESC_S_DONE is already set. pos:%d\n",
+ _test_dm_desc_pos);
+ break;
+ }
+
+ memset(addr, _test_dm_desc_pos, dc->max_packet_size);
+ dc->desc_rgn[_test_dm_desc_pos].length = dc->max_packet_size;
+ dc->desc_rgn[_test_dm_desc_pos].status |= BIT(DDM_DESC_S_DONE);
+
+ if (dc->desc_rgn[_test_dm_desc_pos].control & BIT(DDM_DESC_C_END))
+ _test_dm_desc_pos = 0;
+ else
+ _test_dm_desc_pos++;
+ }
+
+ direct_dm_irq_handler(dc->irq_index, dc);
+
+ return count;
+}
+
+/* sysfs write (test hook): force the shared-info silent_log flag so the RX
+ * path routes CP log packets to the upper layer instead of USB.
+ */
+static ssize_t test_silent_log_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct direct_dm_ctrl *dc = _dc;
+	int input;
+	int err;
+
+	if (!dc) {
+		mif_err("dc is null\n");
+		return count;
+	}
+
+	err = kstrtoint(buf, 0, &input);
+	if (err) {
+		mif_err("kstrtoint() error:%d\n", err);
+		return err;
+	}
+
+	dc->info_rgn->silent_log = input ? 1 : 0;
+	mif_info("silent_log:%d\n", dc->info_rgn->silent_log);
+
+	return count;
+}
+
+static DEVICE_ATTR_RO(ctrl_status);
+static DEVICE_ATTR_RO(stat);
+static DEVICE_ATTR_RO(info_rgn);
+static DEVICE_ATTR_RO(desc_rgn);
+static DEVICE_ATTR_RW(enable_debug);
+static DEVICE_ATTR_RW(use_rx_task);
+static DEVICE_ATTR_RW(use_rx_timer);
+static DEVICE_ATTR_RW(rx_timer_period_msec);
+static DEVICE_ATTR_WO(test_dm);
+static DEVICE_ATTR_WO(test_silent_log);
+
+/* Attributes exported under the platform device's sysfs directory. */
+static struct attribute *direct_dm_attrs[] = {
+ &dev_attr_ctrl_status.attr,
+ &dev_attr_stat.attr,
+ &dev_attr_info_rgn.attr,
+ &dev_attr_desc_rgn.attr,
+ &dev_attr_enable_debug.attr,
+ &dev_attr_use_rx_task.attr,
+ &dev_attr_use_rx_timer.attr,
+ &dev_attr_rx_timer_period_msec.attr,
+ &dev_attr_test_dm.attr,
+ &dev_attr_test_silent_log.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(direct_dm);
+
+/* Initialize */
+int direct_dm_init(struct link_device *ld)
+{
+ struct direct_dm_ctrl *dc = _dc;
+ int i;
+
+ if (!dc) {
+ mif_err("dc is null\n");
+ return -EPERM;
+ }
+
+ dc->ld = ld;
+
+ dc->info_rgn->version = dc->version;
+ dc->info_rgn->max_packet_size = dc->max_packet_size;
+ dc->info_rgn->cp_desc_pbase = dc->cp_ddm_pbase + dc->desc_rgn_offset;
+ dc->info_rgn->cp_buff_pbase = dc->cp_ddm_pbase + dc->buff_rgn_offset;
+ mif_info("version:%d max_packet_size:%d\n",
+ dc->info_rgn->version, dc->info_rgn->max_packet_size);
+ mif_info("cp_desc_pbase:0x%llx cp_buff_pbase:0x%llx\n",
+ dc->info_rgn->cp_desc_pbase, dc->info_rgn->cp_buff_pbase);
+
+ memset(dc->desc_vbase, 0, dc->desc_rgn_size);
+ for (i = 0; i < dc->num_desc; i++) {
+ dc->desc_rgn[i].cp_buff_paddr = dc->cp_ddm_pbase +
+ dc->buff_rgn_offset + (dc->max_packet_size * i);
+
+ dc->desc_rgn[i].control |= BIT(DDM_DESC_C_INT);
+
+ if (i == (dc->num_desc - 1))
+ dc->desc_rgn[i].control |= BIT(DDM_DESC_C_END);
+ }
+
+ if (unlikely(dc->enable_debug)) {
+ for (i = 0; i < dc->num_desc; i++) {
+ mif_info("%d a:0x%llx c:0x%x s:0x%x l:%d\n",
+ i, dc->desc_rgn[i].cp_buff_paddr, dc->desc_rgn[i].control,
+ dc->desc_rgn[i].status, dc->desc_rgn[i].length);
+ }
+ }
+
+ memset(dc->buff_vbase, 0, dc->buff_rgn_size);
+
+ dc->curr_desc_pos = 0;
+ dc->curr_done_pos = 0;
+ _test_dm_desc_pos = 0;
+
+ dc->usb_req_failed = false;
+
+ memset(&dc->stat, 0, sizeof(dc->stat));
+
+ return 0;
+}
+EXPORT_SYMBOL(direct_dm_init);
+
+/* Placeholder for symmetry with direct_dm_init(); nothing to tear down yet. */
+int direct_dm_deinit(void)
+{
+ return 0;
+}
+EXPORT_SYMBOL(direct_dm_deinit);
+
+/* Create */
+/* Resolve the DM shared-memory region: translate the AP physical base into
+ * the CP-visible base (cp_ddm_pbase), map the info+descriptor regions and
+ * the data buffer region (cached via phys_to_virt or non-cached via
+ * cp_shmem_get_nc_region), zero them, and derive num_desc from the buffer
+ * size. Returns 0 on success or a negative errno.
+ */
+static int direct_dm_setup_region(struct direct_dm_ctrl *dc)
+{
+ unsigned long cp_pbase;
+ unsigned long ddm_pbase;
+ u32 ddm_rgn_size;
+
+ if (!dc) {
+ mif_err("dc is null\n");
+ return -EPERM;
+ }
+
+ cp_pbase = cp_shmem_get_base(0, SHMEM_CP);
+ if (!cp_pbase) {
+ mif_err("cp_pbase is null\n");
+ return -ENOMEM;
+ }
+ ddm_pbase = cp_shmem_get_base(0, dc->shm_rgn_index);
+ if (!ddm_pbase) {
+ mif_err("ddm_pbase is null\n");
+ return -ENOMEM;
+ }
+ ddm_rgn_size = cp_shmem_get_size(0, dc->shm_rgn_index);
+ if (!ddm_rgn_size) {
+ mif_err("ddm_rgn_size is null\n");
+ return -ENOMEM;
+ }
+ mif_info("cp_pbase:0x%lx ddm_pbase:0x%lx ddm_rgn_size:0x%08x\n",
+ cp_pbase, ddm_pbase, ddm_rgn_size);
+
+ /* TODO: support slim modem */
+ dc->cp_ddm_pbase = ddm_pbase - cp_pbase + CP_CPU_BASE_ADDRESS;
+ mif_info("cp_ddm_pbase:0x%08x\n", dc->cp_ddm_pbase);
+
+ if (dc->info_desc_rgn_cached) {
+ dc->info_vbase = phys_to_virt(ddm_pbase + dc->info_rgn_offset);
+ dc->desc_vbase = phys_to_virt(ddm_pbase + dc->desc_rgn_offset);
+ if (!dc->hw_iocc) {
+ mif_err("cached region is not supported yet without hw_iocc\n");
+ return -EINVAL;
+ }
+ } else {
+ dc->info_vbase = cp_shmem_get_nc_region(
+ ddm_pbase + dc->info_rgn_offset,
+ dc->info_rgn_size + dc->desc_rgn_size);
+ if (!dc->info_vbase) {
+ mif_err("dc->info_base error\n");
+ return -ENOMEM;
+ }
+ dc->desc_vbase = dc->info_vbase + dc->info_rgn_size;
+ }
+ memset(dc->info_vbase, 0, dc->info_rgn_size + dc->desc_rgn_size);
+ mif_info("info_rgn_size:0x%08x desc_rgn_size:0x%08x\n",
+ dc->info_rgn_size, dc->desc_rgn_size);
+
+ /* everything after info+desc belongs to the packet buffer */
+ dc->buff_rgn_size = ddm_rgn_size -
+ (dc->info_rgn_size + dc->desc_rgn_size);
+ dc->buff_pbase = ddm_pbase + dc->buff_rgn_offset;
+ if (dc->buff_rgn_cached) {
+ dc->buff_vbase = phys_to_virt(dc->buff_pbase);
+ } else {
+ dc->buff_vbase = cp_shmem_get_nc_region(
+ dc->buff_pbase, dc->buff_rgn_size);
+ if (!dc->buff_vbase) {
+ mif_err("dc->buff_vbase error\n");
+ return -ENOMEM;
+ }
+ }
+ memset(dc->buff_vbase, 0, dc->buff_rgn_size);
+ dc->num_desc = dc->buff_rgn_size / dc->max_packet_size;
+ if (dc->buff_rgn_cached && !dc->hw_iocc)
+ dma_sync_single_for_device(dc->dev, dc->buff_pbase,
+ dc->buff_rgn_size, DMA_FROM_DEVICE);
+ mif_info("buff_rgn_size:0x%08x num_desc:%d\n",
+ dc->buff_rgn_size, dc->num_desc);
+
+ dc->info_rgn = (struct direct_dm_info_rgn *)dc->info_vbase;
+ dc->desc_rgn = (struct direct_dm_desc *)dc->desc_vbase;
+
+ return 0;
+}
+
+static int direct_dm_register_irq(struct direct_dm_ctrl *dc)
+{
+ int ret;
+
+ ret = cp_mbox_register_handler(dc->irq_index, 0,
+ direct_dm_irq_handler, dc);
+ if (ret) {
+ mif_err("cp_mbox_register_handler() error:%d\n", ret);
+ goto error;
+ }
+
+error:
+ return ret;
+}
+
+static int direct_dm_read_dt(struct device_node *np,
+ struct direct_dm_ctrl *dc)
+{
+ mif_dt_read_u32(np, "version", dc->version);
+ mif_dt_read_u32(np, "shm_rgn_index", dc->shm_rgn_index);
+ mif_dt_read_u32(np, "hw_iocc", dc->hw_iocc);
+ mif_dt_read_u32(np, "info_desc_rgn_cached", dc->info_desc_rgn_cached);
+ mif_dt_read_u32(np, "buff_rgn_cached", dc->buff_rgn_cached);
+
+ mif_dt_read_u32(np, "info_rgn_offset", dc->info_rgn_offset);
+ mif_dt_read_u32(np, "info_rgn_size", dc->info_rgn_size);
+ mif_dt_read_u32(np, "desc_rgn_offset", dc->desc_rgn_offset);
+ mif_dt_read_u32(np, "desc_rgn_size", dc->desc_rgn_size);
+ mif_dt_read_u32(np, "buff_rgn_offset", dc->buff_rgn_offset);
+
+ mif_dt_read_u32(np, "max_packet_size", dc->max_packet_size);
+ mif_dt_read_u32(np, "usb_req_num", dc->usb_req_num);
+ mif_dt_read_u32(np, "irq_index", dc->irq_index);
+
+ mif_dt_read_bool(np, "use_rx_task", dc->use_rx_task);
+ mif_dt_read_bool(np, "use_rx_timer", dc->use_rx_timer);
+ mif_dt_read_u32(np, "rx_timer_period_msec", dc->rx_timer_period_msec);
+
+ mif_info("version:%d shm_rgn_index:%d\n",
+ dc->version, dc->shm_rgn_index);
+ mif_info("hw_iocc:%d info_desc_rgn_cached:%d buff_rgn_cached:%d\n",
+ dc->hw_iocc, dc->info_desc_rgn_cached, dc->buff_rgn_cached);
+
+ mif_info("info_rgn_offset:0x%08x info_rgn_size:0x%08x\n",
+ dc->info_rgn_offset, dc->info_rgn_size);
+ mif_info("desc_rgn_offset:0x%08x desc_rgn_size:0x%08x\n",
+ dc->desc_rgn_offset, dc->desc_rgn_size);
+ mif_info("buff_rgn_offset:0x%08x\n", dc->buff_rgn_offset);
+
+ mif_info("max_packet_size:%d usb_req_num:%d irq_index:%d\n",
+ dc->max_packet_size, dc->usb_req_num, dc->irq_index);
+ mif_info("use_rx_timer:%d rx_timer_period_msec:%d\n",
+ dc->use_rx_timer, dc->rx_timer_period_msec);
+
+
+ return 0;
+}
+
+int direct_dm_create(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct direct_dm_ctrl *dc = NULL;
+ int ret;
+
+ mif_info("+++\n");
+
+ if (!np) {
+ mif_err("of_node is null\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ _dc = devm_kzalloc(dev, sizeof(struct direct_dm_ctrl), GFP_KERNEL);
+ if (!_dc) {
+ mif_err_limited("devm_kzalloc() error\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ dc = _dc;
+
+ dc->dev = dev;
+
+ ret = direct_dm_read_dt(np, dc);
+ if (ret) {
+ mif_err("direct_dm_read_dt() error:%d\n", ret);
+ goto err_setup;
+ }
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+
+ ret = direct_dm_register_irq(dc);
+ if (ret) {
+ mif_err("direct_dm_register_irq() error:%d\n", ret);
+ goto err_setup;
+ }
+
+ ret = direct_dm_setup_region(dc);
+ if (ret) {
+ mif_err("direct_dm_setup_region() error:%d\n", ret);
+ goto err_setup;
+ }
+
+ ret = init_dm_direct_path(dc->usb_req_num,
+ direct_dm_usb_completion_noti,
+ direct_dm_usb_active_noti,
+ direct_dm_usb_disable_noti,
+ (void *)dc);
+ if (ret) {
+ mif_err("init_dm_direct_path() error:%d\n", ret);
+ goto err_setup;
+ }
+
+ spin_lock_init(&dc->rx_lock);
+
+ tasklet_init(&dc->rx_task, direct_dm_rx_func, (unsigned long)dc);
+
+ spin_lock_init(&dc->rx_timer_lock);
+ hrtimer_init(&dc->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ dc->rx_timer.function = direct_dm_rx_timer;
+
+ dev_set_drvdata(dev, dc);
+
+ ret = sysfs_create_groups(&dev->kobj, direct_dm_groups);
+ if (ret != 0) {
+ mif_err("sysfs_create_group() error:%d\n", ret);
+ goto err_setup;
+ }
+
+ mif_info("---\n");
+
+ return 0;
+
+err_setup:
+ devm_kfree(dev, dc);
+ dc = NULL;
+
+err:
+ panic("Direct DM driver probe failed\n");
+ mif_err("xxx\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(direct_dm_create);
+
+/* Platform driver */
/* Platform-bus probe entry: all the work happens in direct_dm_create(). */
static int direct_dm_probe(struct platform_device *pdev)
{
	int ret = direct_dm_create(pdev);

	return ret;
}
+
/* Remove callback: nothing to tear down here; the controller memory is
 * devm-managed (allocated in direct_dm_create()).
 */
static int direct_dm_remove(struct platform_device *pdev)
{
	return 0;
}
+
/* Noirq suspend hook: no device state is saved; exists so the PM core
 * has a callback registered (see direct_dm_pm_ops).
 */
static int direct_dm_suspend(struct device *dev)
{
	return 0;
}
+
/* Noirq resume hook: no device state is restored; counterpart of
 * direct_dm_suspend().
 */
static int direct_dm_resume(struct device *dev)
{
	return 0;
}
+
/* Bind suspend/resume into the noirq phase of system sleep. */
static const struct dev_pm_ops direct_dm_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(direct_dm_suspend, direct_dm_resume)
};
+
/* Device-tree match table; also exported for module autoloading. */
static const struct of_device_id direct_dm_dt_match[] = {
	{ .compatible = "samsung,cpif-direct-dm", },
	{},
};
MODULE_DEVICE_TABLE(of, direct_dm_dt_match);
+
+static struct platform_driver direct_dm_driver = {
+ .probe = direct_dm_probe,
+ .remove = direct_dm_remove,
+ .driver = {
+ .name = "direct_dm",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(direct_dm_dt_match),
+ .pm = &direct_dm_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(direct_dm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung direct DM driver");
diff --git a/direct_dm.h b/direct_dm.h
new file mode 100644
index 0000000..534b0b0
--- /dev/null
+++ b/direct_dm.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#ifndef __DIRECT_DM_H__
+#define __DIRECT_DM_H__
+
+#include <linux/interrupt.h>
+
/* Bit positions in direct_dm_desc.control (written by AP). */
enum direct_dm_desc_control_bits {
	DDM_DESC_C_END = 3,	/* End of buffer descriptor */
	DDM_DESC_C_INT		/* Interrupt enabled */
};

/* Bit positions in direct_dm_desc.status (written by CP/hw). */
enum direct_dm_desc_status_bits {
	DDM_DESC_S_DONE,	/* DMA done */
	DDM_DESC_S_TOUT,	/* Transferred on timeout */
	DDM_DESC_S_COMPRESSED = 5	/* "1":compressed log is on, "0":comp is off */
};

/* One DM buffer descriptor as laid out in shared memory (16 bytes).
 * cp_buff_paddr is a 40-bit CP-side physical address.
 */
struct direct_dm_desc {
	u64 cp_buff_paddr:40,
		_reserved_0:24;
	u64 length:16,
		status:8,
		control:8,
		_reserved_1:32;
} __packed;

/* Info region header shared with CP: protocol version, packet sizing
 * and the CP-side physical bases of the descriptor and buffer regions
 * (40-bit addresses).
 */
struct direct_dm_info_rgn {
	u64 version:4,
		_reserved_0:4,
		max_packet_size:16,
		silent_log:1,
		_reserved_1:39;
	u64 cp_desc_pbase:40,
		_reserved_2:24;
	u64 cp_buff_pbase:40,
		_reserved_3:24;
} __packed;

/* Statistics */
/* Counters exposed for debugging; "err_*" fields count failures of the
 * corresponding operation.
 */
struct direct_dm_statistics {
	u64 err_desc_done;
	u64 err_desc_tout;
	u64 err_length;

	u64 usb_req_cnt;
	u64 err_usb_req;
	u64 usb_complete_cnt;
	u64 err_usb_complete;

	u64 upper_layer_req_cnt;
	u64 err_upper_layer_req;

	u64 rx_timer_req;
	u64 rx_timer_expire;
};
+
/* Per-device state of the direct DM driver, allocated once at probe. */
struct direct_dm_ctrl {
	struct link_device *ld;

	/* device-tree configuration (see direct_dm_read_dt()) */
	u32 version;
	u32 shm_rgn_index;

	/* cacheability/coherency options for the shared regions */
	u32 hw_iocc;
	u32 info_desc_rgn_cached;
	u32 buff_rgn_cached;

	/* layout of the shared DM region: info, descriptor and buffer parts */
	u32 info_rgn_offset;
	u32 info_rgn_size;
	u32 desc_rgn_offset;
	u32 desc_rgn_size;
	u32 num_desc;
	u32 buff_rgn_offset;
	u32 buff_rgn_size;

	u32 max_packet_size;
	u32 usb_req_num;
	u32 irq_index;

	/* CP-CPU view of the DM region base */
	u32 cp_ddm_pbase;

	struct device *dev;

	/* ring positions: producer/consumer cursors into the desc region */
	u32 curr_desc_pos;
	u32 curr_done_pos;

	bool usb_req_failed;

	/* RX processing: tasklet and optional periodic timer */
	spinlock_t rx_lock;
	struct tasklet_struct rx_task;
	bool use_rx_task;

	bool use_rx_timer;
	spinlock_t rx_timer_lock;
	struct hrtimer rx_timer;
	u32 rx_timer_period_msec;

	bool usb_active;

	struct direct_dm_statistics stat;

	bool enable_debug;

	/* AP-side mappings of the shared regions */
	void __iomem *info_vbase;
	void __iomem *desc_vbase;
	void __iomem *buff_vbase;
	unsigned long buff_pbase;

	struct direct_dm_info_rgn *info_rgn;
	struct direct_dm_desc *desc_rgn;
};
+
/* Public API; compiled to no-op stubs when the feature is disabled so
 * callers need no #ifdef guards of their own.
 */
#if IS_ENABLED(CONFIG_CPIF_DIRECT_DM)
extern int direct_dm_create(struct platform_device *pdev);
extern int direct_dm_init(struct link_device *ld);
extern int direct_dm_deinit(void);
#else
static inline int direct_dm_create(struct platform_device *pdev) { return 0; }
static inline int direct_dm_init(struct link_device *ld) { return 0; }
static inline int direct_dm_deinit(void) { return 0; }
#endif
+
+#endif /* __DIRECT_DM_H__ */
diff --git a/dit.h b/dit.h
new file mode 100644
index 0000000..1abd158
--- /dev/null
+++ b/dit.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS DIT(Direct IP Translator) Driver support
+ *
+ */
+
+#ifndef __DIT_H__
+#define __DIT_H__
+
+#include "modem_utils.h"
+#include "modem_toe_device.h"
+
/* Transfer direction through the DIT hardware. */
enum dit_direction {
	DIT_DIR_TX,
	DIT_DIR_RX,
	DIT_DIR_MAX
};

/* How dit_init() should (re)initialize the hardware. */
enum dit_init_type {
	DIT_INIT_NORMAL = 0,
	DIT_INIT_RETRY,		/* retry after a failed init */
	DIT_INIT_DEINIT,	/* tear down instead of init */
};

/* Whether dit_init() should back up or restore register state. */
enum dit_store_type {
	DIT_STORE_NONE = 0,
	DIT_STORE_BACKUP,
	DIT_STORE_RESTORE,
};
+
/* DIT control API implemented in dit/dit.c. */
int dit_init(struct link_device *ld, enum dit_init_type type, enum dit_store_type store);
int dit_get_irq_affinity(void);
int dit_set_irq_affinity(int affinity);
int dit_set_pktproc_queue_num(enum dit_direction dir, u32 queue_num);
int dit_set_buf_size(enum dit_direction dir, u32 size);
int dit_set_pktproc_base(enum dit_direction dir, phys_addr_t base);
int dit_set_desc_ring_len(enum dit_direction dir, u32 len);
int dit_get_src_usage(enum dit_direction dir, u32 *usage);
/* declared here (SoC chipid helper) to avoid pulling in its header;
 * presumably used for hw-revision checks — confirm call sites.
 */
extern u32 gs_chipid_get_type(void);
+
/* Datapath API; stubbed when DIT is disabled.  Note the stubs return
 * -1/false/NULL so callers fall back to the non-DIT path.
 */
#if IS_ENABLED(CONFIG_EXYNOS_DIT)
int dit_enqueue_src_desc_ring(
	enum dit_direction dir, u8 *src, unsigned long src_paddr,
	u16 len, u8 ch_id, bool csum);
int dit_enqueue_src_desc_ring_skb(enum dit_direction dir, struct sk_buff *skb);
int dit_kick(enum dit_direction dir, bool retry);
bool dit_check_dir_use_queue(enum dit_direction dir, unsigned int queue_num);
int dit_reset_dst_wp_rp(enum dit_direction dir);
struct net_device *dit_get_netdev(void);
bool dit_support_clat(void);
bool dit_hal_set_clat_info(struct mem_link_device *mld, struct clat_info *clat);
#else
static inline int dit_enqueue_src_desc_ring(
	enum dit_direction dir, u8 *src, unsigned long src_paddr,
	u16 len, u8 ch_id, bool csum) { return -1; }
static inline int dit_enqueue_src_desc_ring_skb(
	enum dit_direction dir, struct sk_buff *skb) { return -1; }
static inline int dit_kick(enum dit_direction dir, bool retry) { return -1; }
static inline bool dit_check_dir_use_queue(
	enum dit_direction dir, unsigned int queue_num) { return false; }
static inline int dit_reset_dst_wp_rp(enum dit_direction dir) { return -1; }
static inline struct net_device *dit_get_netdev(void) { return NULL; }
static inline bool dit_support_clat(void) { return false; }
static inline bool dit_hal_set_clat_info(struct mem_link_device *mld, struct clat_info *clat)
{ return false; }
#endif
+
+#endif /* __DIT_H__ */
+
diff --git a/dit/Kbuild b/dit/Kbuild
new file mode 100644
index 0000000..711cedc
--- /dev/null
+++ b/dit/Kbuild
@@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile of dit

ccflags-y += -Wformat
ccflags-y += -Wformat-zero-length

obj-$(CONFIG_EXYNOS_DIT) += exynos_dit.o
exynos_dit-y += dit.o dit_net.o dit_hal.o

# Select the hw-revision-specific backend: DIT 2.2.0 when the configured
# version matches, otherwise fall back to the 2.1.0 implementation.
ifeq ($(CONFIG_EXYNOS_DIT_VERSION),0x02020000)
	exynos_dit-y += dit_2_2_0.o
else
	exynos_dit-y += dit_2_1_0.o
endif
diff --git a/dit/dit.c b/dit/dit.c
new file mode 100644
index 0000000..7af54a8
--- /dev/null
+++ b/dit/dit.c
@@ -0,0 +1,2769 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS DIT(Direct IP Translator) Driver support
+ *
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/soc/google/exynos-dit.h>
+#if IS_ENABLED(CONFIG_CPU_IDLE)
+#include <soc/google/exynos-cpupm.h>
+#endif
+
+#include "link_device.h"
+#include "dit_common.h"
+#include "dit_net.h"
+#include "dit_hal.h"
+
/* Singleton DIT controller; set up at probe time. */
static struct dit_ctrl_t *dc;

/* Last kick/interrupt window per direction and ring; head/tail of -1
 * means "not recorded yet" (see dit_set_snapshot()).
 */
static struct dit_snapshot_t snapshot[DIT_DIR_MAX][DIT_DESC_RING_MAX] = {
	{
		{ .name = "tx_int_dst0", .head = -1, .tail = -1 },
		{ .name = "tx_int_dst1", .head = -1, .tail = -1 },
		{ .name = "tx_int_dst2", .head = -1, .tail = -1 },
		{ .name = "tx_kick_src", .head = -1, .tail = -1 },
	},
	{
		{ .name = "rx_int_dst0", .head = -1, .tail = -1 },
		{ .name = "rx_int_dst1", .head = -1, .tail = -1 },
		{ .name = "rx_int_dst2", .head = -1, .tail = -1 },
		{ .name = "rx_kick_src", .head = -1, .tail = -1 },
	},
};
+
+static void dit_set_snapshot(enum dit_direction dir, enum dit_desc_ring ring_num,
+ int head, int tail, u64 packets)
+{
+ if (dir < 0 || dir >= DIT_DIR_MAX)
+ return;
+
+ if (ring_num < 0 || ring_num >= DIT_DESC_RING_MAX)
+ return;
+
+ if (head >= 0)
+ snapshot[dir][ring_num].head = head;
+ if (tail >= 0)
+ snapshot[dir][ring_num].tail = tail;
+
+ snapshot[dir][ring_num].packets = packets;
+ snapshot[dir][ring_num].total_packets += packets;
+};
+
+static int dit_get_snapshot_head(enum dit_direction dir, enum dit_desc_ring ring_num)
+{
+ if (dir < 0 || dir >= DIT_DIR_MAX)
+ return 0;
+
+ if (ring_num < 0 || ring_num >= DIT_DESC_RING_MAX)
+ return 0;
+
+ if (snapshot[dir][ring_num].head >= 0)
+ return snapshot[dir][ring_num].head;
+
+ return 0;
+}
+
+static int dit_get_snapshot_tail(enum dit_direction dir, enum dit_desc_ring ring_num)
+{
+ if (dir < 0 || dir >= DIT_DIR_MAX)
+ return 0;
+
+ if (ring_num < 0 || ring_num >= DIT_DESC_RING_MAX)
+ return 0;
+
+ if (snapshot[dir][ring_num].tail >= 0)
+ return snapshot[dir][ring_num].tail;
+
+ return 0;
+}
+
+static int dit_get_snapshot_next_head(enum dit_direction dir,
+ enum dit_desc_ring ring_num, unsigned int qlen)
+{
+ if (dir < 0 || dir >= DIT_DIR_MAX)
+ return 0;
+
+ if (ring_num < 0 || ring_num >= DIT_DESC_RING_MAX)
+ return 0;
+
+ if (snapshot[dir][ring_num].tail >= 0)
+ return circ_new_ptr(qlen, snapshot[dir][ring_num].tail, 1);
+
+ return 0;
+}
+
+static bool dit_hw_capa_matched(u32 mask)
+{
+ if (dc->hw_capabilities & mask)
+ return true;
+
+ return false;
+}
+
/*
 * Dump DIT runtime state for debugging.  @dump_bits selects the
 * sections: kick/irq snapshots, src/dst descriptor rings, and (RX
 * direction only) the hardware NAT local-port table.
 */
static void dit_print_dump(enum dit_direction dir, u32 dump_bits)
{
	u16 ring_num;
	u32 i;

	if (cpif_check_bit(dump_bits, DIT_DUMP_SNAPSHOT_BIT)) {
		mif_info("---- SNAPSHOT[dir:%d] ----\n", dir);
		for (ring_num = 0; ring_num < DIT_DESC_RING_MAX; ring_num++) {
			mif_info("%s head:%d,tail:%d\n", snapshot[dir][ring_num].name,
				snapshot[dir][ring_num].head, snapshot[dir][ring_num].tail);
		}
	}

	if (cpif_check_bit(dump_bits, DIT_DUMP_DESC_BIT)) {
		struct dit_desc_info *desc_info = &dc->desc_info[dir];
		struct dit_src_desc *src_desc = NULL;
		struct dit_dst_desc *dst_desc = NULL;

		src_desc = desc_info->src_desc_ring;
		mif_info("---- SRC RING[dir:%d] wp:%u,rp:%u ----\n", dir,
			desc_info->src_wp, desc_info->src_rp);
		/* only descriptors inside a kick range carry these bits */
		for (i = 0; i < desc_info->src_desc_ring_len; i++) {
			if (!(src_desc[i].control & DIT_SRC_KICK_CONTROL_MASK))
				continue;
			mif_info("src[%06d] ctrl:0x%02X,stat:0x%02X,ch_id:%03u\n",
				i, src_desc[i].control, src_desc[i].status, src_desc[i].ch_id);
		}

		for (ring_num = DIT_DST_DESC_RING_0; ring_num < DIT_DST_DESC_RING_MAX; ring_num++) {
			dst_desc = desc_info->dst_desc_ring[ring_num];
			mif_info("---- DST RING%d[dir:%d] wp:%u,rp:%u ----\n", ring_num, dir,
				desc_info->dst_wp[ring_num], desc_info->dst_rp[ring_num]);
			for (i = 0; i < desc_info->dst_desc_ring_len; i++) {
				if (!dst_desc[i].control && !dst_desc[i].status)
					continue;
				mif_info("dst[%d][%06d] ctrl:0x%02X,stat:0x%02X,p_info:0x%03X\n",
					ring_num, i, dst_desc[i].control, dst_desc[i].status,
					dst_desc[i].packet_info);
			}
		}
	}

	if (cpif_check_bit(dump_bits, DIT_DUMP_PORT_TABLE_BIT) && dir == DIT_DIR_RX) {
		struct nat_local_port local_port;
		u16 reply_port_dst, reply_port_dst_h, reply_port_dst_l;
		u16 origin_port_src;

		mif_info("---- PORT TABLE[dir:%d] ----\n", dir);
		for (i = 0; i < DIT_REG_NAT_LOCAL_PORT_MAX; i++) {
			local_port.hw_val = READ_REG_VALUE(dc, DIT_REG_NAT_RX_PORT_TABLE_SLOT +
				(i * DIT_REG_NAT_LOCAL_INTERVAL));
			if (!local_port.enable)
				continue;

			/* low 11 bits of the reply port come from the slot index */
			reply_port_dst_h = (u16)((local_port.reply_port_dst_h & 0xFF) << 8);
			reply_port_dst_l = (u16)(i & 0x7FF);

			/* read operation could return an invalid value if hw is running */
			if (((reply_port_dst_h >> 8) & 0x7) != (reply_port_dst_l >> 8))
				continue;

			reply_port_dst = (reply_port_dst_h | reply_port_dst_l);
			origin_port_src = local_port.origin_port_src;
			if (dit_hw_capa_matched(DIT_CAP_MASK_PORT_BIG_ENDIAN)) {
				reply_port_dst = htons(reply_port_dst);
				origin_port_src = htons(origin_port_src);
			}

			mif_info("[%04d] en:%d,o_port:%5d,r_port:%5d,addr_idx:%02d,dst:%d,udp:%d\n",
				i, local_port.enable, origin_port_src, reply_port_dst,
				local_port.addr_index, local_port.dst_ring, local_port.is_udp);
		}
	}
}
+
+bool dit_is_kicked_any(void)
+{
+ unsigned int dir;
+
+ for (dir = 0; dir < DIT_DIR_MAX; dir++) {
+ if (dc->kicked[dir])
+ return true;
+ }
+
+ return false;
+}
+
+static inline int dit_check_ring_space(
+ unsigned int qlen, unsigned int wp, unsigned int rp)
+{
+ unsigned int space;
+
+ if (!circ_valid(qlen, wp, rp)) {
+ mif_err_limited("DIRTY (qlen:%d wp:%d rp:%d)\n",
+ qlen, wp, rp);
+ return -EIO;
+ }
+
+ space = circ_get_space(qlen, wp, rp);
+ if (unlikely(space < 1)) {
+ mif_err_limited("NOSPC (qlen:%d wp:%d rp:%d)\n",
+ qlen, wp, rp);
+ return -ENOSPC;
+ }
+
+ return space;
+}
+
+#if defined(DIT_DEBUG_LOW)
/*
 * Debug-only (DIT_DEBUG_LOW): watch pktgen traffic for out-of-order
 * sequence numbers per direction/ring and trigger a CP crash dump after
 * repeated reordering.  Locks onto the first UDP port seen per ring.
 * NOTE(review): assumes the payload starts with a 4-byte big-endian
 * sequence number right after the UDP header (pktgen-style) — confirm.
 */
static void dit_debug_out_of_order(enum dit_direction dir, enum dit_desc_ring ring,
	u8 *data)
{
	struct modem_ctl *mc;
	struct udphdr *uh;
	unsigned int off;
	unsigned int *seq_p;
	unsigned int seq;
	u16 port;

	static unsigned int last_seq[DIT_DIR_MAX][DIT_DESC_RING_MAX];
	static unsigned int out_count[DIT_DIR_MAX][DIT_DESC_RING_MAX];
	static u16 target_port[DIT_DIR_MAX][DIT_DESC_RING_MAX];

	if (!dc->pktgen_ch)
		return;

	/* IP version nibble selects the L3 header length */
	switch (data[0] & 0xF0) {
	case 0x40:
		off = sizeof(struct iphdr);
		break;
	case 0x60:
		off = sizeof(struct ipv6hdr);
		break;
	default:
		return;
	}

	uh = (struct udphdr *)(data + off);
	off += sizeof(struct udphdr);

	switch (dir) {
	case DIT_DIR_TX:
		port = uh->source;
		break;
	case DIT_DIR_RX:
		port = uh->dest;
		break;
	default:
		return;
	}

	if (!target_port[dir][ring]) {
		mif_info("check dir[%d] out of order at ring[%d] for port:%u\n", dir, ring,
			ntohs(port));
		/* ntohs() is not needed */
		target_port[dir][ring] = port;
	}

	/* check the first detected port only */
	if (port != target_port[dir][ring])
		return;

	seq_p = (unsigned int *)&data[off];
	seq = ntohl(*seq_p);

	if (seq < last_seq[dir][ring]) {
		mif_info("dir[%d] out of order at ring[%d] seq:0x%08x last:0x%08x\n", dir, ring,
			seq, last_seq[dir][ring]);
		/* more than 5 reorders: dump state and crash CP for analysis */
		if (++out_count[dir][ring] > 5) {
			dit_print_dump(dir, DIT_DUMP_ALL);
			if ((dc->ld) && (dc->ld->mc)) {
				mc = dc->ld->mc;
				mc->ops.trigger_cp_crash(mc);
			}
		}
	}
	last_seq[dir][ring] = seq;
}
+#endif
+
/*
 * Check whether DST ring @ring_num can accept traffic for @dir.
 * Returns 0 when ready, -EPERM before probe, -EINVAL for a bad
 * dir/ring, -ENODEV when the ring (or, for RX, its netdev) is absent.
 */
int dit_check_dst_ready(enum dit_direction dir, enum dit_desc_ring ring_num)
{
	struct dit_desc_info *desc_info;

	if (!dc)
		return -EPERM;

	if (ring_num < DIT_DST_DESC_RING_0 || ring_num >= DIT_DST_DESC_RING_MAX)
		return -EINVAL;

	/* DST0 is always ready */
	if (ring_num == DIT_DST_DESC_RING_0)
		return 0;

	switch (dir) {
	case DIT_DIR_TX:
		desc_info = &dc->desc_info[dir];
		if (!desc_info->dst_desc_ring[ring_num])
			return -ENODEV;
		break;
	case DIT_DIR_RX:
		/* RX DST1/2 also need a tethering netdev from the HAL */
		desc_info = &dc->desc_info[dir];
		if (!desc_info->dst_skb_buf[ring_num] || !dit_hal_get_dst_netdev(ring_num))
			return -ENODEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
+
+static inline bool dit_check_queues_empty(enum dit_direction dir)
+{
+ struct dit_desc_info *desc_info = &dc->desc_info[dir];
+ unsigned int ring_num;
+
+ if (!circ_empty(desc_info->src_wp, desc_info->src_rp))
+ return false;
+
+ for (ring_num = DIT_DST_DESC_RING_0; ring_num < DIT_DST_DESC_RING_MAX; ring_num++) {
+ if (!circ_empty(desc_info->dst_wp[ring_num], desc_info->dst_rp[ring_num]))
+ return false;
+ }
+
+ return true;
+}
+
+static bool dit_is_reg_value_valid(u32 value, u32 offset)
+{
+ struct nat_local_port local_port;
+ int ret = 0;
+
+ if (offset >= DIT_REG_NAT_RX_PORT_TABLE_SLOT) {
+ local_port.hw_val = value;
+ ret = dit_check_dst_ready(DIT_DIR_RX, local_port.dst_ring);
+ if (ret)
+ goto exit;
+ }
+
+exit:
+ if (ret) {
+ mif_err("reg value 0x%08X at 0x%08X is not valid. ret :%d\n", value, offset, ret);
+ return false;
+ }
+
+ return true;
+}
+
/* queue reg value writing if dit is running */
/*
 * Caller must hold dc->src_lock.  While the hw is kicked, not yet
 * initialized, or writes are already pending, the write is queued and
 * replayed later by dit_clean_reg_value_with_ext_lock(); otherwise it
 * is validated and applied immediately.
 */
int dit_enqueue_reg_value_with_ext_lock(u32 value, u32 offset)
{
	struct dit_reg_value_item *reg_item;

	if (dit_is_kicked_any() || !dc->init_done || !list_empty(&dc->reg_value_q)) {
		/* GFP_ATOMIC: may run in irq context under the spinlock */
		reg_item = kvzalloc(sizeof(struct dit_reg_value_item), GFP_ATOMIC);
		if (!reg_item) {
			mif_err("set reg value 0x%08X at 0x%08X enqueue failed\n", value, offset);
			return -ENOMEM;
		}

		reg_item->value = value;
		reg_item->offset = offset;
		list_add_tail(&reg_item->list, &dc->reg_value_q);
	} else {
		if (dit_is_reg_value_valid(value, offset))
			WRITE_REG_VALUE(dc, value, offset);
	}

	return 0;
}
EXPORT_SYMBOL(dit_enqueue_reg_value_with_ext_lock);
+
+int dit_enqueue_reg_value(u32 value, u32 offset)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dc->src_lock, flags);
+ ret = dit_enqueue_reg_value_with_ext_lock(value, offset);
+ spin_unlock_irqrestore(&dc->src_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(dit_enqueue_reg_value);
+
+static void dit_clean_reg_value_with_ext_lock(void)
+{
+ struct dit_reg_value_item *reg_item;
+
+ while (!list_empty(&dc->reg_value_q)) {
+ reg_item = list_first_entry(&dc->reg_value_q, struct dit_reg_value_item, list);
+ if (dit_is_reg_value_valid(reg_item->value, reg_item->offset))
+ WRITE_REG_VALUE(dc, reg_item->value, reg_item->offset);
+ list_del(®_item->list);
+ kvfree(reg_item);
+ }
+}
+
+static void dit_set_dst_skb_header(struct sk_buff *skb)
+{
+ /* for tcpdump with any interface */
+ skb->protocol = htons(ETH_P_ALL);
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_mac_header(skb);
+}
+
/*
 * Account a tethered RX packet against the upstream netdev stats, the
 * throughput monitor (if enabled) and the HAL data-usage counters.
 */
static void dit_update_stat(struct sk_buff *skb)
{
	/* remove link layer header size */
	unsigned int len = (skb->len - sizeof(struct ethhdr));

	/* update upstream stat */
	struct net_device *netdev = dit_hal_get_dst_netdev(DIT_DST_DESC_RING_0);

	if (netdev) {
#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
		struct mem_link_device *mld = to_mem_link_device(dc->ld);

		skb_set_network_header(skb, sizeof(struct ethhdr));
		mld->tpmon->add_rx_bytes(skb);
#endif
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += len;
	}

	dit_hal_add_data_bytes(len, 0);
}
+
/*
 * Mark @skb CHECKSUM_UNNECESSARY when either the packet was already
 * verified before DIT (DST0 with pre_csum) or the hardware reported
 * both TCP/UDP and IP checksums good without a failure flag.
 */
static inline void dit_set_skb_checksum(struct dit_dst_desc *dst_desc,
	enum dit_desc_ring ring_num, struct sk_buff *skb)
{
	if ((ring_num == DIT_DST_DESC_RING_0) && dst_desc->pre_csum) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	/* hw flagged a checksum problem: leave ip_summed untouched */
	if (dst_desc->status & DIT_CHECKSUM_FAILED_STATUS_MASK)
		return;

	if (cpif_check_bit(dst_desc->status, DIT_DESC_S_TCPC) &&
			cpif_check_bit(dst_desc->status, DIT_DESC_S_IPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
+
/*
 * Restore UDP zero-checksum semantics on NATed DST1/2 packets: hw
 * recomputes checksums, but a UDP checksum of 0 means "no checksum"
 * (RFC 768), so it must be preserved or mangled to CSUM_MANGLED_0.
 */
static inline void dit_set_skb_udp_csum_zero(struct dit_dst_desc *dst_desc,
	enum dit_desc_ring ring_num, struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int off;

	if (ring_num == DIT_DST_DESC_RING_0)
		return;

	/* every packets on DST1/2 are IPv4 NATed */
	if (!cpif_check_bit(dst_desc->packet_info, DIT_PACKET_INFO_IPV4_BIT) ||
			!cpif_check_bit(dst_desc->packet_info, DIT_PACKET_INFO_UDP_BIT))
		return;

	/* NOTE(review): assumes no IPv4 options (fixed 20-byte header) */
	off = sizeof(struct ethhdr) + sizeof(struct iphdr);
	uh = (struct udphdr *)(skb->data + off);

	/* set to 0 if csum was 0 from SRC.
	 * set to CSUM_MANGLED_0 if csum is 0 after the hw csum magic.
	 */
	if (dst_desc->udp_csum_zero)
		uh->check = 0;
	else if (!uh->check)
		uh->check = CSUM_MANGLED_0;
}
+
/*
 * Hand a completed RX skb to the network stack: DST0 goes up through
 * the modem link device, DST1/2 (tethering) are transmitted out the
 * HAL-provided netdev.  Consumes @skb on every path.
 */
static int dit_pass_to_net(enum dit_desc_ring ring_num,
	struct sk_buff *skb)
{
	struct mem_link_device *mld;
	int ret = 0;

#if defined(DIT_DEBUG_LOW)
	dit_debug_out_of_order(DIT_DIR_RX, ring_num, skb->data);
#endif

	switch (ring_num) {
	case DIT_DST_DESC_RING_0:
		mld = to_mem_link_device(dc->ld);

		/* this function check iod and ch inside
		 * an error means further calling is not necessary
		 */
		return mld->pass_skb_to_net(mld, skb);
	case DIT_DST_DESC_RING_1:
	case DIT_DST_DESC_RING_2:
		dit_update_stat(skb);
		skb->dev = dit_hal_get_dst_netdev(ring_num);
		if (!skb->dev || !netif_running(skb->dev) || !netif_carrier_ok(skb->dev)) {
			mif_err_limited("invalid netdev!! ring_num: %d\n", ring_num);
			dev_kfree_skb_any(skb);
			break;
		}

		dit_set_dst_skb_header(skb);
		ret = dev_queue_xmit(skb);
		if (ret == NET_XMIT_DROP)
			mif_err_limited("drop!! ring_num: %d\n", ring_num);
		break;
	default:
		break;
	}

	return 0;
}
+
+static inline void dit_reset_src_desc_kick_control(struct dit_src_desc *src_desc)
+{
+ u8 mask = DIT_SRC_KICK_CONTROL_MASK;
+
+ if (!src_desc)
+ return;
+
+ src_desc->control &= ~mask;
+}
+
/*
 * Remember in the src descriptor whether an IPv4/UDP packet arrived
 * with checksum 0 ("no checksum"), so it can be restored after hw NAT
 * (see dit_set_skb_udp_csum_zero()).  Only version/IHL == 0x45 is
 * examined, i.e. IPv4 without options.
 */
static inline void dit_set_src_desc_udp_csum_zero(struct dit_src_desc *src_desc,
	u8 *src)
{
	const struct iphdr *iph = (struct iphdr *)src;
	struct udphdr *uh;
	unsigned int off;

	/* check IPv4 UDP only */
	if (((src[0] & 0xFF) != 0x45) || (iph->protocol != IPPROTO_UDP))
		return;

	off = sizeof(*iph);
	uh = (struct udphdr *)(src + off);
	if (uh->check == 0)
		src_desc->udp_csum_zero = 1;
}
+
/*
 * Program the hardware kick window [src_rp, src_wp) on the src ring:
 * clear the previous window's head/tail marks, mark the new head at
 * @src_rp, point the hw at its descriptor address, and mark tail+INT
 * on the last descriptor before @src_wp.  Records the window in the
 * snapshot for debugging.
 */
static void dit_set_src_desc_kick_range(enum dit_direction dir, unsigned int src_wp,
	unsigned int src_rp)
{
	struct dit_desc_info *desc_info = &dc->desc_info[dir];
	struct dit_src_desc *src_desc;
	phys_addr_t p_desc;
	unsigned int head;
	unsigned int tail;
	u32 offset_lo = 0, offset_hi = 0, offset_en = 0;

	/* reset previous kick */
	head = dit_get_snapshot_head(dir, DIT_SRC_DESC_RING);
	src_desc = &desc_info->src_desc_ring[head];
	dit_reset_src_desc_kick_control(src_desc);

	tail = dit_get_snapshot_tail(dir, DIT_SRC_DESC_RING);
	src_desc = &desc_info->src_desc_ring[tail];
	dit_reset_src_desc_kick_control(src_desc);

	/* ensure the resets land before the new marks are written */
	barrier();

	/* set current kick */
	head = src_rp;
	src_desc = &desc_info->src_desc_ring[head];
	cpif_set_bit(src_desc->control, DIT_DESC_C_HEAD);
	p_desc = desc_info->src_desc_ring_daddr + (sizeof(struct dit_src_desc) * head);

	if (dir == DIT_DIR_TX) {
		offset_lo = DIT_REG_NAT_TX_DESC_ADDR_0_SRC;
		offset_hi = DIT_REG_NAT_TX_DESC_ADDR_1_SRC;
		offset_en = DIT_REG_NAT_TX_DESC_ADDR_EN_SRC;
	} else {
		offset_lo = DIT_REG_NAT_RX_DESC_ADDR_0_SRC;
		offset_hi = DIT_REG_NAT_RX_DESC_ADDR_1_SRC;
		offset_en = DIT_REG_NAT_RX_DESC_ADDR_EN_SRC;
	}

	WRITE_REG_PADDR_LO(dc, p_desc, offset_lo);
	WRITE_REG_PADDR_HI(dc, p_desc, offset_hi);
	WRITE_REG_VALUE(dc, 0x1, offset_en);

	/* tail is the last filled slot, i.e. one before the write pointer */
	tail = circ_prev_ptr(desc_info->src_desc_ring_len, src_wp, 1);
	src_desc = &desc_info->src_desc_ring[tail];
	cpif_set_bit(src_desc->control, DIT_DESC_C_TAIL);
	cpif_set_bit(src_desc->control, DIT_DESC_C_INT);
	DIT_INDIRECT_CALL(dc, set_src_desc_tail, dir, desc_info, tail);

	/* RINGEND on the physically last descriptor must stay set */
	src_desc = &desc_info->src_desc_ring[desc_info->src_desc_ring_len - 1];
	cpif_set_bit(src_desc->control, DIT_DESC_C_RINGEND);

	dit_set_snapshot(dir, DIT_SRC_DESC_RING, head, tail,
		circ_get_usage(desc_info->src_desc_ring_len, tail, head) + 1);
}
+
/*
 * Point the hardware at the current write position of a DST ring and
 * (re)assert the RINGEND mark on the ring's last descriptor.  The
 * lo/hi register pair depends on direction and ring number.
 */
static void dit_set_dst_desc_int_range(enum dit_direction dir,
	enum dit_desc_ring ring_num)
{
	struct dit_desc_info *desc_info = &dc->desc_info[dir];
	struct dit_dst_desc *dst_desc;
	phys_addr_t p_desc;
	unsigned int dst_wp_pos;
	u32 offset_lo = 0, offset_hi = 0;

	dst_desc = desc_info->dst_desc_ring[ring_num];
	dst_wp_pos = desc_info->dst_wp[ring_num];
	p_desc = desc_info->dst_desc_ring_daddr[ring_num] +
		(sizeof(struct dit_dst_desc) * dst_wp_pos);

	switch (ring_num) {
	case DIT_DST_DESC_RING_0:
		if (dir == DIT_DIR_TX) {
			offset_lo = DIT_REG_NAT_TX_DESC_ADDR_0_DST0;
			offset_hi = DIT_REG_NAT_TX_DESC_ADDR_1_DST0;
		} else {
			offset_lo = DIT_REG_NAT_RX_DESC_ADDR_0_DST0;
			offset_hi = DIT_REG_NAT_RX_DESC_ADDR_1_DST0;
		}
		break;
	case DIT_DST_DESC_RING_1:
		if (dir == DIT_DIR_TX) {
			offset_lo = DIT_REG_NAT_TX_DESC_ADDR_0_DST1;
			offset_hi = DIT_REG_NAT_TX_DESC_ADDR_1_DST1;
		} else {
			offset_lo = DIT_REG_NAT_RX_DESC_ADDR_0_DST1;
			offset_hi = DIT_REG_NAT_RX_DESC_ADDR_1_DST1;
		}
		break;
	case DIT_DST_DESC_RING_2:
		if (dir == DIT_DIR_TX) {
			offset_lo = DIT_REG_NAT_TX_DESC_ADDR_0_DST2;
			offset_hi = DIT_REG_NAT_TX_DESC_ADDR_1_DST2;
		} else {
			offset_lo = DIT_REG_NAT_RX_DESC_ADDR_0_DST2;
			offset_hi = DIT_REG_NAT_RX_DESC_ADDR_1_DST2;
		}
		break;
	default:
		break;
	}

	/* skip the hw write for unknown rings or an empty ring */
	if (offset_lo && offset_hi && (desc_info->dst_desc_ring_len > 0)) {
		WRITE_REG_PADDR_LO(dc, p_desc, offset_lo);
		WRITE_REG_PADDR_HI(dc, p_desc, offset_hi);
		cpif_set_bit(dst_desc[desc_info->dst_desc_ring_len - 1].control,
			DIT_DESC_C_RINGEND);
	}
}
+
/*
 * Fill the next free src descriptor for @dir with the packet at
 * @src/@src_paddr and advance the write pointer.  Returns the slot
 * index written (>= 0) or a negative error (-EPERM before probe,
 * -EIO/-ENOSPC from ring checks, -ENOMEM on DMA map failure).
 */
static int dit_enqueue_src_desc_ring_internal(enum dit_direction dir,
	u8 *src, unsigned long src_paddr,
	u16 len, u8 ch_id, bool csum)
{
	struct dit_desc_info *desc_info;
	struct dit_src_desc *src_desc;
	int remain;
	int src_wp = 0;
	bool is_upstream_pkt = false;
#if defined(DIT_DEBUG)
	static unsigned int overflow;
	static unsigned int last_max_overflow;
#endif
#if defined(DIT_DEBUG_LOW)
	u32 usage;
#endif

	if (!dc)
		return -EPERM;

	desc_info = &dc->desc_info[dir];

	remain = dit_check_ring_space(desc_info->src_desc_ring_len,
		desc_info->src_wp, desc_info->src_rp);
	if (unlikely(remain < 1)) {
#if defined(DIT_DEBUG)
		/* track the longest run of consecutive overflows */
		if (remain == -ENOSPC)
			overflow++;
		if (overflow > last_max_overflow) {
			last_max_overflow = overflow;
			mif_err("enqueue overflow new max: %d", last_max_overflow);
		}
#endif
		return remain;
	}

#if defined(DIT_DEBUG)
	overflow = 0;
#endif
#if defined(DIT_DEBUG_LOW)
	dit_debug_out_of_order(dir, DIT_SRC_DESC_RING, src);
#endif

	src_wp = (int) desc_info->src_wp;
	src_desc = &desc_info->src_desc_ring[src_wp];
	if (src_paddr)
		src_desc->src_addr = src_paddr;
	else
		src_desc->src_addr = virt_to_phys(src);
	src_desc->length = len;
	src_desc->ch_id = ch_id;
	src_desc->pre_csum = csum;
	src_desc->udp_csum_zero = 0;
	src_desc->control = 0;
	/* the physically last slot always carries RINGEND */
	if (src_wp == (desc_info->src_desc_ring_len - 1))
		cpif_set_bit(src_desc->control, DIT_DESC_C_RINGEND);
	src_desc->status = 0;

	DIT_INDIRECT_CALL(dc, set_desc_filter_bypass, dir, src_desc, src, &is_upstream_pkt);
	if (is_upstream_pkt)
		dit_set_src_desc_udp_csum_zero(src_desc, src);

	if (dc->use_dma_map && dir == DIT_DIR_TX) {
		dma_addr_t daddr;

		/* map/unmap only to flush CPU caches for the device;
		 * the hw uses the physical address in the descriptor
		 */
		daddr = dma_map_single(dc->dev, src, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dc->dev, daddr)) {
			mif_err("dit dir[%d] src skb[%d] dma_map_single failed\n", dir, src_wp);
			return -ENOMEM;
		}
		dma_unmap_single(dc->dev, daddr, len, DMA_TO_DEVICE);
	}

	/* descriptor fields must be visible before the wp moves */
	barrier();

	desc_info->src_wp = circ_new_ptr(desc_info->src_desc_ring_len, src_wp, 1);

	/* ensure the src_wp ordering */
	smp_mb();

#if defined(DIT_DEBUG_LOW)
	usage = circ_get_usage(desc_info->src_desc_ring_len, desc_info->src_wp, desc_info->src_rp);
	if (usage > snapshot[dir][DIT_SRC_DESC_RING].max_usage)
		snapshot[dir][DIT_SRC_DESC_RING].max_usage = usage;
#endif

	return src_wp;
}
+
+int dit_enqueue_src_desc_ring(enum dit_direction dir,
+ u8 *src, unsigned long src_paddr,
+ u16 len, u8 ch_id, bool csum)
+{
+ return dit_enqueue_src_desc_ring_internal(
+ dir, src, src_paddr, len, ch_id, csum);
+}
+EXPORT_SYMBOL(dit_enqueue_src_desc_ring);
+
/*
 * Enqueue an skb on the src ring and remember it in src_skb_buf so it
 * can be freed after the hardware consumes the descriptor.  Returns
 * the slot index or a negative error from the internal enqueue.
 */
int dit_enqueue_src_desc_ring_skb(enum dit_direction dir, struct sk_buff *skb)
{
	int src_wp;

	src_wp = dit_enqueue_src_desc_ring_internal(dir, skb->data,
		virt_to_phys(skb->data), skb->len,
		skbpriv(skb)->sipc_ch, (skb->ip_summed == CHECKSUM_UNNECESSARY));
	/* ownership of the skb stays with the ring on success */
	if (src_wp >= 0)
		dc->desc_info[dir].src_skb_buf[src_wp] = skb;

	return src_wp;
}
EXPORT_SYMBOL(dit_enqueue_src_desc_ring_skb);
+
+static int dit_fill_tx_dst_data_buffer(enum dit_desc_ring ring_num, unsigned int read)
+{
+ struct dit_desc_info *desc_info;
+ struct dit_dst_desc *dst_desc;
+ unsigned int dst_rp_pos;
+ unsigned int i;
+
+ if (!dc)
+ return -EPERM;
+
+ if (!read)
+ return 0;
+
+ desc_info = &dc->desc_info[DIT_DIR_TX];
+ if (unlikely(!desc_info->pktproc_pbase))
+ return -EACCES;
+
+ dst_desc = desc_info->dst_desc_ring[ring_num];
+ dst_rp_pos = desc_info->dst_rp[ring_num];
+
+ for (i = 0; i < read; i++) {
+ dst_desc[dst_rp_pos].dst_addr = desc_info->pktproc_pbase +
+ (dst_rp_pos * desc_info->buf_size);
+ dst_rp_pos = circ_new_ptr(desc_info->dst_desc_ring_len, dst_rp_pos, 1);
+ }
+
+ return 0;
+}
+
/*
 * Refill up to @read Rx dst descriptor slots on @ring_num with skb data
 * buffers, walking forward from the current dst_rp. The skb container
 * (and, when dc->use_dma_map, the matching DMA-address container) is
 * allocated lazily on first use.
 *
 * @initial: true on (re)initialization paths. May sleep for allocations
 *           (GFP_KERNEL, except GFP_ATOMIC for DST0) and marks the ring
 *           as filled; a second initial call is a no-op. When false,
 *           skbs are taken from the napi skb cache.
 *
 * Returns 0 on success or a negative errno. On failure the slots filled
 * so far are kept as-is.
 */
static int dit_fill_rx_dst_data_buffer(enum dit_desc_ring ring_num, unsigned int read, bool initial)
{
	struct dit_desc_info *desc_info;
	struct dit_dst_desc *dst_desc;
	struct sk_buff **dst_skb;
	unsigned int dst_rp_pos;
	gfp_t gfp_mask;
	int i;

	if (!dc)
		return -EPERM;

	if (!read)
		return 0;

	desc_info = &dc->desc_info[DIT_DIR_RX];

	/* initial fill is idempotent: skip if this ring was already filled */
	if (initial && desc_info->dst_skb_buf_filled[ring_num])
		return 0;

	/* lazily allocate the per-ring skb pointer container */
	if (unlikely(!desc_info->dst_skb_buf[ring_num])) {
		unsigned int buf_size = sizeof(struct sk_buff *) * desc_info->dst_desc_ring_len;

		desc_info->dst_skb_buf[ring_num] = kvzalloc(buf_size, GFP_KERNEL);
		if (!desc_info->dst_skb_buf[ring_num]) {
			mif_err("dit dst[%d] skb container alloc failed\n", ring_num);
			return -ENOMEM;
		}
	}

	/* lazily allocate the per-ring DMA address container when mapping is used */
	if (dc->use_dma_map && unlikely(!desc_info->dst_skb_buf_daddr[ring_num])) {
		unsigned int buf_size = sizeof(dma_addr_t) * desc_info->dst_desc_ring_len;

		desc_info->dst_skb_buf_daddr[ring_num] = kvzalloc(buf_size, GFP_KERNEL);
		if (!desc_info->dst_skb_buf_daddr[ring_num]) {
			mif_err("dit dst[%d] skb dma addr container alloc failed\n", ring_num);
			return -ENOMEM;
		}
	}

	dst_desc = desc_info->dst_desc_ring[ring_num];
	dst_skb = desc_info->dst_skb_buf[ring_num];
	dst_rp_pos = desc_info->dst_rp[ring_num];

	/* fill free space */
	for (i = 0; i < read; i++) {
		/* slot already has a data buffer: nothing to do */
		if (dst_desc[dst_rp_pos].dst_addr)
			goto next;

		/* skb exists but is not yet mapped/attached: skip allocation */
		if (unlikely(dst_skb[dst_rp_pos]))
			goto dma_map;

		if (desc_info->dst_page_pool[ring_num]) {
			void *data;
			bool used_tmp_alloc;
			/* budget for data + headroom padding + shared info,
			 * each rounded up as build_skb() expects
			 */
			u16 len = SKB_DATA_ALIGN(dc->desc_info[DIT_DIR_RX].buf_size);

			len += SKB_DATA_ALIGN(dc->page_recycling_skb_padding);
			len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

			data = cpif_page_alloc(desc_info->dst_page_pool[ring_num], len,
					       &used_tmp_alloc);
			if (!data) {
				mif_err("dit dst[%d] skb[%d] recycle pg alloc failed\n",
					ring_num, dst_rp_pos);
				return -ENOMEM;
			}

			dst_skb[dst_rp_pos] = build_skb(data, len);
		} else if (initial) {
			gfp_mask = GFP_KERNEL;
			/* DST0 initial refills can run in atomic context */
			if (ring_num == DIT_DST_DESC_RING_0)
				gfp_mask = GFP_ATOMIC;

			dst_skb[dst_rp_pos] = __netdev_alloc_skb_ip_align(dc->netdev,
									  desc_info->buf_size,
									  gfp_mask);
		} else {
			dst_skb[dst_rp_pos] = napi_alloc_skb(&dc->napi, desc_info->buf_size);
		}

		if (unlikely(!dst_skb[dst_rp_pos])) {
			mif_err("dit dst[%d] skb[%d] build failed\n", ring_num, dst_rp_pos);
			return -ENOMEM;
		}

		if (desc_info->dst_page_pool[ring_num])
			skb_reserve(dst_skb[dst_rp_pos], dc->page_recycling_skb_padding);

#if defined(DIT_DEBUG_LOW)
		snapshot[DIT_DIR_RX][ring_num].alloc_skbs++;
#endif

dma_map:
		/* map the skb data for the device unless it is already mapped */
		if (dc->use_dma_map && !desc_info->dst_skb_buf_daddr[ring_num][dst_rp_pos]) {
			dma_addr_t daddr;

			daddr = dma_map_single(dc->dev, dst_skb[dst_rp_pos]->data,
					       desc_info->buf_size, DMA_FROM_DEVICE);
			if (dma_mapping_error(dc->dev, daddr)) {
				mif_err("dit dst[%d] skb[%d] dma_map_single failed\n",
					ring_num, dst_rp_pos);
				return -ENOMEM;
			}

			desc_info->dst_skb_buf_daddr[ring_num][dst_rp_pos] = daddr;
#if defined(DIT_DEBUG_LOW)
			snapshot[DIT_DIR_RX][ring_num].dma_maps++;
#endif
		}

		/* publish the buffer address to the descriptor last */
		dst_desc[dst_rp_pos].dst_addr = virt_to_phys(dst_skb[dst_rp_pos]->data);

next:
		dst_rp_pos = circ_new_ptr(desc_info->dst_desc_ring_len, dst_rp_pos, 1);
	}

	if (initial)
		desc_info->dst_skb_buf_filled[ring_num] = true;

	return 0;
}
+
/*
 * Release every skb (and its DMA mapping) attached to dst ring @ring_num
 * of direction @dir, then free the skb and DMA-address containers.
 *
 * Refuses to free while the ring is non-empty (-EBUSY) or while any
 * descriptor slot has a cleared dst_addr (-EFAULT), since a cleared
 * address indicates an skb still pending in napi poll.
 * Returns 0 on success, -EPERM/-EINVAL when there is nothing to free.
 */
static int dit_free_dst_data_buffer(enum dit_direction dir, enum dit_desc_ring ring_num)
{
	struct dit_desc_info *desc_info;
	struct dit_dst_desc *dst_desc;
	struct sk_buff **dst_skb;
	int i;

	if (!dc)
		return -EPERM;

	desc_info = &dc->desc_info[dir];

	if (unlikely(!desc_info->dst_skb_buf[ring_num]))
		return -EINVAL;

	if (!circ_empty(desc_info->dst_wp[ring_num], desc_info->dst_rp[ring_num])) {
		mif_err("skip free. dst[%d] is processing. wp:%d rp:%d\n", ring_num,
			desc_info->dst_wp[ring_num], desc_info->dst_rp[ring_num]);
		return -EBUSY;
	}

	dst_desc = desc_info->dst_desc_ring[ring_num];
	dst_skb = desc_info->dst_skb_buf[ring_num];

	/* don't free dst_skb_buf if there are skbs will be handled in napi poll */
	for (i = 0; i < desc_info->dst_desc_ring_len; i++) {
		if (!dst_desc[i].dst_addr)
			return -EFAULT;
	}

	for (i = 0; i < desc_info->dst_desc_ring_len; i++) {
		if (dst_skb[i]) {
			/* unmap before freeing the skb that owns the buffer */
			if (dc->use_dma_map && desc_info->dst_skb_buf_daddr[ring_num] &&
			    desc_info->dst_skb_buf_daddr[ring_num][i]) {
#if defined(DIT_DEBUG_LOW)
				snapshot[DIT_DIR_RX][ring_num].dma_maps--;
#endif
				dma_unmap_single(dc->dev,
						 desc_info->dst_skb_buf_daddr[ring_num][i],
						 desc_info->buf_size, DMA_FROM_DEVICE);
			}

#if defined(DIT_DEBUG_LOW)
			snapshot[dir][ring_num].alloc_skbs--;
#endif
			dev_kfree_skb_any(dst_skb[i]);
		}
		/* clear the descriptor so a later fill re-attaches a buffer */
		dst_desc[i].dst_addr = 0;
	}

	mif_info("free dst[%d] skb buffers\n", ring_num);

	if (dc->use_dma_map) {
		kvfree(desc_info->dst_skb_buf_daddr[ring_num]);
		desc_info->dst_skb_buf_daddr[ring_num] = NULL;
	}

	kvfree(dst_skb);
	desc_info->dst_skb_buf[ring_num] = NULL;
	desc_info->dst_skb_buf_filled[ring_num] = false;

	return 0;
}
+
+/* ToDo: Tx does not have dst_skb_buf, might need another flag */
+static int dit_get_dst_data_buffer_free_space(enum dit_direction dir)
+{
+ struct dit_desc_info *desc_info = &dc->desc_info[dir];
+ unsigned int min = desc_info->dst_desc_ring_len;
+ unsigned int space;
+ int ring_num;
+
+ for (ring_num = DIT_DST_DESC_RING_0; ring_num < DIT_DST_DESC_RING_MAX; ring_num++) {
+ if (!desc_info->dst_skb_buf[ring_num])
+ continue;
+
+ space = circ_get_space(desc_info->dst_desc_ring_len,
+ desc_info->dst_wp[ring_num], desc_info->dst_rp[ring_num]);
+ if (min > space)
+ min = space;
+ }
+
+ return min;
+}
+
+int dit_manage_rx_dst_data_buffers(bool fill)
+{
+ int ring_num;
+ int ret = 0;
+
+ /* ToDo: need to update dst wp and rp? */
+ for (ring_num = DIT_DST_DESC_RING_1; ring_num < DIT_DST_DESC_RING_MAX; ring_num++) {
+ if (fill) {
+ ret = dit_fill_rx_dst_data_buffer(ring_num,
+ dc->desc_info[DIT_DIR_RX].dst_desc_ring_len, true);
+ if (ret)
+ break;
+
+ mif_info("dst[%d] filled with wp[%d] rp[%d]\n", ring_num,
+ dc->desc_info[DIT_DIR_RX].dst_wp[ring_num],
+ dc->desc_info[DIT_DIR_RX].dst_rp[ring_num]);
+ dit_set_dst_desc_int_range(DIT_DIR_RX, ring_num);
+ } else
+ ret = dit_free_dst_data_buffer(DIT_DIR_RX, ring_num);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dit_manage_rx_dst_data_buffers);
+
/*
 * NAPI poll handler for DIT Rx. Drains each dst ring from rp to wp (up
 * to @budget packets total), refills the consumed slot, stamps skb
 * metadata from the dst descriptor, and hands the skb to the network
 * stack via dit_pass_to_net().
 *
 * Completes napi and re-arms a reserved kick when fewer than @budget
 * packets were received. Returns the number of packets processed.
 */
int dit_read_rx_dst_poll(struct napi_struct *napi, int budget)
{
	struct dit_desc_info *desc_info = &dc->desc_info[DIT_DIR_RX];
	struct dit_dst_desc *dst_desc;
	struct sk_buff *skb;
	unsigned int rcvd_total = 0;
	unsigned int usage;
	unsigned int dst_rp_pos;
	unsigned int ring_num;
	int i, ret;
#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
	struct mem_link_device *mld = to_mem_link_device(dc->ld);
#endif

	for (ring_num = DIT_DST_DESC_RING_0; ring_num < DIT_DST_DESC_RING_MAX; ring_num++) {
		/* read from rp to wp */
		usage = circ_get_usage(desc_info->dst_desc_ring_len,
				       desc_info->dst_wp[ring_num], desc_info->dst_rp[ring_num]);
		for (i = 0; i < usage; i++) {
			/* budget is shared across all rings */
			if (rcvd_total >= budget)
				break;

			dst_rp_pos = desc_info->dst_rp[ring_num];

			/* get dst desc and skb */
			dst_desc = &desc_info->dst_desc_ring[ring_num][dst_rp_pos];

			/* give the buffer back to the CPU before touching its data */
			if (dc->use_dma_map) {
				dma_addr_t daddr =
					desc_info->dst_skb_buf_daddr[ring_num][dst_rp_pos];

				if (daddr) {
#if defined(DIT_DEBUG_LOW)
					snapshot[DIT_DIR_RX][ring_num].dma_maps--;
#endif
					dma_unmap_single(dc->dev, daddr, desc_info->buf_size,
							 DMA_FROM_DEVICE);
					desc_info->dst_skb_buf_daddr[ring_num][dst_rp_pos] = 0;
				}
			}

			skb = desc_info->dst_skb_buf[ring_num][dst_rp_pos];

			/* try to fill dst data buffers */
			desc_info->dst_skb_buf[ring_num][dst_rp_pos] = NULL;
			ret = dit_fill_rx_dst_data_buffer(ring_num, 1, false);
			if (ret) {
				/* refill failed: put the skb back and stop this ring */
				desc_info->dst_skb_buf[ring_num][dst_rp_pos] = skb;
				break;
			}

			/* set skb */
			skb_put(skb, dst_desc->length);
			skbpriv(skb)->lnk_hdr = 0;
			skbpriv(skb)->sipc_ch = dst_desc->ch_id;
			skbpriv(skb)->iod = link_get_iod_with_channel(dc->ld,
								      skbpriv(skb)->sipc_ch);
			skbpriv(skb)->ld = dc->ld;
			skbpriv(skb)->napi = napi;

			/* clat: IPv6 marked by HW but payload starts with an
			 * IPv4 header (0x45) -> translated packet
			 */
			if (cpif_check_bit(dst_desc->packet_info, DIT_PACKET_INFO_IPV6_BIT) &&
			    ((skb->data[0] & 0xFF) == 0x45)) {
				skbpriv(skb)->rx_clat = 1;
				snapshot[DIT_DIR_RX][ring_num].clat_packets++;
			}

			/* hw checksum */
			dit_set_skb_checksum(dst_desc, ring_num, skb);

			/* adjust udp zero checksum */
			dit_set_skb_udp_csum_zero(dst_desc, ring_num, skb);

			/* reset the descriptor for reuse, re-marking ring end */
			dst_desc->packet_info = 0;
			dst_desc->control = 0;
			if (dst_rp_pos == desc_info->dst_desc_ring_len - 1)
				cpif_set_bit(dst_desc->control, DIT_DESC_C_RINGEND);
			dst_desc->status = 0;

			ret = dit_pass_to_net(ring_num, skb);

			/* update dst rp after dit_pass_to_net */
			desc_info->dst_rp[ring_num] = circ_new_ptr(desc_info->dst_desc_ring_len,
								   dst_rp_pos, 1);
			rcvd_total++;
#if defined(DIT_DEBUG_LOW)
			snapshot[DIT_DIR_RX][ring_num].alloc_skbs--;
#endif

			if (ret < 0)
				break;
		}
	}

#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
	if (rcvd_total)
		mld->tpmon->start();
#endif

	if (rcvd_total < budget) {
		napi_complete_done(napi, rcvd_total);
		/* kick can be reserved if dst buffer was not enough */
		dit_kick(DIT_DIR_RX, true);
	}

	return rcvd_total;
}
EXPORT_SYMBOL(dit_read_rx_dst_poll);
+
/*
 * Advance dst_wp over every descriptor the HW marked DONE on @ring_num,
 * releasing the matching src-side skbs and advancing src_rp in lockstep.
 * When anything was consumed, re-programs the interrupt range, records a
 * snapshot, and advances the pktproc fore pointer (dst_rp for Tx).
 *
 * NOTE(review): called from the DIT IRQ handler; the per-descriptor
 * status check is what bounds the loop.
 */
static void dit_update_dst_desc_pos(enum dit_direction dir, enum dit_desc_ring ring_num)
{
	struct dit_desc_info *desc_info = &dc->desc_info[dir];
	unsigned int last_dst_wp = desc_info->dst_wp[ring_num];
	struct dit_dst_desc *dst_desc;
	struct mem_link_device *mld = to_mem_link_device(dc->ld);
	u64 packets = 0;
#if defined(DIT_DEBUG_LOW)
	u32 usage;
#endif

	do {
		dst_desc = &desc_info->dst_desc_ring[ring_num][desc_info->dst_wp[ring_num]];
		/* stop at the first descriptor the HW has not completed */
		if (!cpif_check_bit(dst_desc->status, DIT_DESC_S_DONE))
			break;

		/* update dst */
		cpif_clear_bit(dst_desc->status, DIT_DESC_S_DONE);
		/* tx does not use status field */
		if (dir == DIT_DIR_TX)
			dst_desc->status = 0;
		/* clearing dst_addr flags the slot for refill (Rx skb rings) */
		if (desc_info->dst_skb_buf[ring_num])
			dst_desc->dst_addr = 0;
		desc_info->dst_wp[ring_num] = circ_new_ptr(desc_info->dst_desc_ring_len,
							   desc_info->dst_wp[ring_num], 1);

		/* update src
		 * after a DST interrupt, reset all of src buf
		 */
		if (desc_info->src_skb_buf[desc_info->src_rp]) {
			dev_consume_skb_any(desc_info->src_skb_buf[desc_info->src_rp]);
			desc_info->src_skb_buf[desc_info->src_rp] = NULL;
		}
		desc_info->src_rp = circ_new_ptr(desc_info->src_desc_ring_len,
						 desc_info->src_rp, 1);

		packets++;

#if defined(DIT_DEBUG)
		if (desc_info->dst_wp[ring_num] == desc_info->dst_rp[ring_num]) {
			mif_err("dst[%d] wp[%d] would overwrite rp (dir:%d)\n", ring_num,
				desc_info->dst_wp[ring_num], dir);
		}
#endif

#if defined(DIT_DEBUG_LOW)
		usage = circ_get_usage(desc_info->dst_desc_ring_len,
				       desc_info->dst_wp[ring_num], desc_info->dst_rp[ring_num]);
		if (usage > snapshot[dir][ring_num].max_usage)
			snapshot[dir][ring_num].max_usage = usage;
#endif
	} while (1);

	if (packets > 0) {
		u32 qnum = desc_info->pktproc_queue_num;

		dit_set_dst_desc_int_range(dir, ring_num);
		dit_set_snapshot(dir, ring_num, last_dst_wp,
				 circ_prev_ptr(desc_info->dst_desc_ring_len,
					       desc_info->dst_wp[ring_num], 1), packets);

		/* update pktproc fore pointer */
		switch (dir) {
		case DIT_DIR_TX:
			desc_info->dst_rp[ring_num] = circ_new_ptr(desc_info->dst_desc_ring_len,
								   desc_info->dst_rp[ring_num], packets);
#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
			mld->pktproc_ul.q[qnum]->update_fore_ptr(mld->pktproc_ul.q[qnum], packets);
#endif
			break;
		case DIT_DIR_RX:
			mld->pktproc.q[qnum]->update_fore_ptr(mld->pktproc.q[qnum], packets);
			break;
		default:
			mif_err_limited("dir error:%d\n", dir);
			break;
		}
	}
}
+
/*
 * DIT interrupt handler. @arg points at the pending-bit value registered
 * for this IRQ line. Maps the bit to a ring/direction, harvests completed
 * descriptors, schedules napi (Rx) or signals the CP (Tx), then acks the
 * pending bit and, when no interrupts remain, clears the kicked state and
 * drops the idle-IP vote. Finally retries any reserved init and kick.
 */
irqreturn_t dit_irq_handler(int irq, void *arg)
{
	int pending_bit = *((int *)(arg));
	enum dit_desc_ring ring_num;
	struct mem_link_device *mld;
	struct modem_ctl *mc;
	enum dit_direction dir = DIT_DIR_MAX;
	u32 pending_mask = DIT_ALL_INT_PENDING_MASK;
	unsigned long flags;

	/* map pending bit -> dst ring
	 * NOTE(review): ring_num stays unset for ERR/unknown bits, but those
	 * paths below never read it
	 */
	switch (pending_bit) {
	case RX_DST0_INT_PENDING_BIT:
	case TX_DST0_INT_PENDING_BIT:
		ring_num = DIT_DST_DESC_RING_0;
		break;
	case RX_DST1_INT_PENDING_BIT:
		ring_num = DIT_DST_DESC_RING_1;
		break;
	case RX_DST2_INT_PENDING_BIT:
		ring_num = DIT_DST_DESC_RING_2;
		break;
	default:
		break;
	}

	switch (pending_bit) {
	case RX_DST0_INT_PENDING_BIT:
	case RX_DST1_INT_PENDING_BIT:
	case RX_DST2_INT_PENDING_BIT:
		dir = DIT_DIR_RX;
		pending_mask = DIT_RX_INT_PENDING_MASK;

		dit_update_dst_desc_pos(DIT_DIR_RX, ring_num);
		if (napi_schedule_prep(&dc->napi))
			__napi_schedule(&dc->napi);
		break;
	case TX_DST0_INT_PENDING_BIT:
		dir = DIT_DIR_TX;
		pending_mask = DIT_TX_INT_PENDING_MASK;
		mld = ld_to_mem_link_device(dc->ld);
		mc = dc->ld->mc;

		dit_update_dst_desc_pos(DIT_DIR_TX, ring_num);
		/* notify CP only while IPC is active; mc->lock guards that state */
		spin_lock_irqsave(&mc->lock, flags);
		if (ipc_active(mld))
			send_ipc_irq(mld, mask2int(MASK_SEND_DATA));
		spin_unlock_irqrestore(&mc->lock, flags);
		break;
	case ERR_INT_PENDING_BIT:
		/* nothing to do when ERR interrupt */
		mif_err_limited("ERR interrupt!! int_pending: 0x%X\n",
				READ_REG_VALUE(dc, DIT_REG_INT_PENDING));
		break;
	default:
		break;
	}

	spin_lock(&dc->src_lock);
	/* do not clear ERR for debugging */
	if (pending_bit != ERR_INT_PENDING_BIT)
		WRITE_REG_VALUE(dc, BIT(pending_bit), DIT_REG_INT_PENDING);

	/* when this direction has no pending interrupts left, it is idle */
	if ((READ_REG_VALUE(dc, DIT_REG_INT_PENDING) & pending_mask) == 0) {
		if (dir < DIT_DIR_MAX)
			dc->kicked[dir] = false;
		if (!dit_is_kicked_any()) {
#if IS_ENABLED(CONFIG_CPU_IDLE)
			exynos_update_ip_idle_status(dc->idle_ip_index, DIT_IDLE_IP_IDLE);
#endif
			dit_clean_reg_value_with_ext_lock();
		}
	}
	spin_unlock(&dc->src_lock);

	/* try init and kick again */
	dit_init(NULL, DIT_INIT_RETRY, DIT_STORE_NONE);
	if (dir < DIT_DIR_MAX)
		dit_kick(dir, true);

	return IRQ_HANDLED;
}
+
+bool dit_is_busy(enum dit_direction dir)
+{
+ u32 status_bits = 0;
+ u32 status_mask = 0;
+ u32 pending_bits = 0;
+ u32 pending_mask = 0;
+
+ switch (dir) {
+ case DIT_DIR_TX:
+ status_mask = TX_STATUS_MASK;
+ pending_mask = DIT_TX_INT_PENDING_MASK;
+ break;
+ case DIT_DIR_RX:
+ status_mask = RX_STATUS_MASK;
+ pending_mask = DIT_RX_INT_PENDING_MASK;
+ break;
+ default:
+ break;
+ }
+
+ status_bits = READ_REG_VALUE(dc, DIT_REG_STATUS);
+ if (status_bits & status_mask) {
+ mif_err("status = 0x%02X\n", status_bits);
+ return true;
+ }
+
+ pending_bits = READ_REG_VALUE(dc, DIT_REG_INT_PENDING);
+ if (pending_bits & pending_mask) {
+ mif_err("pending = 0x%02X\n", pending_bits);
+ return true;
+ }
+
+ return false;
+}
+
/*
 * Start a DIT HW pass for @dir over the currently enqueued src range.
 *
 * @retry: when true, only proceed if a kick was previously reserved
 *         (deferred because the HW was busy / not initialized / out of
 *         dst space). The reservation is consumed on success.
 *
 * Under src_lock, validates HW/init state and dst capacity, then latches
 * kicked[dir]. The register writes happen after the lock is dropped.
 * Returns 0 on success or a negative errno (-EAGAIN, -EBUSY, -ENODATA,
 * -ENOSPC, -EPERM).
 */
int dit_kick(enum dit_direction dir, bool retry)
{
	int ret = 0;
	unsigned long flags;
	struct dit_desc_info *desc_info;
	u32 kick_mask = 0;
	unsigned int src_wp;
	unsigned int src_rp;

	if (unlikely(!dc))
		return -EPERM;

	spin_lock_irqsave(&dc->src_lock, flags);
	/* a retry kick only runs if one was reserved earlier */
	if (retry && !dc->kick_reserved[dir]) {
		ret = -EAGAIN;
		goto exit;
	}

	if (dc->kicked[dir] || !dc->init_done) {
		dc->kick_reserved[dir] = true;
		ret = -EAGAIN;
		goto exit;
	}

	if (dit_is_busy(dir)) {
		dc->kick_reserved[dir] = true;
		mif_err_limited("busy\n");
		ret = -EBUSY;
		goto exit;
	}

	desc_info = &dc->desc_info[dir];

	/* save src_wp and src_rp to prevent dst overflow */
	src_wp = desc_info->src_wp;
	src_rp = dit_get_snapshot_next_head(dir, DIT_SRC_DESC_RING,
					    desc_info->src_desc_ring_len);
	if (circ_empty(src_wp, src_rp)) {
		ret = -ENODATA;
		goto exit;
	}

	/* check dst buffer space */
	if (circ_get_usage(desc_info->src_desc_ring_len, src_wp, src_rp) >
	    dit_get_dst_data_buffer_free_space(dir)) {
		dc->kick_reserved[dir] = true;
		mif_err_limited("not enough dst data buffer (dir:%d)\n", dir);
		ret = -ENOSPC;
		goto exit;
	}

	switch (dir) {
	case DIT_DIR_TX:
		cpif_set_bit(kick_mask, TX_COMMAND_BIT);
		break;
	case DIT_DIR_RX:
		cpif_set_bit(kick_mask, RX_COMMAND_BIT);
		break;
	default:
		break;
	}

	dc->kicked[dir] = true;
	dc->kick_reserved[dir] = false;

exit:
	spin_unlock_irqrestore(&dc->src_lock, flags);

	if (ret)
		return ret;

	/* program the range, vote DIT active, then issue the SW command */
	dit_set_src_desc_kick_range(dir, src_wp, src_rp);
#if IS_ENABLED(CONFIG_CPU_IDLE)
	exynos_update_ip_idle_status(dc->idle_ip_index, DIT_IDLE_IP_ACTIVE);
#endif
	WRITE_REG_VALUE(dc, kick_mask, DIT_REG_SW_COMMAND);

	return 0;
}
EXPORT_SYMBOL(dit_kick);
+
+static bool dit_check_nat_enabled(void)
+{
+ unsigned int ring_num;
+
+ for (ring_num = DIT_DST_DESC_RING_1; ring_num < DIT_DST_DESC_RING_MAX; ring_num++) {
+ if (dit_check_dst_ready(DIT_DIR_RX, ring_num) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static void dit_check_clat_enabled_internal(struct io_device *iod, void *args)
+{
+ bool *enabled = (bool *)args;
+
+ if (*enabled || !dc->ld->is_ps_ch(iod->ch))
+ return;
+
+ if (iod->clat_ndev)
+ *enabled = true;
+}
+
+static bool dit_check_clat_enabled(void)
+{
+ bool enabled = false;
+
+ if (unlikely(!dc->ld))
+ return false;
+
+ iodevs_for_each(dc->ld->msd, dit_check_clat_enabled_internal, &enabled);
+
+ return enabled;
+}
+
+static int dit_reg_backup_restore_internal(bool backup, const u16 *offset,
+ const u16 *size, void **buf,
+ const unsigned int arr_len)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < arr_len; i++) {
+ if (!buf[i]) {
+ buf[i] = kvzalloc(size[i], GFP_KERNEL);
+ if (!buf[i]) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ }
+
+ if (backup)
+ BACKUP_REG_VALUE(dc, buf[i], offset[i], size[i]);
+ else
+ RESTORE_REG_VALUE(dc, buf[i], offset[i], size[i]);
+ }
+
+exit:
+ /* reset buffer if failed to backup */
+ if (unlikely(ret && backup)) {
+ for (i = 0; i < arr_len; i++) {
+ if (buf[i])
+ memset(buf[i], 0, size[i]);
+ }
+ }
+
+ return ret;
+}
+
/*
 * Backup (@backup == true) or restore the NAT and CLAT register blocks
 * around a DIT re-init. Snapshot memory lives in function-static buffers
 * allocated on first use. Each block is only touched when its feature is
 * currently active. Returns 0 on success or a negative errno.
 */
static int dit_reg_backup_restore(bool backup)
{
	/* NAT */
	static const u16 nat_offset[] = {
		DIT_REG_NAT_LOCAL_ADDR,
		DIT_REG_NAT_ETHERNET_DST_MAC_ADDR_0,
		DIT_REG_NAT_RX_PORT_TABLE_SLOT,
	};
	static const u16 nat_size[] = {
		(DIT_REG_NAT_LOCAL_ADDR_MAX * DIT_REG_NAT_LOCAL_INTERVAL),
		(DIT_REG_NAT_LOCAL_ADDR_MAX * DIT_REG_ETHERNET_MAC_INTERVAL),
		(DIT_REG_NAT_LOCAL_PORT_MAX * DIT_REG_NAT_LOCAL_INTERVAL),
	};
	static const unsigned int nat_len = ARRAY_SIZE(nat_offset);
	static void *nat_buf[ARRAY_SIZE(nat_offset)];

	/* CLAT */
	static const u16 clat_offset[] = {
		DIT_REG_CLAT_TX_FILTER,
		DIT_REG_CLAT_TX_PLAT_PREFIX_0,
		DIT_REG_CLAT_TX_CLAT_SRC_0,
	};
	static const u16 clat_size[] = {
		(DIT_REG_CLAT_ADDR_MAX * DIT_REG_CLAT_TX_FILTER_INTERVAL),
		(DIT_REG_CLAT_ADDR_MAX * DIT_REG_CLAT_TX_PLAT_PREFIX_INTERVAL),
		(DIT_REG_CLAT_ADDR_MAX * DIT_REG_CLAT_TX_CLAT_SRC_INTERVAL),
	};
	static const unsigned int clat_len = ARRAY_SIZE(clat_offset);
	static void *clat_buf[ARRAY_SIZE(clat_offset)];

	int ret = 0;

	if (unlikely(!dc))
		return -EPERM;

	/* NAT */
	if (dit_check_nat_enabled()) {
		ret = dit_reg_backup_restore_internal(backup, nat_offset,
						      nat_size, nat_buf, nat_len);
		if (ret)
			goto error;
	}

	/* CLAT */
	if (dit_check_clat_enabled()) {
		ret = dit_reg_backup_restore_internal(backup, clat_offset,
						      clat_size, clat_buf, clat_len);
		if (ret)
			goto error;
	}

	return 0;

error:
	mif_err("backup/restore failed is_backup:%d, ret:%d\n", backup, ret);

	return ret;
}
+
+#define POOL_PAGE_SIZE 32768
+static int dit_init_page_pool(enum dit_direction dir, enum dit_desc_ring ring_num)
+{
+ struct dit_desc_info *desc_info;
+ u64 total_page_count;
+ u64 num_pkt_per_page;
+ u64 max_pkt_size;
+
+ if (!dc->use_page_recycling_rx)
+ return 0;
+
+ /* Support Rx DST0 only */
+ if (dir != DIT_DIR_RX || ring_num != DIT_DST_DESC_RING_0)
+ return 0;
+
+ desc_info = &dc->desc_info[dir];
+ if (desc_info->dst_page_pool[ring_num])
+ return 0;
+
+ max_pkt_size = desc_info->buf_size + dc->page_recycling_skb_padding +
+ sizeof(struct skb_shared_info);
+ num_pkt_per_page = POOL_PAGE_SIZE / max_pkt_size;
+ total_page_count = desc_info->dst_desc_ring_len / num_pkt_per_page;
+
+ desc_info->dst_page_pool[ring_num] = cpif_page_pool_create(total_page_count,
+ POOL_PAGE_SIZE);
+ if (unlikely(!desc_info->dst_page_pool[ring_num]))
+ return -ENOMEM;
+
+ cpif_page_init_tmp_page(desc_info->dst_page_pool[ring_num]);
+
+ return 0;
+}
+
+static int dit_init_hw(void)
+{
+ unsigned int dir;
+ unsigned int count = 0;
+
+ const u16 port_offset_start[DIT_DIR_MAX] = {
+ DIT_REG_NAT_TX_PORT_INIT_START,
+ DIT_REG_NAT_RX_PORT_INIT_START
+ };
+
+ const u16 port_offset_done[DIT_DIR_MAX] = {
+ DIT_REG_NAT_TX_PORT_INIT_DONE,
+ DIT_REG_NAT_RX_PORT_INIT_DONE
+ };
+
+ /* set Tx/Rx port table to all zero
+ * it requires 20us at 100MHz until DONE.
+ */
+ for (dir = 0; dir < DIT_DIR_MAX; dir++) {
+ WRITE_REG_VALUE(dc, 0x0, port_offset_done[dir]);
+ WRITE_REG_VALUE(dc, 0x1, port_offset_start[dir]);
+ while (++count < 100) {
+ udelay(20);
+ if (READ_REG_VALUE(dc, port_offset_done[dir])) {
+ break;
+ }
+ }
+
+ if (count >= 100) {
+ mif_err("PORT_INIT_DONE failed dir:%d\n", dir);
+ return -EIO;
+ }
+ }
+
+ WRITE_REG_VALUE(dc, 0x4020, DIT_REG_DMA_INIT_DATA);
+ WRITE_REG_VALUE(dc, BIT(DMA_INIT_COMMAND_BIT), DIT_REG_SW_COMMAND);
+
+ WRITE_REG_VALUE(dc, 0x0, DIT_REG_DMA_CHKSUM_OFF);
+ WRITE_REG_VALUE(dc, 0xF, DIT_REG_NAT_ZERO_CHK_OFF);
+ WRITE_REG_VALUE(dc, BIT(RX_ETHERNET_EN_BIT), DIT_REG_NAT_ETHERNET_EN);
+
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_TX_DESC_CTRL_SRC);
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_TX_DESC_CTRL_DST);
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_TX_HEAD_CTRL);
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_TX_MOD_HD_CTRL);
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_TX_PKT_CTRL);
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_TX_CHKSUM_CTRL);
+
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_RX_DESC_CTRL_SRC);
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_RX_DESC_CTRL_DST);
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_RX_HEAD_CTRL);
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_RX_MOD_HD_CTRL);
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_RX_PKT_CTRL);
+ WRITE_REG_VALUE(dc, DIT_RX_BURST_16BEAT, DIT_REG_RX_CHKSUM_CTRL);
+
+ WRITE_REG_VALUE(dc, DIT_INT_ENABLE_MASK, DIT_REG_INT_ENABLE);
+ WRITE_REG_VALUE(dc, DIT_INT_MASK_MASK, DIT_REG_INT_MASK);
+ WRITE_REG_VALUE(dc, DIT_ALL_INT_PENDING_MASK, DIT_REG_INT_PENDING);
+
+ WRITE_REG_VALUE(dc, 0x0, DIT_REG_CLK_GT_OFF);
+
+ DIT_INDIRECT_CALL(dc, do_init_hw);
+ if (!dc->reg_version)
+ DIT_INDIRECT_CALL(dc, get_reg_version, &dc->reg_version);
+
+ WRITE_SHR_VALUE(dc, dc->sharability_value);
+
+ return 0;
+}
+
/*
 * Allocate (once) and register the src/dst descriptor rings of @dir with
 * the DIT HW, allocate the src skb container, create the page pool, fill
 * the DST0 data buffers, and program each ring's base address and
 * interrupt range. Allocations persist across re-inits; only missing
 * resources are created. Returns 0 on success or -ENOMEM.
 */
static int dit_init_desc(enum dit_direction dir)
{
	struct dit_desc_info *desc_info = &dc->desc_info[dir];
	void *buf = NULL;
	unsigned int buf_size;
	phys_addr_t p_desc;
	int ret = 0, ring_num;
	u32 offset_lo = 0, offset_hi = 0;

	if (!desc_info->src_desc_ring) {
		buf_size = sizeof(struct dit_src_desc) *
			(desc_info->src_desc_ring_len + DIT_SRC_DESC_RING_LEN_PADDING);

		/* coherent DMA memory when mapping is used, plain kernel
		 * memory (with virt_to_phys) otherwise
		 */
		if (dc->use_dma_map) {
			buf = dma_alloc_coherent(dc->dev, buf_size, &desc_info->src_desc_ring_daddr,
						 GFP_KERNEL);
		} else {
			buf = devm_kzalloc(dc->dev, buf_size, GFP_KERNEL);
		}
		if (!buf) {
			mif_err("dit dir[%d] src desc alloc failed\n", dir);
			return -ENOMEM;
		}

		desc_info->src_desc_ring = buf;
		if (!dc->use_dma_map)
			desc_info->src_desc_ring_daddr = virt_to_phys(buf);
	}

	/* tell the HW where the src ring lives (split lo/hi registers) */
	p_desc = desc_info->src_desc_ring_daddr;
	if (dir == DIT_DIR_TX) {
		offset_lo = DIT_REG_TX_RING_START_ADDR_0_SRC;
		offset_hi = DIT_REG_TX_RING_START_ADDR_1_SRC;
	} else {
		offset_lo = DIT_REG_RX_RING_START_ADDR_0_SRC;
		offset_hi = DIT_REG_RX_RING_START_ADDR_1_SRC;
	}
	WRITE_REG_PADDR_LO(dc, p_desc, offset_lo);
	WRITE_REG_PADDR_HI(dc, p_desc, offset_hi);

	if (!desc_info->src_skb_buf) {
		buf_size = sizeof(struct sk_buff *) * desc_info->src_desc_ring_len;
		buf = kvzalloc(buf_size, GFP_KERNEL);
		if (!buf) {
			mif_err("dit dir[%d] src skb container alloc failed\n", dir);
			return -ENOMEM;
		}
		desc_info->src_skb_buf = buf;
	}

	for (ring_num = DIT_DST_DESC_RING_0; ring_num < DIT_DST_DESC_RING_MAX; ring_num++) {
		offset_lo = 0;
		offset_hi = 0;

		if (!desc_info->dst_desc_ring[ring_num]) {
			buf_size = sizeof(struct dit_dst_desc) *
				(desc_info->dst_desc_ring_len + DIT_DST_DESC_RING_LEN_PADDING);

			if (dc->use_dma_map) {
				buf = dma_alloc_coherent(dc->dev, buf_size,
							 &desc_info->dst_desc_ring_daddr[ring_num],
							 GFP_KERNEL);
			} else {
				buf = devm_kzalloc(dc->dev, buf_size, GFP_KERNEL);
			}
			if (!buf) {
				mif_err("dit dir[%d] dst desc[%d] alloc failed\n", dir, ring_num);
				return -ENOMEM;
			}

			desc_info->dst_desc_ring[ring_num] = buf;
			if (!dc->use_dma_map)
				desc_info->dst_desc_ring_daddr[ring_num] = virt_to_phys(buf);
		}

		ret = dit_init_page_pool(dir, ring_num);
		if (ret) {
			mif_err("dit dir[%d] dst desc[%d] page pool init failed\n", dir, ring_num);
			return -ENOMEM;
		}

		p_desc = desc_info->dst_desc_ring_daddr[ring_num];
		switch (ring_num) {
		case DIT_DST_DESC_RING_0:
			/* only DST0 gets data buffers attached here */
			if (dir == DIT_DIR_TX) {
				offset_lo = DIT_REG_TX_RING_START_ADDR_0_DST0;
				offset_hi = DIT_REG_TX_RING_START_ADDR_1_DST0;
				ret = dit_fill_tx_dst_data_buffer(ring_num,
								  desc_info->dst_desc_ring_len);
			} else {
				offset_lo = DIT_REG_RX_RING_START_ADDR_0_DST0;
				offset_hi = DIT_REG_RX_RING_START_ADDR_1_DST0;
				ret = dit_fill_rx_dst_data_buffer(ring_num,
								  desc_info->dst_desc_ring_len, true);
			}

			if (ret) {
				mif_err("dit dir[%d] dst desc[%d] buffer fill failed\n",
					dir, ring_num);
				return -ENOMEM;
			}
			break;
		case DIT_DST_DESC_RING_1:
			if (dir == DIT_DIR_TX) {
				offset_lo = DIT_REG_TX_RING_START_ADDR_0_DST1;
				offset_hi = DIT_REG_TX_RING_START_ADDR_1_DST1;
			} else {
				offset_lo = DIT_REG_RX_RING_START_ADDR_0_DST1;
				offset_hi = DIT_REG_RX_RING_START_ADDR_1_DST1;
			}
			break;
		case DIT_DST_DESC_RING_2:
			if (dir == DIT_DIR_TX) {
				offset_lo = DIT_REG_TX_RING_START_ADDR_0_DST2;
				offset_hi = DIT_REG_TX_RING_START_ADDR_1_DST2;
			} else {
				offset_lo = DIT_REG_RX_RING_START_ADDR_0_DST2;
				offset_hi = DIT_REG_RX_RING_START_ADDR_1_DST2;
			}
			break;
		default:
			break;
		}

		if (offset_lo && offset_hi) {
			WRITE_REG_PADDR_LO(dc, p_desc, offset_lo);
			WRITE_REG_PADDR_HI(dc, p_desc, offset_hi);
		}

		dit_set_dst_desc_int_range(dir, ring_num);
	}

	DIT_INDIRECT_CALL(dc, do_init_desc, dir);

	mif_info("dir:%d src_len:%d dst_len:%d\n",
		 dir, desc_info->src_desc_ring_len, desc_info->dst_desc_ring_len);

	return 0;
}
+
/*
 * (Re)initialize the DIT controller: descriptor rings, HW registers,
 * optional NAT/CLAT register backup/restore, and the DIT netdev.
 *
 * @ld:    link device to attach; may be NULL if one was set before.
 * @type:  DIT_INIT_RETRY only proceeds when a previous init was
 *         reserved (deferred while the HW was kicked);
 *         DIT_INIT_DEINIT stops after the optional backup.
 * @store: DIT_STORE_BACKUP saves NAT/CLAT registers before init,
 *         DIT_STORE_RESTORE re-applies them after the HW init.
 *
 * Concurrency: src_lock guards the reserved/kicked flags; init_running
 * makes the body single-entry (-EBUSY for a concurrent caller). On
 * success, reserved kicks for both directions are retried.
 */
int dit_init(struct link_device *ld, enum dit_init_type type, enum dit_store_type store)
{
	unsigned long flags;
	unsigned int dir;
	int ret = 0;

	if (unlikely(!dc)) {
		mif_err("dit not created\n");
		return -EPERM;
	}

	/* ld can be null if it is set before */
	if (!ld && !dc->ld) {
		mif_err("link device set failed\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&dc->src_lock, flags);
	if (type == DIT_INIT_RETRY && !dc->init_reserved) {
		spin_unlock_irqrestore(&dc->src_lock, flags);
		return -EAGAIN;
	}

	/* HW busy: defer (reserve) the init unless we are deiniting */
	if (dit_is_kicked_any()) {
		if (type != DIT_INIT_DEINIT)
			dc->init_reserved = true;

		spin_unlock_irqrestore(&dc->src_lock, flags);
		return -EEXIST;
	}

	/* single-entry guard; the loser backs out via exit (dec) */
	if (atomic_inc_return(&dc->init_running) > 1) {
		spin_unlock_irqrestore(&dc->src_lock, flags);
		ret = -EBUSY;
		goto exit;
	}

	dc->init_done = false;
	spin_unlock_irqrestore(&dc->src_lock, flags);

	if (store == DIT_STORE_BACKUP) {
		ret = dit_reg_backup_restore(true);
		if (ret)
			goto exit;
	}

	if (type == DIT_INIT_DEINIT)
		goto exit;

	for (dir = 0; dir < DIT_DIR_MAX; dir++) {
		ret = dit_init_desc(dir);
		if (ret) {
			mif_err("dit desc init failed\n");
			goto exit;
		}
	}

	ret = dit_init_hw();
	if (ret) {
		mif_err("dit hw init failed\n");
		goto exit;
	}

	if (store == DIT_STORE_RESTORE) {
		ret = dit_reg_backup_restore(false);
		if (ret)
			goto exit;
	}

	ret = dit_net_init(dc);
	if (ret) {
		mif_err("dit net init failed\n");
		goto exit;
	}

	if (ld)
		dc->ld = ld;

	spin_lock_irqsave(&dc->src_lock, flags);
	dc->init_done = true;
	dc->init_reserved = false;
	dit_clean_reg_value_with_ext_lock();
	spin_unlock_irqrestore(&dc->src_lock, flags);

	mif_info("dit init done. hw_ver:0x%08X\n", dc->hw_version);

exit:
	atomic_dec(&dc->init_running);
	if (ret || type == DIT_INIT_DEINIT)
		return ret;

	/* retry any kicks that were reserved while init was pending */
	dit_kick(DIT_DIR_TX, true);
	dit_kick(DIT_DIR_RX, true);

	return 0;
}
EXPORT_SYMBOL(dit_init);
+
+static int dit_register_irq(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+ int i;
+
+ if (!dc->irq_len) {
+ mif_err("dit irq not defined\n");
+ return -ENODEV;
+ }
+
+ dc->irq_buf = devm_kzalloc(dev, sizeof(int) * dc->irq_len, GFP_KERNEL);
+ if (!dc->irq_buf) {
+ mif_err("dit irq buf alloc failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (i = 0; i < dc->irq_len; i++) {
+ int irq_num;
+
+ irq_num = platform_get_irq_byname(pdev, dc->irq_name[i]);
+ ret = devm_request_irq(dev, irq_num, dit_irq_handler, 0, dc->irq_name[i],
+ &dc->irq_pending_bit[i]);
+ if (ret) {
+ mif_err("failed to request irq: %d, ret: %d\n", i, ret);
+ ret = -EIO;
+ goto error;
+ }
+
+ if (dc->irq_pending_bit[i] == TX_DST0_INT_PENDING_BIT)
+ dc->irq_num_tx = irq_num;
+ dc->irq_buf[i] = irq_num;
+ }
+
+ return 0;
+
+error:
+ if (dc->irq_buf) {
+ devm_kfree(dev, dc->irq_buf);
+ dc = NULL;
+ }
+
+ return ret;
+}
+
/*
 * sysfs "status" attribute: dumps versions, feature flags, and per
 * direction/ring snapshot counters plus the live wp/rp positions.
 * "(d)" fields are only maintained when DIT_DEBUG_LOW is defined.
 * Returns the number of bytes written into @buf.
 */
static ssize_t status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dit_desc_info *desc_info;
	ssize_t count = 0;
	unsigned int wp, rp, desc_len;
	unsigned int dir, ring_num;

	count += scnprintf(&buf[count], PAGE_SIZE - count, "hw_ver:0x%08X reg_ver:0x%X\n",
			   dc->hw_version, dc->reg_version);
	count += scnprintf(&buf[count], PAGE_SIZE - count,
			   "use tx:%d rx:%d(stop:%d) clat:%d page_recycle:%d\n",
			   dc->use_dir[DIT_DIR_TX], dc->use_dir[DIT_DIR_RX], dc->stop_enqueue[DIT_DIR_RX],
			   dc->use_clat, dc->use_page_recycling_rx);

	for (dir = 0; dir < DIT_DIR_MAX; dir++) {
		desc_info = &dc->desc_info[dir];
		for (ring_num = 0; ring_num < DIT_DESC_RING_MAX; ring_num++) {
			/* src ring has a single wp/rp; dst rings are indexed */
			if (ring_num == DIT_SRC_DESC_RING) {
				wp = desc_info->src_wp;
				rp = desc_info->src_rp;
				desc_len = desc_info->src_desc_ring_len;
			} else {
				wp = desc_info->dst_wp[ring_num];
				rp = desc_info->dst_rp[ring_num];
				desc_len = desc_info->dst_desc_ring_len;
			}

			count += scnprintf(&buf[count], PAGE_SIZE - count,
					   "%s max_usage(d)/alloc(d)/map(d)/total: %u/%u/%u/%u\n",
					   snapshot[dir][ring_num].name,
					   snapshot[dir][ring_num].max_usage,
					   snapshot[dir][ring_num].alloc_skbs,
					   snapshot[dir][ring_num].dma_maps,
					   desc_len);
			count += scnprintf(&buf[count], PAGE_SIZE - count,
					   " wp: %u, rp: %u\n", wp, rp);
			count += scnprintf(&buf[count], PAGE_SIZE - count,
					   " kicked head: %d, tail: %d, packets: %llu\n",
					   snapshot[dir][ring_num].head, snapshot[dir][ring_num].tail,
					   snapshot[dir][ring_num].packets);
			count += scnprintf(&buf[count], PAGE_SIZE - count,
					   " total packets: %llu, clat: %llu\n",
					   snapshot[dir][ring_num].total_packets,
					   snapshot[dir][ring_num].clat_packets);
		}
	}

	return count;
}
+
/*
 * sysfs "register" attribute: dumps the pending/status registers, the
 * NAT local-address and MAC tables, and the CLAT filter/prefix/source
 * tables. Also triggers the full Tx/Rx dump to the kernel log (the port
 * table itself is too large for the sysfs buffer).
 * Returns the number of bytes written into @buf.
 */
static ssize_t register_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	ssize_t count = 0;
	int i = 0;

	count += scnprintf(&buf[count], PAGE_SIZE - count, "INT_PENDING: 0x%X\n",
			   READ_REG_VALUE(dc, DIT_REG_INT_PENDING));

	count += scnprintf(&buf[count], PAGE_SIZE - count, "STATUS: 0x%X\n",
			   READ_REG_VALUE(dc, DIT_REG_STATUS));

	count += scnprintf(&buf[count], PAGE_SIZE - count, "NAT Local Address\n");
	for (i = 0; i < DIT_REG_NAT_LOCAL_ADDR_MAX; i++) {
		/* registers hold network byte order; convert for display */
		count += scnprintf(&buf[count], PAGE_SIZE - count,
				   " [%02d] src:0x%08X%04X, dst:0x%08X/0x%08X%04X\n",
				   i,
				   ntohl(READ_REG_VALUE(dc, DIT_REG_NAT_ETHERNET_SRC_MAC_ADDR_0 +
							(i * DIT_REG_ETHERNET_MAC_INTERVAL))),
				   ntohs(READ_REG_VALUE(dc, DIT_REG_NAT_ETHERNET_SRC_MAC_ADDR_1 +
							(i * DIT_REG_ETHERNET_MAC_INTERVAL))),
				   ntohl(READ_REG_VALUE(dc, DIT_REG_NAT_LOCAL_ADDR +
							(i * DIT_REG_NAT_LOCAL_INTERVAL))),
				   ntohl(READ_REG_VALUE(dc, DIT_REG_NAT_ETHERNET_DST_MAC_ADDR_0 +
							(i * DIT_REG_ETHERNET_MAC_INTERVAL))),
				   ntohs(READ_REG_VALUE(dc, DIT_REG_NAT_ETHERNET_DST_MAC_ADDR_1 +
							(i * DIT_REG_ETHERNET_MAC_INTERVAL))));
	}

	count += scnprintf(&buf[count], PAGE_SIZE - count, "CLAT Address\n");
	for (i = 0; i < DIT_REG_CLAT_ADDR_MAX; i++) {
		count += scnprintf(&buf[count], PAGE_SIZE - count,
				   " [%02d] v4:0x%08X, v6:0x%08X%08X%08X%08X, prx:0x%08X%08X%08X\n",
				   i,
				   ntohl(READ_REG_VALUE(dc, DIT_REG_CLAT_TX_FILTER +
							(i * DIT_REG_CLAT_TX_FILTER_INTERVAL))),
				   ntohl(READ_REG_VALUE(dc, DIT_REG_CLAT_TX_CLAT_SRC_0 +
							(i * DIT_REG_CLAT_TX_CLAT_SRC_INTERVAL))),
				   ntohl(READ_REG_VALUE(dc, DIT_REG_CLAT_TX_CLAT_SRC_1 +
							(i * DIT_REG_CLAT_TX_CLAT_SRC_INTERVAL))),
				   ntohl(READ_REG_VALUE(dc, DIT_REG_CLAT_TX_CLAT_SRC_2 +
							(i * DIT_REG_CLAT_TX_CLAT_SRC_INTERVAL))),
				   ntohl(READ_REG_VALUE(dc, DIT_REG_CLAT_TX_CLAT_SRC_3 +
							(i * DIT_REG_CLAT_TX_CLAT_SRC_INTERVAL))),
				   ntohl(READ_REG_VALUE(dc, DIT_REG_CLAT_TX_PLAT_PREFIX_0 +
							(i * DIT_REG_CLAT_TX_PLAT_PREFIX_INTERVAL))),
				   ntohl(READ_REG_VALUE(dc, DIT_REG_CLAT_TX_PLAT_PREFIX_1 +
							(i * DIT_REG_CLAT_TX_PLAT_PREFIX_INTERVAL))),
				   ntohl(READ_REG_VALUE(dc, DIT_REG_CLAT_TX_PLAT_PREFIX_2 +
							(i * DIT_REG_CLAT_TX_PLAT_PREFIX_INTERVAL))));
	}

	count += scnprintf(&buf[count], PAGE_SIZE - count, "check logs for port table\n");
	dit_print_dump(DIT_DIR_TX, DIT_DUMP_ALL);
	dit_print_dump(DIT_DIR_RX, DIT_DUMP_ALL);

	return count;
}
+
+#if defined(DIT_DEBUG)
+/* Debug sysfs write: "<index> <hex value>" programs one slot of the NAT
+ * Rx port table. Returns -EINVAL on a malformed input or out-of-range index.
+ */
+static ssize_t debug_set_rx_port_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nat_local_port local_port;
+ unsigned int index;
+ int ret;
+
+ /* require both fields: a partial parse would enqueue an
+ * uninitialized local_port.hw_val
+ */
+ ret = sscanf(buf, "%u %x", &index, &local_port.hw_val);
+ if (ret != 2)
+ return -EINVAL;
+
+ if (index >= DIT_REG_NAT_LOCAL_PORT_MAX)
+ return -EINVAL;
+
+ dit_enqueue_reg_value(local_port.hw_val,
+ DIT_REG_NAT_RX_PORT_TABLE_SLOT + (index * DIT_REG_NAT_LOCAL_INTERVAL));
+
+ return count;
+}
+
+/* Debug sysfs write: "<index> <src MAC hex> <IPv4 hex> <dst MAC hex>"
+ * programs one NAT local-address entry (src/dst MAC, local IPv4 and
+ * ethertype) under src_lock so the register writes are enqueued atomically.
+ */
+static ssize_t debug_set_local_addr_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u8 eth_src_str[(ETH_ALEN * 2) + 1];
+ u8 eth_dst_str[(ETH_ALEN * 2) + 1];
+ u64 eth_src_addr;
+ u64 eth_dst_addr;
+ u32 ip_addr;
+ unsigned int index;
+ unsigned long flags;
+ int ret;
+
+ /* for example, "0 D6CFEB352CF4 C0A82A5D 2AAD159CDE96" is for packets
+ * from D6CFEB352CF4(rndis0) to 192.168.42.93/2AAD159CDE96(neigh)
+ */
+ ret = sscanf(buf, "%u %12s %x %12s", &index, eth_src_str, &ip_addr, eth_dst_str);
+ /* all four fields are required; a partial parse would read
+ * uninitialized MAC strings or ip_addr below
+ */
+ if (ret != 4)
+ return -EINVAL;
+
+ ret = kstrtou64(eth_src_str, 16, &eth_src_addr);
+ if (ret)
+ return ret;
+ ret = kstrtou64(eth_dst_str, 16, &eth_dst_addr);
+ if (ret)
+ return ret;
+
+ if (index >= DIT_REG_NAT_LOCAL_ADDR_MAX)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dc->src_lock, flags);
+ dit_enqueue_reg_value_with_ext_lock(htonl(eth_src_addr >> 16),
+ DIT_REG_NAT_ETHERNET_SRC_MAC_ADDR_0 + (index * DIT_REG_ETHERNET_MAC_INTERVAL));
+ dit_enqueue_reg_value_with_ext_lock(htons(eth_src_addr & 0xFFFF),
+ DIT_REG_NAT_ETHERNET_SRC_MAC_ADDR_1 + (index * DIT_REG_ETHERNET_MAC_INTERVAL));
+ dit_enqueue_reg_value_with_ext_lock(htonl(ip_addr),
+ DIT_REG_NAT_LOCAL_ADDR + (index * DIT_REG_NAT_LOCAL_INTERVAL));
+ dit_enqueue_reg_value_with_ext_lock(htonl(eth_dst_addr >> 16),
+ DIT_REG_NAT_ETHERNET_DST_MAC_ADDR_0 + (index * DIT_REG_ETHERNET_MAC_INTERVAL));
+ dit_enqueue_reg_value_with_ext_lock(htons(eth_dst_addr & 0xFFFF),
+ DIT_REG_NAT_ETHERNET_DST_MAC_ADDR_1 + (index * DIT_REG_ETHERNET_MAC_INTERVAL));
+ dit_enqueue_reg_value_with_ext_lock(htons(ETH_P_IP),
+ DIT_REG_NAT_ETHERNET_TYPE + (index * DIT_REG_ETHERNET_MAC_INTERVAL));
+ spin_unlock_irqrestore(&dc->src_lock, flags);
+
+ return count;
+}
+
+/* Debug sysfs write: "<dir> <ring>" clears the max_usage counter of one
+ * destination ring snapshot, or of all rings when <ring> == DIT_DESC_RING_MAX.
+ */
+static ssize_t debug_reset_usage_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int dir, ring_num;
+ unsigned int reset_ring;
+ int ret;
+
+ /* both fields are required; a partial parse would leave
+ * reset_ring uninitialized
+ */
+ ret = sscanf(buf, "%u %u", &dir, &reset_ring);
+ if (ret != 2)
+ return -EINVAL;
+
+ if (dir >= DIT_DIR_MAX)
+ return -EINVAL;
+
+ if (reset_ring > DIT_DESC_RING_MAX)
+ return -EINVAL;
+
+ for (ring_num = DIT_DST_DESC_RING_0; ring_num < DIT_DESC_RING_MAX; ring_num++) {
+ if ((ring_num == reset_ring) || (reset_ring == DIT_DESC_RING_MAX))
+ snapshot[dir][ring_num].max_usage = 0;
+ }
+
+ return count;
+}
+
+/* Debug sysfs write: any value > 0 enables the DIT Tx path, else disables. */
+static ssize_t debug_use_tx_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int flag;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &flag);
+ if (ret)
+ return -EINVAL;
+
+ dc->use_dir[DIT_DIR_TX] = (flag > 0 ? true : false);
+ return count;
+}
+
+/* Debug sysfs read: report whether the DIT Tx path is enabled. */
+static ssize_t debug_use_tx_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "use_tx: %d\n", dc->use_dir[DIT_DIR_TX]);
+}
+
+/* Debug sysfs write: any value > 0 enables the DIT Rx path, else disables. */
+static ssize_t debug_use_rx_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ /* int, not unsigned int: kstrtoint() takes an int *, and this
+ * matches debug_use_tx_store()
+ */
+ int flag;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &flag);
+ if (ret)
+ return -EINVAL;
+
+ dc->use_dir[DIT_DIR_RX] = (flag > 0 ? true : false);
+ return count;
+}
+
+/* Debug sysfs read: report whether the DIT Rx path is enabled. */
+static ssize_t debug_use_rx_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "use_rx: %d\n", dc->use_dir[DIT_DIR_RX]);
+}
+
+/* Debug sysfs write: enable/disable CLAT support. When disabling, the
+ * per-rmnet CLAT entries are cleared through the HAL first so stale
+ * translations do not remain programmed.
+ */
+static ssize_t debug_use_clat_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct clat_info clat;
+ unsigned int i;
+ /* int, not unsigned int: kstrtoint() takes an int * */
+ int flag;
+ int ret;
+ struct mem_link_device *mld = ld_to_mem_link_device(dc->ld);
+
+ ret = kstrtoint(buf, 0, &flag);
+ if (ret)
+ return -EINVAL;
+
+ if (!flag) {
+ memset(&clat, 0, sizeof(clat));
+ for (i = 0; i < DIT_REG_CLAT_ADDR_MAX; i++) {
+ clat.clat_index = i;
+ scnprintf(clat.ipv6_iface, IFNAMSIZ, "rmnet%d", i);
+ dit_hal_set_clat_info(mld, &clat);
+ }
+ }
+
+ dc->use_clat = (flag > 0 ? true : false);
+
+ return count;
+}
+
+/* Debug sysfs read: report whether CLAT support is enabled. */
+static ssize_t debug_use_clat_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "use_clat: %d\n", dc->use_clat);
+}
+
+/* Debug sysfs write: any value > 0 marks DIT HAL support enabled. */
+static ssize_t debug_hal_support_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ /* int, not unsigned int: kstrtoint() takes an int * */
+ int flag;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &flag);
+ if (ret)
+ return -EINVAL;
+
+ dc->hal_support = (flag > 0 ? true : false);
+ return count;
+}
+
+/* Debug sysfs read: report whether DIT HAL support is enabled. */
+static ssize_t debug_hal_support_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "hal_support: %d\n", dc->hal_support);
+}
+#endif
+
+#if defined(DIT_DEBUG_LOW)
+/* Debug sysfs read: report the channel used for packet generation tests. */
+static ssize_t debug_pktgen_ch_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "pktgen ch: %d\n", dc->pktgen_ch);
+}
+
+/* Debug sysfs write: select the pktgen channel and, if a link device is
+ * present, point the upstream register at that channel's netdev (or clear
+ * it when no iod matches the channel).
+ */
+static ssize_t debug_pktgen_ch_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct io_device *iod;
+ int ch;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &ch);
+ if (ret)
+ return -EINVAL;
+
+ dc->pktgen_ch = ch;
+
+ /* without a link device there is nothing to reprogram yet */
+ if (!dc->ld)
+ goto out;
+
+ iod = link_get_iod_with_channel(dc->ld, dc->pktgen_ch);
+ if (iod)
+ DIT_INDIRECT_CALL(dc, set_reg_upstream, iod->ndev);
+ else
+ DIT_INDIRECT_CALL(dc, set_reg_upstream, NULL);
+
+out:
+ return count;
+}
+
+/* Debug sysfs write: set the force-bypass mode consumed by the
+ * per-version set_desc_filter_bypass hooks (1 = force bypass,
+ * 2 = force filtering on HW that honors it, 0 = normal).
+ */
+static ssize_t debug_set_force_bypass_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int bypass;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &bypass);
+ if (ret)
+ return -EINVAL;
+
+ dc->force_bypass = bypass;
+ return count;
+}
+#endif
+
+static DEVICE_ATTR_RO(status);
+static DEVICE_ATTR_RO(register);
+#if defined(DIT_DEBUG)
+static DEVICE_ATTR_WO(debug_set_rx_port);
+static DEVICE_ATTR_WO(debug_set_local_addr);
+static DEVICE_ATTR_WO(debug_reset_usage);
+static DEVICE_ATTR_RW(debug_use_tx);
+static DEVICE_ATTR_RW(debug_use_rx);
+static DEVICE_ATTR_RW(debug_use_clat);
+static DEVICE_ATTR_RW(debug_hal_support);
+#endif
+#if defined(DIT_DEBUG_LOW)
+static DEVICE_ATTR_RW(debug_pktgen_ch);
+static DEVICE_ATTR_WO(debug_set_force_bypass);
+#endif
+
+static struct attribute *dit_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_register.attr,
+#if defined(DIT_DEBUG)
+ &dev_attr_debug_set_rx_port.attr,
+ &dev_attr_debug_set_local_addr.attr,
+ &dev_attr_debug_reset_usage.attr,
+ &dev_attr_debug_use_tx.attr,
+ &dev_attr_debug_use_rx.attr,
+ &dev_attr_debug_use_clat.attr,
+ &dev_attr_debug_hal_support.attr,
+#endif
+#if defined(DIT_DEBUG_LOW)
+ &dev_attr_debug_pktgen_ch.attr,
+ &dev_attr_debug_set_force_bypass.attr,
+#endif
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(dit);
+
+/* Return true when DIT handles @queue_num for @dir: the direction must be
+ * enabled, the queue must match the configured pktproc queue, and enqueue
+ * must not be stopped with the queues already drained.
+ */
+bool dit_check_dir_use_queue(enum dit_direction dir, unsigned int queue_num)
+{
+ struct dit_desc_info *desc_info;
+
+ if (!dc)
+ return false;
+
+ desc_info = &dc->desc_info[dir];
+ if (!dc->use_dir[dir] || queue_num != desc_info->pktproc_queue_num)
+ return false;
+
+ if (dc->stop_enqueue[dir] && dit_check_queues_empty(dir))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(dit_check_dir_use_queue);
+
+/* Return the configured IRQ affinity CPU, or -EPERM before dit_create(). */
+int dit_get_irq_affinity(void)
+{
+ if (!dc)
+ return -EPERM;
+
+ return dc->irq_affinity;
+}
+EXPORT_SYMBOL(dit_get_irq_affinity);
+
+/* Set the affinity hint for every DIT IRQ to CPU @affinity; the Tx IRQ
+ * keeps its dedicated irq_affinity_tx CPU. Rejects CPUs beyond
+ * CONFIG_VENDOR_NR_CPUS (falls back to 8 when that option is not set).
+ */
+int dit_set_irq_affinity(int affinity)
+{
+ int i;
+ int num_cpu;
+
+ if (!dc)
+ return -EPERM;
+
+#if defined(CONFIG_VENDOR_NR_CPUS)
+ num_cpu = CONFIG_VENDOR_NR_CPUS;
+#else
+ num_cpu = 8;
+#endif
+ if (affinity >= num_cpu) {
+ mif_err("affinity:%d error. cpu max:%d\n", affinity, num_cpu);
+ return -EINVAL;
+ }
+
+ dc->irq_affinity = affinity;
+
+ for (i = 0; i < dc->irq_len; i++) {
+ int val = dc->irq_affinity;
+
+ if (dc->irq_buf[i] == dc->irq_num_tx)
+ val = dc->irq_affinity_tx;
+
+ mif_debug("num:%d affinity:%d\n", dc->irq_buf[i], val);
+ irq_set_affinity_hint(dc->irq_buf[i], cpumask_of(val));
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dit_set_irq_affinity);
+
+/* Record which pktproc queue feeds DIT for direction @dir. */
+int dit_set_pktproc_queue_num(enum dit_direction dir, u32 queue_num)
+{
+ struct dit_desc_info *desc_info;
+
+ if (!dc)
+ return -EPERM;
+
+ desc_info = &dc->desc_info[dir];
+ desc_info->pktproc_queue_num = queue_num;
+ mif_info("dir:%d queue_num:%d\n", dir, desc_info->pktproc_queue_num);
+
+ return 0;
+}
+EXPORT_SYMBOL(dit_set_pktproc_queue_num);
+
+/* Record the per-packet buffer size used for direction @dir. */
+int dit_set_buf_size(enum dit_direction dir, u32 size)
+{
+ struct dit_desc_info *desc_info = NULL;
+
+ if (!dc)
+ return -EPERM;
+
+ desc_info = &dc->desc_info[dir];
+ desc_info->buf_size = size;
+ mif_info("dir:%d size:%d\n", dir, desc_info->buf_size);
+
+ return 0;
+}
+EXPORT_SYMBOL(dit_set_buf_size);
+
+/* Record the pktproc physical base address used for direction @dir. */
+int dit_set_pktproc_base(enum dit_direction dir, phys_addr_t base)
+{
+ struct dit_desc_info *desc_info = NULL;
+
+ if (!dc)
+ return -EPERM;
+
+ desc_info = &dc->desc_info[dir];
+ desc_info->pktproc_pbase = base;
+ mif_info("dir:%d base:%pap\n", dir, &desc_info->pktproc_pbase);
+
+ return 0;
+}
+EXPORT_SYMBOL(dit_set_pktproc_base);
+
+/* Set the src/dst descriptor ring lengths for @dir to @len. The Rx
+ * direction additionally grows both rings by the DT-provided
+ * rx_extra_desc_ring_len headroom.
+ */
+int dit_set_desc_ring_len(enum dit_direction dir, u32 len)
+{
+ struct dit_desc_info *desc_info = NULL;
+
+ if (!dc)
+ return -EPERM;
+
+ desc_info = &dc->desc_info[dir];
+ desc_info->src_desc_ring_len = len;
+ desc_info->dst_desc_ring_len = len;
+
+ if (dir == DIT_DIR_RX) {
+ desc_info->src_desc_ring_len += dc->rx_extra_desc_ring_len;
+ desc_info->dst_desc_ring_len += dc->rx_extra_desc_ring_len;
+ }
+
+ mif_info("dir:%d len:%d src_len:%d dst_len:%d\n", dir, len,
+ desc_info->src_desc_ring_len, desc_info->dst_desc_ring_len);
+
+ return 0;
+}
+EXPORT_SYMBOL(dit_set_desc_ring_len);
+
+/* Report the current occupancy of the @dir source descriptor ring
+ * (wp/rp distance) through @usage.
+ */
+int dit_get_src_usage(enum dit_direction dir, u32 *usage)
+{
+ struct dit_desc_info *desc_info = NULL;
+
+ if (!dc)
+ return -EPERM;
+
+ desc_info = &dc->desc_info[dir];
+ *usage = circ_get_usage(desc_info->src_desc_ring_len,
+ desc_info->src_wp, desc_info->src_rp);
+ mif_debug("dir:%d usage:%d\n", dir, *usage);
+
+ return 0;
+}
+EXPORT_SYMBOL(dit_get_src_usage);
+
+/* Reset every destination ring's write/read pointer for @dir and
+ * re-arm its interrupt range.
+ */
+int dit_reset_dst_wp_rp(enum dit_direction dir)
+{
+ struct dit_desc_info *desc_info = NULL;
+ int ring_num;
+
+ if (!dc)
+ return -EPERM;
+
+ desc_info = &dc->desc_info[dir];
+ for (ring_num = DIT_DST_DESC_RING_0; ring_num < DIT_DST_DESC_RING_MAX; ring_num++) {
+ desc_info->dst_wp[ring_num] = 0;
+ desc_info->dst_rp[ring_num] = 0;
+ dit_set_dst_desc_int_range(dir, ring_num);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dit_reset_dst_wp_rp);
+
+/* Return the DIT net_device, or NULL before dit_create(). */
+struct net_device *dit_get_netdev(void)
+{
+ if (!dc)
+ return NULL;
+
+ return dc->netdev;
+}
+EXPORT_SYMBOL(dit_get_netdev);
+
+/* Return true when CLAT offload is enabled on this DIT instance. */
+bool dit_support_clat(void)
+{
+ if (!dc)
+ return false;
+
+ return dc->use_clat;
+}
+EXPORT_SYMBOL(dit_support_clat);
+
+#if IS_ENABLED(CONFIG_EXYNOS_ITMON)
+static int itmon_notifier_callback(struct notifier_block *nb,
+ unsigned long action, void *nb_data)
+{
+ struct itmon_notifier *itmon_data = nb_data;
+
+ if (IS_ERR_OR_NULL(itmon_data))
+ return NOTIFY_DONE;
+
+ if (itmon_data->port && !strncmp("DIT", itmon_data->port, sizeof("DIT") - 1)) {
+ dit_print_dump(DIT_DIR_TX, DIT_DUMP_ALL);
+ dit_print_dump(DIT_DIR_RX, DIT_DUMP_ALL);
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+#endif
+
+/* Apply SoC-specific defaults: the HW version (from Kconfig, else
+ * 2.1.0) and capability quirks per chip revision.
+ */
+static void dit_set_hw_specific(void)
+{
+#if defined(CONFIG_EXYNOS_DIT_VERSION)
+ dc->hw_version = CONFIG_EXYNOS_DIT_VERSION;
+#else
+ dc->hw_version = DIT_VERSION(2, 1, 0);
+#endif
+
+#if defined(CONFIG_SOC_GS101)
+ dc->hw_capabilities |= DIT_CAP_MASK_PORT_BIG_ENDIAN;
+ /* chipid: A0 = 0, B0 = 1 */
+ if (gs_chipid_get_type() >= 1)
+ dc->hw_capabilities &= ~DIT_CAP_MASK_PORT_BIG_ENDIAN;
+#endif
+}
+
+/* Populate dc from device-tree properties: sharability config (only when
+ * the sysreg region mapped), capabilities, per-direction enables, CLAT,
+ * page recycling, HAL options, extra Rx ring headroom and IRQ affinity.
+ */
+static int dit_read_dt(struct device_node *np)
+{
+ if (!IS_ERR_OR_NULL(dc->sharability_base)) {
+ mif_dt_read_u32(np, "dit_sharability_offset", dc->sharability_offset);
+ mif_dt_read_u32(np, "dit_sharability_value", dc->sharability_value);
+ }
+
+ mif_dt_read_u32(np, "dit_hw_capabilities", dc->hw_capabilities);
+
+ mif_dt_read_bool(np, "dit_use_tx", dc->use_dir[DIT_DIR_TX]);
+ mif_dt_read_bool(np, "dit_use_rx", dc->use_dir[DIT_DIR_RX]);
+ mif_dt_read_bool(np, "dit_use_clat", dc->use_clat);
+ mif_dt_read_bool(np, "dit_use_recycling", dc->use_page_recycling_rx);
+
+ mif_dt_read_bool(np, "dit_hal_support", dc->hal_support);
+ if (dc->hal_support) {
+ mif_dt_read_bool(np, "dit_hal_enqueue_rx", dc->hal_enqueue_rx);
+ /* with HAL-driven Rx enqueue, hold Rx until the HAL starts it */
+ if (dc->hal_enqueue_rx)
+ dc->stop_enqueue[DIT_DIR_RX] = true;
+ }
+
+ mif_dt_read_u32(np, "dit_rx_extra_desc_ring_len", dc->rx_extra_desc_ring_len);
+ mif_dt_read_u32(np, "dit_irq_affinity", dc->irq_affinity);
+ dc->irq_affinity_tx = dc->irq_affinity;
+
+ return 0;
+}
+
+/* Probe-time construction of the DIT controller: allocate the context,
+ * map the register and sysreg regions, read DT config, install the
+ * version-specific hooks, register IRQs, sysfs groups and the HAL.
+ * Any failure panics, since the modem interface cannot run without DIT
+ * once it is described in DT.
+ */
+int dit_create(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+#if IS_ENABLED(CONFIG_EXYNOS_ITMON)
+ struct notifier_block *itmon_nb = NULL;
+#endif
+ int ret;
+
+ if (!np) {
+ mif_err("of_node is null\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ dc = devm_kzalloc(dev, sizeof(struct dit_ctrl_t), GFP_KERNEL);
+ if (!dc) {
+ mif_err("dit ctrl alloc failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ dc->dev = dev;
+
+ dc->register_base = devm_platform_ioremap_resource_byname(pdev, "dit");
+ if (IS_ERR_OR_NULL(dc->register_base)) {
+ mif_err("register devm_ioremap error\n");
+ ret = -EFAULT;
+ goto error;
+ }
+
+ /* the sysreg region is optional; fall back to DMA mapping */
+ dc->sharability_base = devm_platform_ioremap_resource_byname(pdev, "sysreg");
+ if (IS_ERR_OR_NULL(dc->sharability_base)) {
+ mif_err("sharability devm_ioremap error. use dma map.\n");
+ dc->use_dma_map = true;
+ }
+
+ ret = dit_read_dt(np);
+ if (ret) {
+ mif_err("read dt error\n");
+ goto error;
+ }
+
+ dit_set_hw_specific();
+
+ ret = dit_ver_create(dc);
+ if (ret) {
+ mif_err("dit versioning failed\n");
+ goto error;
+ }
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+
+ ret = dit_register_irq(pdev);
+ if (ret) {
+ mif_err("register irq error\n");
+ goto error;
+ }
+
+ spin_lock_init(&dc->src_lock);
+ INIT_LIST_HEAD(&dc->reg_value_q);
+ atomic_set(&dc->init_running, 0);
+
+ dit_set_irq_affinity(dc->irq_affinity);
+ dev_set_drvdata(dev, dc);
+
+#if IS_ENABLED(CONFIG_CPU_IDLE)
+ dc->idle_ip_index = exynos_get_idle_ip_index(dev_name(&pdev->dev));
+ if (dc->idle_ip_index < 0) {
+ mif_err("%s idle ip registration failed, ret: %d\n",
+ dev_name(&pdev->dev), dc->idle_ip_index);
+ /* propagate the failure instead of returning a stale ret */
+ ret = dc->idle_ip_index;
+ goto error;
+ }
+
+ exynos_update_ip_idle_status(dc->idle_ip_index, DIT_IDLE_IP_IDLE);
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_ITMON)
+ itmon_nb = devm_kzalloc(dev, sizeof(struct notifier_block), GFP_KERNEL);
+ if (!itmon_nb) {
+ mif_err("itmon notifier block alloc failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ itmon_nb->notifier_call = itmon_notifier_callback;
+ itmon_notifier_chain_register(itmon_nb);
+#endif
+
+ ret = sysfs_create_groups(&dev->kobj, dit_groups);
+ if (ret != 0) {
+ mif_err("sysfs_create_group() error %d\n", ret);
+ goto error;
+ }
+
+ ret = dit_hal_create(dc);
+ if (ret) {
+ mif_err("dit hal create failed\n");
+ goto error;
+ }
+
+ if (dc->use_page_recycling_rx)
+ dc->page_recycling_skb_padding = NET_SKB_PAD + NET_IP_ALIGN;
+
+ mif_info("dit created. hw_ver:0x%08X tx:%d rx:%d clat:%d hal:%d ext:%d irq:%d pg_r:%d\n",
+ dc->hw_version, dc->use_dir[DIT_DIR_TX], dc->use_dir[DIT_DIR_RX], dc->use_clat,
+ dc->hal_support, dc->rx_extra_desc_ring_len, dc->irq_affinity,
+ dc->use_page_recycling_rx);
+
+ return 0;
+
+error:
+ /* dc may still be NULL here (missing of_node or alloc failure);
+ * never dereference it unconditionally
+ */
+ if (dc) {
+ if (!IS_ERR_OR_NULL(dc->sharability_base))
+ devm_iounmap(dev, dc->sharability_base);
+
+ if (!IS_ERR_OR_NULL(dc->register_base))
+ devm_iounmap(dev, dc->register_base);
+
+ devm_kfree(dev, dc);
+ dc = NULL;
+ }
+
+ panic("DIT driver probe failed\n");
+ return ret;
+}
+
+/* Platform driver probe: all setup lives in dit_create(). */
+static int dit_probe(struct platform_device *pdev)
+{
+ return dit_create(pdev);
+}
+
+/* Platform driver remove: nothing to undo (resources are devm-managed). */
+static int dit_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+/* noirq suspend: deinit DIT while backing up state for restore on resume.
+ * A controller without a link device attached has nothing to save.
+ */
+static int dit_suspend(struct device *dev)
+{
+ struct dit_ctrl_t *dc = dev_get_drvdata(dev);
+ int ret;
+
+ if (unlikely(!dc) || unlikely(!dc->ld))
+ return 0;
+
+ ret = dit_init(NULL, DIT_INIT_DEINIT, DIT_STORE_BACKUP);
+ if (ret) {
+ mif_err("deinit failed ret:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* noirq resume: re-apply IRQ affinity and re-init DIT from the backup
+ * taken at suspend. On failure, report which direction is still busy.
+ */
+static int dit_resume(struct device *dev)
+{
+ struct dit_ctrl_t *dc = dev_get_drvdata(dev);
+ int ret;
+
+ if (unlikely(!dc) || unlikely(!dc->ld))
+ return 0;
+
+ dit_set_irq_affinity(dc->irq_affinity);
+
+ ret = dit_init(NULL, DIT_INIT_NORMAL, DIT_STORE_RESTORE);
+ if (ret) {
+ unsigned int dir;
+
+ mif_err("init failed ret:%d\n", ret);
+ for (dir = 0; dir < DIT_DIR_MAX; dir++) {
+ if (dit_is_busy(dir))
+ mif_err("busy (dir:%d)\n", dir);
+ }
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops dit_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dit_suspend, dit_resume)
+};
+
+static const struct of_device_id dit_dt_match[] = {
+ { .compatible = "samsung,exynos-dit", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dit_dt_match);
+
+static struct platform_driver dit_driver = {
+ .probe = dit_probe,
+ .remove = dit_remove,
+ .driver = {
+ .name = "dit",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dit_dt_match),
+ .pm = &dit_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(dit_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung DIT Driver");
+
diff --git a/dit/dit_2_1_0.c b/dit/dit_2_1_0.c
new file mode 100644
index 0000000..4979106
--- /dev/null
+++ b/dit/dit_2_1_0.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS DIT(Direct IP Translator) Driver support
+ *
+ */
+
+#include "dit_common.h"
+#include "dit_hal.h"
+
+static struct dit_ctrl_t *dc;
+
+/* DIT 2.1.0 hook: decide per source descriptor whether the packet bypasses
+ * the NAT filter. Rx IPv6 packets (CLAT) and packets from the upstream
+ * netdev's channel are filtered; everything else (including all Tx)
+ * bypasses. The decision is encoded in the LRO start/end control bits.
+ */
+static int dit_set_desc_filter_bypass(enum dit_direction dir, struct dit_src_desc *src_desc,
+ u8 *src, bool *is_upstream_pkt)
+{
+ struct net_device *upstream_netdev;
+ bool bypass = true;
+ u8 mask = 0;
+
+ /*
+ * LRO start/end bit can be used for filter bypass
+ * 1/1: filtered
+ * 0/0: filter bypass
+ * dit does not support checksum yet
+ */
+ cpif_set_bit(mask, DIT_DESC_C_END);
+ cpif_set_bit(mask, DIT_DESC_C_START);
+
+ if (dir != DIT_DIR_RX)
+ goto out;
+
+ /*
+ * check ipv6 for clat.
+ * port table does not have entries for tun device or ipv6.
+ * every ipv6 packets from any rmnet can see port table.
+ */
+ if ((src[0] & 0xF0) == 0x60) {
+ bypass = false;
+ goto out;
+ }
+
+ /* check upstream netdev */
+ upstream_netdev = dit_hal_get_dst_netdev(DIT_DST_DESC_RING_0);
+ if (upstream_netdev) {
+ struct io_device *iod = link_get_iod_with_channel(dc->ld, src_desc->ch_id);
+
+ if (iod && iod->ndev == upstream_netdev) {
+ *is_upstream_pkt = true;
+ bypass = false;
+ goto out;
+ }
+ }
+
+out:
+#if defined(DIT_DEBUG_LOW)
+ /* debug override: 1 forces bypass, 2 forces filtering */
+ if (dc->force_bypass == 1)
+ bypass = true;
+ else if (dc->force_bypass == 2)
+ bypass = false;
+#endif
+
+ if (bypass)
+ src_desc->control &= ~mask;
+ else
+ src_desc->control |= mask;
+
+ return 0;
+}
+
+/* Register the 2.1.0 interrupt layout (three Rx dst rings, one Tx, one
+ * error) with the common layer; the arrays are static so the pointers
+ * stored in dc stay valid after return.
+ */
+static void __dit_set_interrupt(void)
+{
+ static int irq_pending_bit[] = {
+ RX_DST0_INT_PENDING_BIT, RX_DST1_INT_PENDING_BIT,
+ RX_DST2_INT_PENDING_BIT, TX_DST0_INT_PENDING_BIT,
+ ERR_INT_PENDING_BIT};
+ static char const *irq_name[] = {
+ "DIT-RxDst0", "DIT-RxDst1",
+ "DIT-RxDst2", "DIT-Tx",
+ "DIT-Err"};
+
+ dc->irq_pending_bit = irq_pending_bit;
+ dc->irq_name = irq_name;
+ dc->irq_len = ARRAY_SIZE(irq_pending_bit);
+}
+
+/* Bind the 2.1.0 version-specific hooks onto the shared controller
+ * context; called once from the common dit_create() path.
+ */
+int dit_ver_create(struct dit_ctrl_t *dc_ptr)
+{
+ if (unlikely(!dc_ptr))
+ return -EPERM;
+
+ dc = dc_ptr;
+
+ __dit_set_interrupt();
+
+ dc->set_desc_filter_bypass = dit_set_desc_filter_bypass;
+
+ return 0;
+}
+
diff --git a/dit/dit_2_1_0.h b/dit/dit_2_1_0.h
new file mode 100644
index 0000000..3277501
--- /dev/null
+++ b/dit/dit_2_1_0.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#ifndef __DIT_2_1_0_H__
+#define __DIT_2_1_0_H__
+
+#define DIT_REG_CLK_GT_OFF 0x0004 /* 20 bit */
+#define DIT_REG_DMA_INIT_DATA 0x0008 /* 28 bit */
+
+/* 0:16beat, 1:8beat, 2:4beat, 3:2beat, 4:1beat */
+#define DIT_REG_TX_DESC_CTRL_SRC 0x000C /* 3 bit */
+#define DIT_REG_TX_DESC_CTRL_DST 0x0010 /* 3 bit */
+#define DIT_REG_TX_HEAD_CTRL 0x0014 /* 3 bit */
+#define DIT_REG_TX_MOD_HD_CTRL 0x0018 /* 3 bit */
+#define DIT_REG_TX_PKT_CTRL 0x001C /* 3 bit */
+#define DIT_REG_TX_CHKSUM_CTRL 0x0020 /* 3 bit */
+
+#define DIT_REG_RX_DESC_CTRL_SRC 0x0024 /* 3 bit */
+#define DIT_REG_RX_DESC_CTRL_DST 0x0028 /* 3 bit */
+#define DIT_REG_RX_HEAD_CTRL 0x002C /* 3 bit */
+#define DIT_REG_RX_MOD_HD_CTRL 0x0030 /* 3 bit */
+#define DIT_REG_RX_PKT_CTRL 0x0034 /* 3 bit */
+#define DIT_REG_RX_CHKSUM_CTRL 0x0038 /* 3 bit */
+
+#define DIT_REG_DMA_CHKSUM_OFF 0x003C /* 2 bit */
+
+/* start address for Tx desc */
+#define DIT_REG_TX_RING_START_ADDR_0_SRC 0x0044
+#define DIT_REG_TX_RING_START_ADDR_1_SRC 0x0048
+#define DIT_REG_TX_RING_START_ADDR_0_DST0 0x004C
+#define DIT_REG_TX_RING_START_ADDR_1_DST0 0x0050
+#define DIT_REG_TX_RING_START_ADDR_0_DST1 0x0054
+#define DIT_REG_TX_RING_START_ADDR_1_DST1 0x0058
+#define DIT_REG_TX_RING_START_ADDR_0_DST2 0x005C
+#define DIT_REG_TX_RING_START_ADDR_1_DST2 0x0060
+
+/* start address for Rx desc */
+#define DIT_REG_RX_RING_START_ADDR_0_SRC 0x0064
+#define DIT_REG_RX_RING_START_ADDR_1_SRC 0x0068
+#define DIT_REG_RX_RING_START_ADDR_0_DST0 0x006C
+#define DIT_REG_RX_RING_START_ADDR_1_DST0 0x0070
+#define DIT_REG_RX_RING_START_ADDR_0_DST1 0x0074
+#define DIT_REG_RX_RING_START_ADDR_1_DST1 0x0078
+#define DIT_REG_RX_RING_START_ADDR_0_DST2 0x007C
+#define DIT_REG_RX_RING_START_ADDR_1_DST2 0x0080
+
+#define DIT_REG_INT_ENABLE 0x0084
+#define DIT_REG_INT_MASK 0x0088
+#define DIT_REG_INT_PENDING 0x008C
+#define DIT_REG_STATUS 0x0090
+
+/* address for Tx desc */
+#define DIT_REG_NAT_TX_DESC_ADDR_0_SRC 0x4000 /* 32 bit */
+#define DIT_REG_NAT_TX_DESC_ADDR_1_SRC 0x4004 /* 4 bit */
+#define DIT_REG_NAT_TX_DESC_ADDR_EN_SRC 0x4008 /* 1 bit */
+#define DIT_REG_NAT_TX_DESC_ADDR_0_DST0 0x4018
+#define DIT_REG_NAT_TX_DESC_ADDR_1_DST0 0x401C
+#define DIT_REG_NAT_TX_DESC_ADDR_0_DST1 0x4020
+#define DIT_REG_NAT_TX_DESC_ADDR_1_DST1 0x4024
+#define DIT_REG_NAT_TX_DESC_ADDR_0_DST2 0x4028
+#define DIT_REG_NAT_TX_DESC_ADDR_1_DST2 0x402C
+
+/* address for Rx desc */
+#define DIT_REG_NAT_RX_DESC_ADDR_0_SRC 0x4030 /* 32 bit */
+#define DIT_REG_NAT_RX_DESC_ADDR_1_SRC 0x4034 /* 4 bit */
+#define DIT_REG_NAT_RX_DESC_ADDR_EN_SRC 0x4038 /* 1 bit */
+#define DIT_REG_NAT_RX_DESC_ADDR_0_DST0 0x4048
+#define DIT_REG_NAT_RX_DESC_ADDR_1_DST0 0x404C
+#define DIT_REG_NAT_RX_DESC_ADDR_0_DST1 0x4050
+#define DIT_REG_NAT_RX_DESC_ADDR_1_DST1 0x4054
+#define DIT_REG_NAT_RX_DESC_ADDR_0_DST2 0x4058
+#define DIT_REG_NAT_RX_DESC_ADDR_1_DST2 0x405C
+
+struct dit_src_desc {
+ u64 src_addr:36,
+ _reserved_0:12,
+ /* the below 16 bits are "private info" on the document */
+ ch_id:8,
+ pre_csum:1, /* checksum successful from pktproc */
+ udp_csum_zero:1, /* reset udp checksum 0 after NAT */
+ _reserved_2:6;
+ u64 length:16,
+ _reserved_1:32,
+ control:8,
+ status:8;
+} __packed;
+
+/* DIT_INT_PENDING */
+enum dit_int_pending_bits {
+ TX_DST0_INT_PENDING_BIT = 0,
+ TX_DST1_INT_PENDING_BIT,
+ TX_DST2_INT_PENDING_BIT,
+ RX_DST0_INT_PENDING_BIT = 3,
+ RX_DST1_INT_PENDING_BIT,
+ RX_DST2_INT_PENDING_BIT,
+ ERR_INT_PENDING_BIT = 14,
+};
+
+#define DIT_TX_INT_PENDING_MASK \
+ (BIT(TX_DST0_INT_PENDING_BIT) | BIT(TX_DST1_INT_PENDING_BIT) | \
+ BIT(TX_DST2_INT_PENDING_BIT) | BIT(ERR_INT_PENDING_BIT))
+
+#define DIT_RX_INT_PENDING_MASK \
+ (BIT(RX_DST0_INT_PENDING_BIT) | BIT(RX_DST1_INT_PENDING_BIT) | \
+ BIT(RX_DST2_INT_PENDING_BIT) | BIT(ERR_INT_PENDING_BIT))
+
+enum dit_int_enable_bits {
+ TX_DST0_INT_ENABLE_BIT = 0,
+ TX_DST1_INT_ENABLE_BIT,
+ TX_DST2_INT_ENABLE_BIT,
+ RX_DST0_INT_ENABLE_BIT = 3,
+ RX_DST1_INT_ENABLE_BIT,
+ RX_DST2_INT_ENABLE_BIT,
+ ERR_INT_ENABLE_BIT = 14,
+};
+
+#define DIT_INT_ENABLE_MASK \
+ (BIT(TX_DST0_INT_ENABLE_BIT) | BIT(TX_DST1_INT_ENABLE_BIT) | \
+ BIT(TX_DST2_INT_ENABLE_BIT) | \
+ BIT(RX_DST0_INT_ENABLE_BIT) | BIT(RX_DST1_INT_ENABLE_BIT) | \
+ BIT(RX_DST2_INT_ENABLE_BIT) | \
+ BIT(ERR_INT_ENABLE_BIT))
+
+#define DIT_INT_MASK_MASK DIT_INT_ENABLE_MASK
+
+#endif /* __DIT_2_1_0_H__ */
diff --git a/dit/dit_2_2_0.c b/dit/dit_2_2_0.c
new file mode 100644
index 0000000..886860d
--- /dev/null
+++ b/dit/dit_2_2_0.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS DIT(Direct IP Translator) Driver support
+ *
+ */
+
+#include "dit_common.h"
+#include "dit_hal.h"
+
+static struct dit_ctrl_t *dc;
+static int upstream_ch = -1;
+
+/* DIT 2.2.0 hook: read the HW version register into @version. */
+static int dit_get_reg_version(u32 *version)
+{
+ *version = READ_REG_VALUE(dc, DIT_REG_VERSION);
+
+ return 0;
+}
+
+/* Program the NAT interface bitmap registers so that only channel @ch is
+ * selected (one bit within one of the 32-bit parts); a negative @ch
+ * clears the whole bitmap. Enqueued under src_lock so all parts update
+ * together.
+ */
+static void dit_set_reg_nat_interface(int ch)
+{
+ unsigned int part = 0, value = 0;
+ unsigned int i, offset;
+ unsigned long flags;
+
+ if (ch >= 0) {
+ /* part selects the 32-bit register, value the bit within it */
+ part = (ch >> 5) & 0x7;
+ value = 1 << (ch & 0x1F);
+ }
+
+ spin_lock_irqsave(&dc->src_lock, flags);
+ for (i = 0; i < DIT_REG_NAT_INTERFACE_NUM_MAX; i++) {
+ offset = i * DIT_REG_NAT_INTERFACE_NUM_INTERVAL;
+ dit_enqueue_reg_value_with_ext_lock((i == part ? value : 0),
+ DIT_REG_NAT_INTERFACE_NUM + offset);
+ }
+ spin_unlock_irqrestore(&dc->src_lock, flags);
+}
+
+/* iodevs_for_each() callback: when @iod owns the target netdev passed in
+ * @args, record its channel as upstream and program the NAT interface.
+ */
+static void dit_set_reg_upstream_internal(struct io_device *iod, void *args)
+{
+ struct net_device *netdev = (struct net_device *)args;
+
+ if (iod->ndev == netdev) {
+ upstream_ch = (int)iod->ch;
+ dit_set_reg_nat_interface(iod->ch);
+ }
+}
+
+/* DIT 2.2.0 hook: select @netdev as the upstream interface by scanning the
+ * io devices for a channel match; a NULL @netdev clears the selection.
+ */
+static int dit_set_reg_upstream(struct net_device *netdev)
+{
+ if (!netdev) {
+ dit_set_reg_nat_interface(-1);
+ upstream_ch = -1;
+ } else {
+ if (unlikely(!dc->ld))
+ return -EINVAL;
+
+ iodevs_for_each(dc->ld->msd, dit_set_reg_upstream_internal, netdev);
+ }
+
+ return 0;
+}
+
+/* DIT 2.2.0 hook: mark filter bypass per source descriptor. Unlike 2.1.0,
+ * Rx defaults to filtered (bypass only forced for Tx or via debug), and
+ * the descriptor's interface field is set so HW can match
+ * DIT_REG_NAT_INTERFACE_NUM.
+ */
+static int dit_set_desc_filter_bypass(enum dit_direction dir, struct dit_src_desc *src_desc,
+ u8 *src, bool *is_upstream_pkt)
+{
+ bool bypass = false;
+ u8 mask = 0;
+
+ /*
+ * LRO start/end bit can be used for filter bypass
+ * 1/1: filtered
+ * 0/0: filter bypass
+ * dit does not support checksum yet
+ */
+ cpif_set_bit(mask, DIT_DESC_C_END);
+ cpif_set_bit(mask, DIT_DESC_C_START);
+
+ if (dir != DIT_DIR_RX) {
+ bypass = true;
+ goto out;
+ }
+
+ /* ToDo: not need if HW supports UDP zero checksum.
+ * check if the packet will be filtered by DIT_REG_NAT_INTERFACE_NUM.
+ */
+ if (upstream_ch >= 0 && src_desc->ch_id == upstream_ch)
+ *is_upstream_pkt = true;
+
+out:
+#if defined(DIT_DEBUG_LOW)
+ if (dc->force_bypass == 1)
+ bypass = true;
+#endif
+
+ if (bypass) {
+ src_desc->control &= ~mask;
+ } else {
+ src_desc->control |= mask;
+ src_desc->interface = src_desc->ch_id;
+ }
+
+ return 0;
+}
+
+/* DIT 2.2.0 hook: tell HW where the source ring tail is for @dir by
+ * writing the physical address of descriptor @tail, then latching it
+ * with the VALID register.
+ */
+static int dit_set_src_desc_tail(enum dit_direction dir, struct dit_desc_info *desc_info,
+ unsigned int tail)
+{
+ phys_addr_t p_desc;
+ u32 offset_lo = 0, offset_hi = 0, offset_en = 0;
+
+ p_desc = desc_info->src_desc_ring_daddr + (sizeof(struct dit_src_desc) * tail);
+ if (dir == DIT_DIR_TX) {
+ offset_lo = DIT_REG_TX_SRC_A_TAIL_ADDR_0_TEMP;
+ offset_hi = DIT_REG_TX_SRC_A_TAIL_ADDR_1_TEMP;
+ offset_en = DIT_REG_TX_SRC_A_TAIL_VALID;
+ } else {
+ offset_lo = DIT_REG_RX_SRC_A_TAIL_ADDR_0_TEMP;
+ offset_hi = DIT_REG_RX_SRC_A_TAIL_ADDR_1_TEMP;
+ offset_en = DIT_REG_RX_SRC_A_TAIL_VALID;
+ }
+
+ WRITE_REG_PADDR_LO(dc, p_desc, offset_lo);
+ WRITE_REG_PADDR_HI(dc, p_desc, offset_hi);
+ WRITE_REG_VALUE(dc, 0x1, offset_en);
+
+ return 0;
+}
+
+/* DIT 2.2.0 hook: the HW validates the dst01-dst03 ring address registers
+ * even though those rings are unused, so point them all at the DST0
+ * descriptor ring of @dir to keep the checks satisfied.
+ */
+static int dit_do_init_desc(enum dit_direction dir)
+{
+ struct dit_desc_info *desc_info;
+ phys_addr_t p_desc;
+
+ /* dst01-dst03 is not used but hw checks the registers */
+ u32 tx_offset_lo[] = {
+ DIT_REG_TX_RING_START_ADDR_0_DST01, DIT_REG_TX_RING_START_ADDR_0_DST02,
+ DIT_REG_TX_RING_START_ADDR_0_DST03,
+ DIT_REG_NAT_TX_DESC_ADDR_0_DST01, DIT_REG_NAT_TX_DESC_ADDR_0_DST02,
+ DIT_REG_NAT_TX_DESC_ADDR_0_DST03};
+ u32 tx_offset_hi[] = {
+ DIT_REG_TX_RING_START_ADDR_1_DST01, DIT_REG_TX_RING_START_ADDR_1_DST02,
+ DIT_REG_TX_RING_START_ADDR_1_DST03,
+ DIT_REG_NAT_TX_DESC_ADDR_1_DST01, DIT_REG_NAT_TX_DESC_ADDR_1_DST02,
+ DIT_REG_NAT_TX_DESC_ADDR_1_DST03};
+ u32 rx_offset_lo[] = {
+ DIT_REG_RX_RING_START_ADDR_0_DST01, DIT_REG_RX_RING_START_ADDR_0_DST02,
+ DIT_REG_RX_RING_START_ADDR_0_DST03,
+ DIT_REG_NAT_RX_DESC_ADDR_0_DST01, DIT_REG_NAT_RX_DESC_ADDR_0_DST02,
+ DIT_REG_NAT_RX_DESC_ADDR_0_DST03};
+ u32 rx_offset_hi[] = {
+ DIT_REG_RX_RING_START_ADDR_1_DST01, DIT_REG_RX_RING_START_ADDR_1_DST02,
+ DIT_REG_RX_RING_START_ADDR_1_DST03,
+ DIT_REG_NAT_RX_DESC_ADDR_1_DST01, DIT_REG_NAT_RX_DESC_ADDR_1_DST02,
+ DIT_REG_NAT_RX_DESC_ADDR_1_DST03};
+
+ u32 *offset_lo;
+ u32 *offset_hi;
+ unsigned int offset_len;
+ unsigned int i;
+
+ if (dir == DIT_DIR_TX) {
+ offset_lo = tx_offset_lo;
+ offset_hi = tx_offset_hi;
+ offset_len = ARRAY_SIZE(tx_offset_lo);
+ } else {
+ offset_lo = rx_offset_lo;
+ offset_hi = rx_offset_hi;
+ offset_len = ARRAY_SIZE(rx_offset_lo);
+ }
+
+ desc_info = &dc->desc_info[dir];
+ p_desc = desc_info->dst_desc_ring_daddr[DIT_DST_DESC_RING_0];
+
+ for (i = 0; i < offset_len; i++) {
+ WRITE_REG_PADDR_LO(dc, p_desc, offset_lo[i]);
+ WRITE_REG_PADDR_HI(dc, p_desc, offset_hi[i]);
+ }
+
+ return 0;
+}
+
+/* DIT 2.2.0 hook: HW bring-up — enable TTL decrement, reset both Tx and
+ * Rx destination descriptors, and clear the upstream selection.
+ */
+static int dit_do_init_hw(void)
+{
+ WRITE_REG_VALUE(dc, BIT(RX_TTLDEC_EN_BIT), DIT_REG_NAT_TTLDEC_EN);
+ WRITE_REG_VALUE(dc, BIT(TX_DST_DESC_RESET_BIT), DIT_REG_DST_DESC_RESET);
+ WRITE_REG_VALUE(dc, BIT(RX_DST_DESC_RESET_BIT), DIT_REG_DST_DESC_RESET);
+ dit_set_reg_upstream(NULL);
+
+ return 0;
+}
+
+/* Register the 2.2.0 interrupt layout (no separate error IRQ, unlike
+ * 2.1.0); the arrays are static so the pointers stored in dc stay valid.
+ */
+static void __dit_set_interrupt(void)
+{
+ static int irq_pending_bit[] = {
+ RX_DST00_INT_PENDING_BIT, RX_DST1_INT_PENDING_BIT,
+ RX_DST2_INT_PENDING_BIT, TX_DST00_INT_PENDING_BIT};
+ static char const *irq_name[] = {
+ "DIT-RxDst00", "DIT-RxDst1",
+ "DIT-RxDst2", "DIT-Tx"};
+
+ dc->irq_pending_bit = irq_pending_bit;
+ dc->irq_name = irq_name;
+ dc->irq_len = ARRAY_SIZE(irq_pending_bit);
+}
+
+/* Bind the 2.2.0 version-specific hooks onto the shared controller
+ * context; called once from the common dit_create() path.
+ */
+int dit_ver_create(struct dit_ctrl_t *dc_ptr)
+{
+ if (unlikely(!dc_ptr))
+ return -EPERM;
+
+ dc = dc_ptr;
+
+ __dit_set_interrupt();
+
+ dc->get_reg_version = dit_get_reg_version;
+ dc->set_reg_upstream = dit_set_reg_upstream;
+ dc->set_desc_filter_bypass = dit_set_desc_filter_bypass;
+ dc->set_src_desc_tail = dit_set_src_desc_tail;
+ dc->do_init_desc = dit_do_init_desc;
+ dc->do_init_hw = dit_do_init_hw;
+
+ return 0;
+}
+
diff --git a/dit/dit_2_2_0.h b/dit/dit_2_2_0.h
new file mode 100644
index 0000000..8c4e524
--- /dev/null
+++ b/dit/dit_2_2_0.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#ifndef __DIT_2_2_0_H__
+#define __DIT_2_2_0_H__
+
+#define DIT_REG_CLK_GT_OFF 0x0100 /* 20 bit */
+#define DIT_REG_DMA_INIT_DATA 0x0104 /* 28 bit */
+
+/* 0:16beat, 1:8beat, 2:4beat, 3:2beat, 4:1beat */
+#define DIT_REG_TX_DESC_CTRL_SRC 0x0108 /* 3 bit */
+#define DIT_REG_TX_DESC_CTRL_DST 0x010C /* 3 bit */
+#define DIT_REG_TX_HEAD_CTRL 0x0110 /* 3 bit */
+#define DIT_REG_TX_MOD_HD_CTRL 0x0114 /* 3 bit */
+#define DIT_REG_TX_PKT_CTRL 0x0118 /* 3 bit */
+#define DIT_REG_TX_CHKSUM_CTRL 0x011C /* 3 bit */
+
+#define DIT_REG_RX_DESC_CTRL_SRC 0x0120 /* 3 bit */
+#define DIT_REG_RX_DESC_CTRL_DST 0x0124 /* 3 bit */
+#define DIT_REG_RX_HEAD_CTRL 0x0128 /* 3 bit */
+#define DIT_REG_RX_MOD_HD_CTRL 0x012C /* 3 bit */
+#define DIT_REG_RX_PKT_CTRL 0x0130 /* 3 bit */
+#define DIT_REG_RX_CHKSUM_CTRL 0x0134 /* 3 bit */
+
+#define DIT_REG_DMA_CHKSUM_OFF 0x0138 /* 2 bit */
+#define DIT_REG_INT_ENABLE 0x0140
+#define DIT_REG_INT_MASK 0x0144
+#define DIT_REG_INT_PENDING 0x0148
+#define DIT_REG_STATUS 0x014C
+
+/* start address for Tx desc */
+#define DIT_REG_TX_RING_START_ADDR_0_SRC_A 0x0200
+#define DIT_REG_TX_RING_START_ADDR_1_SRC_A 0x0204
+#define DIT_REG_TX_RING_START_ADDR_0_SRC DIT_REG_TX_RING_START_ADDR_0_SRC_A
+#define DIT_REG_TX_RING_START_ADDR_1_SRC DIT_REG_TX_RING_START_ADDR_1_SRC_A
+#define DIT_REG_TX_RING_START_ADDR_0_DST00 0x0218
+#define DIT_REG_TX_RING_START_ADDR_1_DST00 0x021C
+#define DIT_REG_TX_RING_START_ADDR_0_DST0 DIT_REG_TX_RING_START_ADDR_0_DST00
+#define DIT_REG_TX_RING_START_ADDR_1_DST0 DIT_REG_TX_RING_START_ADDR_1_DST00
+#define DIT_REG_TX_RING_START_ADDR_0_DST1 0x0220
+#define DIT_REG_TX_RING_START_ADDR_1_DST1 0x0224
+#define DIT_REG_TX_RING_START_ADDR_0_DST2 0x0228
+#define DIT_REG_TX_RING_START_ADDR_1_DST2 0x022C
+#define DIT_REG_TX_RING_START_ADDR_0_DST01 0x0230
+#define DIT_REG_TX_RING_START_ADDR_1_DST01 0x0234
+#define DIT_REG_TX_RING_START_ADDR_0_DST02 0x0238
+#define DIT_REG_TX_RING_START_ADDR_1_DST02 0x023C
+#define DIT_REG_TX_RING_START_ADDR_0_DST03 0x0240
+#define DIT_REG_TX_RING_START_ADDR_1_DST03 0x0244
+
+/* start address for Rx desc */
+#define DIT_REG_RX_RING_START_ADDR_0_SRC_A 0x0248
+#define DIT_REG_RX_RING_START_ADDR_1_SRC_A 0x024C
+#define DIT_REG_RX_RING_START_ADDR_0_SRC DIT_REG_RX_RING_START_ADDR_0_SRC_A
+#define DIT_REG_RX_RING_START_ADDR_1_SRC DIT_REG_RX_RING_START_ADDR_1_SRC_A
+#define DIT_REG_RX_RING_START_ADDR_0_DST00 0x0260
+#define DIT_REG_RX_RING_START_ADDR_1_DST00 0x0264
+#define DIT_REG_RX_RING_START_ADDR_0_DST0 DIT_REG_RX_RING_START_ADDR_0_DST00
+#define DIT_REG_RX_RING_START_ADDR_1_DST0 DIT_REG_RX_RING_START_ADDR_1_DST00
+#define DIT_REG_RX_RING_START_ADDR_0_DST1 0x0268
+#define DIT_REG_RX_RING_START_ADDR_1_DST1 0x026C
+#define DIT_REG_RX_RING_START_ADDR_0_DST2 0x0270
+#define DIT_REG_RX_RING_START_ADDR_1_DST2 0x0274
+#define DIT_REG_RX_RING_START_ADDR_0_DST01 0x0278
+#define DIT_REG_RX_RING_START_ADDR_1_DST01 0x027C
+#define DIT_REG_RX_RING_START_ADDR_0_DST02 0x0280
+#define DIT_REG_RX_RING_START_ADDR_1_DST02 0x0284
+#define DIT_REG_RX_RING_START_ADDR_0_DST03 0x0288
+#define DIT_REG_RX_RING_START_ADDR_1_DST03 0x028C
+
+/* tail for 2.2.1 */
+#define DIT_REG_TX_SRC_A_TAIL_ADDR_0_TEMP 0x0290
+#define DIT_REG_TX_SRC_A_TAIL_ADDR_1_TEMP 0x0294
+#define DIT_REG_RX_SRC_A_TAIL_ADDR_0_TEMP 0x0308
+#define DIT_REG_RX_SRC_A_TAIL_ADDR_1_TEMP 0x030C
+#define DIT_REG_TX_SRC_A_TAIL_VALID 0x0320
+#define DIT_REG_RX_SRC_A_TAIL_VALID 0x032C
+#define DIT_REG_DST_DESC_RESET 0x0338
+
+/* address for Tx desc */
+#define DIT_REG_NAT_TX_DESC_ADDR_0_SRC_A 0x4000 /* 32 bit */
+#define DIT_REG_NAT_TX_DESC_ADDR_1_SRC_A 0x4004 /* 4 bit */
+#define DIT_REG_NAT_TX_DESC_ADDR_EN_SRC_A 0x4008 /* 1 bit */
+#define DIT_REG_NAT_TX_DESC_ADDR_0_SRC DIT_REG_NAT_TX_DESC_ADDR_0_SRC_A
+#define DIT_REG_NAT_TX_DESC_ADDR_1_SRC DIT_REG_NAT_TX_DESC_ADDR_1_SRC_A
+#define DIT_REG_NAT_TX_DESC_ADDR_EN_SRC DIT_REG_NAT_TX_DESC_ADDR_EN_SRC_A
+#define DIT_REG_NAT_TX_DESC_ADDR_0_DST00 0x4024
+#define DIT_REG_NAT_TX_DESC_ADDR_1_DST00 0x4028
+#define DIT_REG_NAT_TX_DESC_ADDR_0_DST0 DIT_REG_NAT_TX_DESC_ADDR_0_DST00
+#define DIT_REG_NAT_TX_DESC_ADDR_1_DST0 DIT_REG_NAT_TX_DESC_ADDR_1_DST00
+#define DIT_REG_NAT_TX_DESC_ADDR_0_DST1 0x402C
+#define DIT_REG_NAT_TX_DESC_ADDR_1_DST1 0x4030
+#define DIT_REG_NAT_TX_DESC_ADDR_0_DST2 0x4034
+#define DIT_REG_NAT_TX_DESC_ADDR_1_DST2 0x4038
+#define DIT_REG_NAT_TX_DESC_ADDR_0_DST01 0x403C
+#define DIT_REG_NAT_TX_DESC_ADDR_1_DST01 0x4040
+#define DIT_REG_NAT_TX_DESC_ADDR_0_DST02 0x4044
+#define DIT_REG_NAT_TX_DESC_ADDR_1_DST02 0x4048
+#define DIT_REG_NAT_TX_DESC_ADDR_0_DST03 0x404C
+#define DIT_REG_NAT_TX_DESC_ADDR_1_DST03 0x4050
+
+/* address for Rx desc */
+#define DIT_REG_NAT_RX_DESC_ADDR_0_SRC_A 0x4054 /* 32 bit */
+#define DIT_REG_NAT_RX_DESC_ADDR_1_SRC_A 0x4058 /* 4 bit */
+#define DIT_REG_NAT_RX_DESC_ADDR_EN_SRC_A 0x405C /* 1 bit */
+#define DIT_REG_NAT_RX_DESC_ADDR_0_SRC DIT_REG_NAT_RX_DESC_ADDR_0_SRC_A
+#define DIT_REG_NAT_RX_DESC_ADDR_1_SRC DIT_REG_NAT_RX_DESC_ADDR_1_SRC_A
+#define DIT_REG_NAT_RX_DESC_ADDR_EN_SRC DIT_REG_NAT_RX_DESC_ADDR_EN_SRC_A
+#define DIT_REG_NAT_RX_DESC_ADDR_0_DST00 0x4078
+#define DIT_REG_NAT_RX_DESC_ADDR_1_DST00 0x407C
+#define DIT_REG_NAT_RX_DESC_ADDR_0_DST0 DIT_REG_NAT_RX_DESC_ADDR_0_DST00
+#define DIT_REG_NAT_RX_DESC_ADDR_1_DST0 DIT_REG_NAT_RX_DESC_ADDR_1_DST00
+#define DIT_REG_NAT_RX_DESC_ADDR_0_DST1 0x4080
+#define DIT_REG_NAT_RX_DESC_ADDR_1_DST1 0x4084
+#define DIT_REG_NAT_RX_DESC_ADDR_0_DST2 0x4088
+#define DIT_REG_NAT_RX_DESC_ADDR_1_DST2 0x408C
+#define DIT_REG_NAT_RX_DESC_ADDR_0_DST01 0x4090
+#define DIT_REG_NAT_RX_DESC_ADDR_1_DST01 0x4094
+#define DIT_REG_NAT_RX_DESC_ADDR_0_DST02 0x4098
+#define DIT_REG_NAT_RX_DESC_ADDR_1_DST02 0x409C
+#define DIT_REG_NAT_RX_DESC_ADDR_0_DST03 0x40A0
+#define DIT_REG_NAT_RX_DESC_ADDR_1_DST03 0x40A4
+
+#define DIT_REG_NAT_TTLDEC_EN 0x4150
+
+/* total: DIT_REG_NAT_INTERFACE_NUM_MAX, interval: DIT_REG_NAT_INTERFACE_NUM_INTERVAL */
+#define DIT_REG_NAT_INTERFACE_NUM 0x4200
+
+#define DIT_REG_VERSION 0x9000
+
+#define DIT_REG_NAT_INTERFACE_NUM_MAX (8)
+#define DIT_REG_NAT_INTERFACE_NUM_INTERVAL (4)
+
+enum dit_nat_ttldec_en_bits {
+ TX_TTLDEC_EN_BIT,
+ RX_TTLDEC_EN_BIT,
+};
+
+enum dit_dst_desc_reset_bits {
+ TX_DST_DESC_RESET_BIT,
+ RX_DST_DESC_RESET_BIT,
+};
+
+/* Source (input) descriptor: HW-defined bit layout packed into two u64. */
+struct dit_src_desc {
+ u64 src_addr:36,
+ _reserved_0:12,
+ /* the below 16 bits are "private info" on the document */
+ ch_id:8,
+ pre_csum:1, /* checksum successful from pktproc */
+ udp_csum_zero:1, /* reset udp checksum 0 after NAT */
+ _reserved_2:6;
+ u64 length:16,
+ interface:8,
+ _reserved_1:24,
+ control:8, /* see enum dit_desc_control_bits */
+ status:8; /* see enum dit_desc_status_bits */
+} __packed;
+
+/* DIT_INT_PENDING */
+enum dit_int_pending_bits {
+ TX_DST00_INT_PENDING_BIT = 3,
+ TX_DST0_INT_PENDING_BIT = TX_DST00_INT_PENDING_BIT,
+ TX_DST1_INT_PENDING_BIT = 7,
+ TX_DST2_INT_PENDING_BIT,
+ RX_DST00_INT_PENDING_BIT = 19,
+ RX_DST0_INT_PENDING_BIT = RX_DST00_INT_PENDING_BIT,
+ RX_DST1_INT_PENDING_BIT = 23,
+ RX_DST2_INT_PENDING_BIT,
+ RSVD0_INT_PENDING_BIT = 30,
+ ERR_INT_PENDING_BIT = RSVD0_INT_PENDING_BIT, /* No ERR interrupt */
+};
+
+#define DIT_TX_INT_PENDING_MASK \
+ (BIT(TX_DST00_INT_PENDING_BIT) | BIT(TX_DST1_INT_PENDING_BIT) | \
+ BIT(TX_DST2_INT_PENDING_BIT))
+
+#define DIT_RX_INT_PENDING_MASK \
+ (BIT(RX_DST00_INT_PENDING_BIT) | BIT(RX_DST1_INT_PENDING_BIT) | \
+ BIT(RX_DST2_INT_PENDING_BIT))
+
+enum dit_int_enable_bits {
+ TX_DST00_INT_ENABLE_BIT = 3,
+ TX_DST1_INT_ENABLE_BIT = 7,
+ TX_DST2_INT_ENABLE_BIT,
+ RX_DST00_INT_ENABLE_BIT = 19,
+ RX_DST1_INT_ENABLE_BIT = 23,
+ RX_DST2_INT_ENABLE_BIT,
+};
+
+#define DIT_INT_ENABLE_MASK \
+ (BIT(TX_DST00_INT_ENABLE_BIT) | BIT(TX_DST1_INT_ENABLE_BIT) | \
+ BIT(TX_DST2_INT_ENABLE_BIT) | \
+ BIT(RX_DST00_INT_ENABLE_BIT) | BIT(RX_DST1_INT_ENABLE_BIT) | \
+ BIT(RX_DST2_INT_ENABLE_BIT))
+
+#define DIT_INT_MASK_MASK (0xFFFFFFFF)
+
+#endif /* __DIT_2_2_0_H__ */
diff --git a/dit/dit_common.h b/dit/dit_common.h
new file mode 100644
index 0000000..fdcb82a
--- /dev/null
+++ b/dit/dit_common.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS DIT(Direct IP Translator) Driver support
+ *
+ */
+
+#ifndef __DIT_COMMON_H__
+#define __DIT_COMMON_H__
+
+#ifndef DIT_DEBUG
+#define DIT_DEBUG
+#endif
+
+#ifdef DIT_DEBUG
+#define DIT_DEBUG_LOW
+#endif
+
+#include "dit.h"
+#include "cpif_page.h"
+
+#define DIT_VERSION(x, y, z) \
+ ((((x) & 0xFF) << 24) | (((y) & 0xFF) << 16) | (((z) & 0xFF) << 8))
+
+#if defined(CONFIG_EXYNOS_DIT_VERSION) && (DIT_VERSION(2, 2, 0) == CONFIG_EXYNOS_DIT_VERSION)
+#include "dit_2_2_0.h"
+#else
+#include "dit_2_1_0.h"
+#endif
+
+#define DIT_REG_SW_COMMAND 0x0000
+
+/* total: DIT_REG_CLAT_ADDR_MAX, interval: DIT_REG_CLAT_TX_FILTER_INTERVAL */
+#define DIT_REG_CLAT_TX_FILTER 0x2000
+/* total: DIT_REG_CLAT_ADDR_MAX, interval: DIT_REG_CLAT_TX_PLAT_PREFIX_INTERVAL */
+#define DIT_REG_CLAT_TX_PLAT_PREFIX_0 0x2020
+#define DIT_REG_CLAT_TX_PLAT_PREFIX_1 0x2024
+#define DIT_REG_CLAT_TX_PLAT_PREFIX_2 0x2028
+/* total: DIT_REG_CLAT_ADDR_MAX, interval: DIT_REG_CLAT_TX_CLAT_SRC_INTERVAL */
+#define DIT_REG_CLAT_TX_CLAT_SRC_0 0x2080
+#define DIT_REG_CLAT_TX_CLAT_SRC_1 0x2084
+#define DIT_REG_CLAT_TX_CLAT_SRC_2 0x2088
+#define DIT_REG_CLAT_TX_CLAT_SRC_3 0x208C
+
+/* total: DIT_REG_NAT_LOCAL_ADDR_MAX, interval: DIT_REG_NAT_LOCAL_INTERVAL */
+#define DIT_REG_NAT_LOCAL_ADDR 0x4100
+
+#define DIT_REG_NAT_ZERO_CHK_OFF 0x4144
+#define DIT_REG_NAT_ETHERNET_EN 0x414C
+
+/* total: DIT_REG_NAT_LOCAL_ADDR_MAX, interval: DIT_REG_ETHERNET_MAC_INTERVAL */
+#define DIT_REG_NAT_ETHERNET_DST_MAC_ADDR_0 0x6000 /* 32 bit */
+#define DIT_REG_NAT_ETHERNET_DST_MAC_ADDR_1 0x6004 /* 16 bit */
+#define DIT_REG_NAT_ETHERNET_SRC_MAC_ADDR_0 0x6008 /* 32 bit */
+#define DIT_REG_NAT_ETHERNET_SRC_MAC_ADDR_1 0x600C /* 16 bit */
+#define DIT_REG_NAT_ETHERNET_TYPE 0x6010 /* 16 bit */
+
+#define DIT_REG_NAT_TX_PORT_INIT_START 0x6210
+#define DIT_REG_NAT_TX_PORT_INIT_DONE 0x6214
+#define DIT_REG_NAT_RX_PORT_INIT_START 0x6228
+#define DIT_REG_NAT_RX_PORT_INIT_DONE 0x622C
+
+/* total: DIT_REG_NAT_LOCAL_PORT_MAX, interval: DIT_REG_NAT_LOCAL_INTERVAL */
+#define DIT_REG_NAT_RX_PORT_TABLE_SLOT 0xC000
+
+/* total numbers and intervals */
+#define DIT_REG_NAT_LOCAL_ADDR_MAX (16)
+#define DIT_REG_NAT_LOCAL_PORT_MAX (2048)
+#define DIT_REG_NAT_LOCAL_INTERVAL (4)
+#define DIT_REG_ETHERNET_MAC_INTERVAL (0x20)
+#define DIT_REG_CLAT_ADDR_MAX (8)
+#define DIT_REG_CLAT_TX_FILTER_INTERVAL (4)
+#define DIT_REG_CLAT_TX_PLAT_PREFIX_INTERVAL (12)
+#define DIT_REG_CLAT_TX_CLAT_SRC_INTERVAL (16)
+
+/* macro for DIT register operation */
+#define WRITE_REG_PADDR_LO(dc, paddr, offset) \
+ writel(PADDR_LO(paddr), dc->register_base + offset)
+#define WRITE_REG_PADDR_HI(dc, paddr, offset) \
+ writel(PADDR_HI(paddr), dc->register_base + offset)
+#define WRITE_REG_VALUE(dc, value, offset) \
+ writel(value, dc->register_base + offset)
+#define READ_REG_VALUE(dc, offset) \
+ readl(dc->register_base + offset)
+#define WRITE_SHR_VALUE(dc, value) \
+ ({ \
+ if (!IS_ERR_OR_NULL(dc->sharability_base)) \
+ writel(value, dc->sharability_base + dc->sharability_offset); \
+ })
+#define BACKUP_REG_VALUE(dc, dst, offset, size) \
+ memcpy_fromio(dst, dc->register_base + offset, size)
+#define RESTORE_REG_VALUE(dc, src, offset, size) \
+ memcpy_toio(dc->register_base + offset, src, size)
+
+/* macro for DIT function pointer */
+#define DIT_INDIRECT_CALL(dc, f, ...) \
+ ({ \
+ dc->f ? dc->f(__VA_ARGS__) : -EOPNOTSUPP; \
+ })
+
+#define DIT_RX_BURST_16BEAT (0)
+
+enum dit_desc_ring {
+ DIT_DST_DESC_RING_0,
+ DIT_DST_DESC_RING_1,
+ DIT_DST_DESC_RING_2,
+ DIT_DST_DESC_RING_MAX,
+ DIT_SRC_DESC_RING = DIT_DST_DESC_RING_MAX,
+ DIT_DESC_RING_MAX
+};
+
+enum dit_desc_control_bits {
+ DIT_DESC_C_RESERVED, /* Reserved */
+ DIT_DESC_C_END, /* end packet of LRO */
+ DIT_DESC_C_START, /* first packet of LRO */
+ DIT_DESC_C_RINGEND, /* End of descriptor */
+ DIT_DESC_C_INT, /* Interrupt enabled */
+ DIT_DESC_C_CSUM, /* csum enabled */
+ DIT_DESC_C_TAIL, /* last buffer */
+ DIT_DESC_C_HEAD /* first buffer */
+};
+
+#define DIT_SRC_KICK_CONTROL_MASK \
+ (BIT(DIT_DESC_C_HEAD) | BIT(DIT_DESC_C_TAIL) | \
+ BIT(DIT_DESC_C_INT) | BIT(DIT_DESC_C_RINGEND))
+
+enum dit_desc_status_bits {
+ DIT_DESC_S_DONE, /* DMA done */
+ DIT_DESC_S_RESERVED, /* Reserved */
+ DIT_DESC_S_TCPCF, /* Failed TCP csum */
+ DIT_DESC_S_IPCSF, /* Failed IP csum */
+ DIT_DESC_S_IGNR, /* Ignore csum */
+ DIT_DESC_S_TCPC, /* TCP/UDP csum done: should be 0 if IGNR */
+ DIT_DESC_S_IPCS, /* IP header csum done: should be 0 if IGNR */
+ DIT_DESC_S_PFD /* passed packet filter */
+};
+
+#define DIT_CHECKSUM_FAILED_STATUS_MASK \
+ (BIT(DIT_DESC_S_TCPCF) | BIT(DIT_DESC_S_IPCSF) | BIT(DIT_DESC_S_IGNR))
+
+enum dit_sw_command_bits {
+ DMA_INIT_COMMAND_BIT,
+ TX_COMMAND_BIT,
+ RX_COMMAND_BIT,
+};
+
+enum dit_nat_ethernet_en_bits {
+ TX_ETHERNET_EN_BIT,
+ RX_ETHERNET_EN_BIT,
+};
+
+#define DIT_ALL_INT_PENDING_MASK \
+ (DIT_TX_INT_PENDING_MASK | DIT_RX_INT_PENDING_MASK)
+
+/* DIT_STATUS
+ * zero means idle
+ */
+enum dit_status_mask {
+ TX_STATUS_MASK = 0x0F,
+ RX_STATUS_MASK = 0xF0,
+};
+
+enum dit_packet_info_bits {
+ DIT_PACKET_INFO_UDP_BIT = 6,
+ DIT_PACKET_INFO_TCP_BIT,
+ DIT_PACKET_INFO_IPV6_BIT = 10,
+ DIT_PACKET_INFO_IPV4_BIT,
+};
+
+/* Destination (output) descriptor: HW-defined bit layout, two u64. */
+struct dit_dst_desc {
+ u64 dst_addr:36,
+ packet_info:12, /* see enum dit_packet_info_bits */
+ /* the below 16 bits are "private info" on the document */
+ ch_id:8,
+ pre_csum:1,
+ udp_csum_zero:1,
+ _reserved_2:6;
+ u64 length:16,
+ org_port:16, /* port before NAT translation */
+ trans_port:16, /* port after NAT translation */
+ control:8, /* see enum dit_desc_control_bits */
+ status:8; /* see enum dit_desc_status_bits */
+} __packed;
+
+/*
+ * Per-direction descriptor ring bookkeeping: ring pointers (wp/rp),
+ * the src ring plus its skb array, per-dst-ring descriptor/skb arrays,
+ * and the DMA or page-pool backing depending on configuration.
+ */
+struct dit_desc_info {
+ unsigned int src_wp;
+ unsigned int src_rp;
+ unsigned int dst_wp[DIT_DST_DESC_RING_MAX];
+ unsigned int dst_rp[DIT_DST_DESC_RING_MAX];
+
+ unsigned int src_desc_ring_len;
+ struct dit_src_desc *src_desc_ring;
+ struct sk_buff **src_skb_buf;
+ u32 buf_size;
+
+ phys_addr_t pktproc_pbase;
+ u32 pktproc_queue_num;
+ u32 pktproc_desc_len;
+ u32 *pktproc_fore_ptr;
+
+ unsigned int dst_desc_ring_len;
+ struct dit_dst_desc *dst_desc_ring[DIT_DST_DESC_RING_MAX];
+ struct sk_buff **dst_skb_buf[DIT_DST_DESC_RING_MAX];
+ bool dst_skb_buf_filled[DIT_DST_DESC_RING_MAX];
+
+ /* use_dma_map */
+ dma_addr_t src_desc_ring_daddr;
+ dma_addr_t dst_desc_ring_daddr[DIT_DST_DESC_RING_MAX];
+ dma_addr_t *dst_skb_buf_daddr[DIT_DST_DESC_RING_MAX];
+
+ /* page pool */
+ struct cpif_page_pool *dst_page_pool[DIT_DST_DESC_RING_MAX];
+};
+
+/*
+ * Top-level DIT controller state: device handles, IRQ layout (filled by
+ * the version-specific __dit_set_interrupt), register mappings, feature
+ * flags, per-direction descriptor info, and the version-specific ops
+ * bound by dit_ver_create().
+ */
+struct dit_ctrl_t {
+ struct device *dev;
+ struct link_device *ld;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ int *irq_pending_bit;
+ char const **irq_name;
+ int *irq_buf;
+ int irq_len;
+ int irq_affinity;
+ int irq_num_tx;
+ int irq_affinity_tx;
+ int idle_ip_index;
+
+ void __iomem *register_base;
+ void __iomem *sharability_base;
+ u32 sharability_offset;
+ u32 sharability_value;
+ bool use_dma_map;
+
+ u32 hw_version;
+ u32 reg_version;
+ u32 hw_capabilities;
+ bool use_dir[DIT_DIR_MAX];
+ bool stop_enqueue[DIT_DIR_MAX];
+ bool use_clat;
+ bool hal_support;
+ bool hal_enqueue_rx;
+ u32 rx_extra_desc_ring_len;
+
+ struct dit_desc_info desc_info[DIT_DIR_MAX];
+
+ /* for kicked flag, reg_value_q and init_done */
+ spinlock_t src_lock;
+ bool kicked[DIT_DIR_MAX];
+ bool kick_reserved[DIT_DIR_MAX];
+ struct list_head reg_value_q;
+ bool init_done;
+ bool init_reserved;
+
+ atomic_t init_running;
+
+ bool use_page_recycling_rx;
+ u32 page_recycling_skb_padding;
+
+#if defined(DIT_DEBUG_LOW)
+ int pktgen_ch;
+ int force_bypass;
+#endif
+
+ /* every function must return int so DIT_INDIRECT_CALL can report -EOPNOTSUPP */
+ int (*get_reg_version)(u32 *version);
+ int (*set_reg_upstream)(struct net_device *netdev);
+ int (*set_desc_filter_bypass)(enum dit_direction dir, struct dit_src_desc *src_desc,
+ u8 *src, bool *is_upstream_pkt);
+ int (*set_src_desc_tail)(enum dit_direction dir, struct dit_desc_info *desc_info,
+ unsigned int tail);
+ int (*do_init_hw)(void);
+ int (*do_init_desc)(enum dit_direction dir);
+};
+
+struct dit_snapshot_t {
+ char *name;
+ int head;
+ int tail;
+
+ u64 packets;
+ /* cumulative amount */
+ u64 total_packets;
+ u64 clat_packets;
+
+ u32 max_usage;
+ u32 alloc_skbs;
+ u32 dma_maps;
+};
+
+struct dit_reg_value_item {
+ struct list_head list;
+ u32 value;
+ u32 offset;
+};
+
+struct dit_iface {
+ u8 upstream_ch;
+};
+
+enum dit_dump_bits {
+ DIT_DUMP_SNAPSHOT_BIT,
+ DIT_DUMP_DESC_BIT,
+ DIT_DUMP_PORT_TABLE_BIT,
+ DIT_DUMP_MAX,
+};
+
+#define DIT_DUMP_ALL \
+ (BIT(DIT_DUMP_SNAPSHOT_BIT) | BIT(DIT_DUMP_DESC_BIT) | \
+ BIT(DIT_DUMP_PORT_TABLE_BIT))
+
+enum dit_idle_ip {
+ DIT_IDLE_IP_ACTIVE = 0,
+ DIT_IDLE_IP_IDLE,
+};
+
+/*
+ * if there is 1 src desc and it is at the ring_end,
+ * DIT will reads 3 descs from the ring_end.
+ * for the safety, add additional 2 descs.
+ */
+#define DIT_SRC_DESC_RING_LEN_PADDING (2)
+
+/* prevent zero size alloc */
+#define DIT_DST_DESC_RING_LEN_PADDING (1)
+
+bool dit_is_kicked_any(void);
+int dit_check_dst_ready(enum dit_direction dir, enum dit_desc_ring ring_num);
+int dit_enqueue_reg_value_with_ext_lock(u32 value, u32 offset);
+int dit_enqueue_reg_value(u32 value, u32 offset);
+int dit_read_rx_dst_poll(struct napi_struct *napi, int budget);
+int dit_manage_rx_dst_data_buffers(bool fill);
+bool dit_is_busy(enum dit_direction dir);
+
+int dit_ver_create(struct dit_ctrl_t *dc_ptr);
+
+#endif /* __DIT_COMMON_H__ */
+
diff --git a/dit/dit_hal.c b/dit/dit_hal.c
new file mode 100644
index 0000000..a6a8e67
--- /dev/null
+++ b/dit/dit_hal.c
@@ -0,0 +1,745 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS DIT(Direct IP Translator) Driver support
+ *
+ */
+
+#include <linux/poll.h>
+
+#include "modem_utils.h"
+#include "dit_hal.h"
+
+#define DIT_HAL_STATS_MAX (S64_MAX)
+
+static struct dit_ctrl_t *dc;
+static struct dit_hal_ctrl_t *dhc;
+
+/* char-dev open: no per-open state is kept, always succeeds */
+static int dit_hal_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/* char-dev release: nothing to tear down, always succeeds */
+static int dit_hal_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/* poll: readable (POLLIN | POLLRDNORM) whenever the event queue is non-empty */
+static unsigned int dit_hal_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ poll_wait(filp, &dhc->wq, wait);
+
+ if (!list_empty(&dhc->event_q))
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+/*
+ * Dequeue one struct offload_event and copy it to userspace.
+ * Returns sizeof(event) on success, 0 on any failure (buffer too small,
+ * empty queue, or copy_to_user error).
+ * NOTE(review): on copy_to_user failure the event item was already
+ * unlinked and freed, so that event is silently lost and userspace gets
+ * 0 rather than -EFAULT -- confirm this is intended.
+ */
+static ssize_t dit_hal_read(struct file *filp, char *buf, size_t count, loff_t *fpos)
+{
+ struct offload_event event;
+ struct offload_event_item *event_item;
+ unsigned long flags;
+
+ if (count < sizeof(event)) {
+ mif_err("not support small buffer size: %zu\n", count);
+ return 0;
+ }
+
+ spin_lock_irqsave(&dhc->event_lock, flags);
+ if (list_empty(&dhc->event_q)) {
+ mif_err("no event\n");
+ spin_unlock_irqrestore(&dhc->event_lock, flags);
+ return 0;
+ }
+
+ /* take the oldest event off the queue under event_lock */
+ event_item = list_first_entry(&dhc->event_q, struct offload_event_item, list);
+ list_del(&event_item->list);
+ spin_unlock_irqrestore(&dhc->event_lock, flags);
+
+ event.event_num = event_item->event_num;
+ kvfree(event_item);
+
+ if (copy_to_user((void __user *)buf, (void *)&event, sizeof(event)))
+ return 0;
+
+ mif_info("event=%d\n", event.event_num);
+
+ return sizeof(event);
+}
+
+/*
+ * Reset HAL state for a (re)start: stats back to defaults under
+ * stats_lock, all dst iface slots cleared, and the pending event queue
+ * drained. Returns 0, or -EPERM when the device lacks HAL support.
+ * NOTE(review): iface_set is cleared without dev_put() on any netdev
+ * still held in dst_iface[] -- confirm references are released elsewhere.
+ */
+static int dit_hal_init(void)
+{
+ int i;
+ struct offload_event_item *event_item;
+ unsigned long flags;
+
+ if (!dc->hal_support) {
+ mif_err("does not support hal\n");
+ return -EPERM;
+ }
+
+ /* OFFLOAD_MAX means "no last event", so the next event is never deduped */
+ dhc->last_event_num = OFFLOAD_MAX;
+
+ spin_lock_irqsave(&dhc->stats_lock, flags);
+ /* DIT_HAL_STATS_MAX effectively disables warning/limit until set */
+ dhc->stats.data_warning = DIT_HAL_STATS_MAX;
+ dhc->stats.data_limit = DIT_HAL_STATS_MAX;
+ dhc->stats.rx_bytes = 0;
+ dhc->stats.tx_bytes = 0;
+ dhc->stats.rx_diff = 0;
+ dhc->stats.tx_diff = 0;
+ spin_unlock_irqrestore(&dhc->stats_lock, flags);
+
+ for (i = 0; i < DIT_DST_DESC_RING_MAX; i++)
+ dhc->dst_iface[i].iface_set = false;
+
+ /* drop any events queued before this init */
+ spin_lock_irqsave(&dhc->event_lock, flags);
+ while (!list_empty(&dhc->event_q)) {
+ event_item = list_first_entry(&dhc->event_q, struct offload_event_item, list);
+ list_del(&event_item->list);
+ kvfree(event_item);
+ }
+ spin_unlock_irqrestore(&dhc->event_lock, flags);
+
+ return 0;
+}
+
+/*
+ * Queue @event_num for delivery via dit_hal_read() and wake pollers.
+ * Consecutive duplicates are suppressed (-EEXIST) via last_event_num;
+ * -ENOMEM when the atomic allocation fails, 0 on success.
+ * NOTE(review): last_event_num is read and written outside event_lock --
+ * confirm concurrent callers cannot race here.
+ */
+static int dit_hal_set_event(enum offload_event_num event_num)
+{
+ struct offload_event_item *event_item;
+ unsigned long flags;
+
+ if (event_num == dhc->last_event_num)
+ return -EEXIST;
+
+ /* GFP_ATOMIC: this can be reached from non-sleepable context */
+ event_item = kvzalloc(sizeof(struct offload_event_item), GFP_ATOMIC);
+ if (!event_item) {
+ mif_err("event=%d generation failed\n", event_num);
+ return -ENOMEM;
+ }
+
+ event_item->event_num = event_num;
+ spin_lock_irqsave(&dhc->event_lock, flags);
+ list_add_tail(&event_item->list, &dhc->event_q);
+ spin_unlock_irqrestore(&dhc->event_lock, flags);
+
+ dhc->last_event_num = event_num;
+ wake_up(&dhc->wq);
+
+ return 0;
+}
+
+/*
+ * Return the net_device bound to dst ring @ring_num, or NULL when the
+ * ring index is out of range or no iface has been set for it.
+ * In low-level debug builds, ring 0 can be overridden to the pktgen
+ * channel's io-device netdev.
+ */
+struct net_device *dit_hal_get_dst_netdev(enum dit_desc_ring ring_num)
+{
+ if (ring_num < DIT_DST_DESC_RING_0 || ring_num >= DIT_DST_DESC_RING_MAX)
+ return NULL;
+
+#if defined(DIT_DEBUG_LOW)
+ if (dc->pktgen_ch && (ring_num == DIT_DST_DESC_RING_0)) {
+ struct io_device *iod;
+
+ iod = link_get_iod_with_channel(dc->ld, dc->pktgen_ch);
+
+ return iod ? iod->ndev : NULL;
+ }
+#endif
+
+ if (dhc && dhc->dst_iface[ring_num].iface_set)
+ return dhc->dst_iface[ring_num].netdev;
+
+ return NULL;
+}
+EXPORT_SYMBOL(dit_hal_get_dst_netdev);
+
+/*
+ * One-shot warning check: true when total rx+tx bytes reached the
+ * warning threshold. The threshold is then reset to DIT_HAL_STATS_MAX
+ * so the warning fires only once per configuration.
+ */
+static bool dit_hal_check_data_warning_reached(void)
+{
+ unsigned long flags;
+ bool ret = false;
+
+ if (!dhc)
+ return false;
+
+ spin_lock_irqsave(&dhc->stats_lock, flags);
+ if ((dhc->stats.rx_bytes + dhc->stats.tx_bytes) >= dhc->stats.data_warning) {
+ dhc->stats.data_warning = DIT_HAL_STATS_MAX;
+ ret = true;
+ }
+ spin_unlock_irqrestore(&dhc->stats_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Limit check: true when total rx+tx bytes reached the data limit.
+ * Unlike the warning check, the limit is not reset here, so this keeps
+ * returning true until a new limit is configured.
+ */
+static bool dit_hal_check_data_limit_reached(void)
+{
+ unsigned long flags;
+ bool ret = false;
+
+ if (!dhc)
+ return false;
+
+ spin_lock_irqsave(&dhc->stats_lock, flags);
+ if ((dhc->stats.rx_bytes + dhc->stats.tx_bytes) >= dhc->stats.data_limit)
+ ret = true;
+ spin_unlock_irqrestore(&dhc->stats_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Account forwarded traffic: add to the cumulative totals and to the
+ * diffs (consumed by dit_hal_get_forwarded_stats), then raise the
+ * warning/limit events if the respective thresholds were crossed.
+ */
+void dit_hal_add_data_bytes(u64 rx_bytes, u64 tx_bytes)
+{
+ unsigned long flags;
+
+ if (!dhc)
+ return;
+
+ spin_lock_irqsave(&dhc->stats_lock, flags);
+ dhc->stats.rx_bytes += rx_bytes;
+ dhc->stats.tx_bytes += tx_bytes;
+ dhc->stats.rx_diff += rx_bytes;
+ dhc->stats.tx_diff += tx_bytes;
+ spin_unlock_irqrestore(&dhc->stats_lock, flags);
+
+ if (dit_hal_check_data_warning_reached())
+ dit_hal_set_event(OFFLOAD_WARNING_REACHED);
+
+ if (dit_hal_check_data_limit_reached())
+ dit_hal_set_event(OFFLOAD_STOPPED_LIMIT_REACHED);
+}
+EXPORT_SYMBOL(dit_hal_add_data_bytes);
+
+/*
+ * Fill @stats with the current counters when the requested iface name
+ * matches the one configured via set_data_limit; the diff counters are
+ * consumed (zeroed) by this call. Returns false on iface mismatch.
+ * NOTE(review): rx_diff/tx_diff are zeroed even when the iface does not
+ * match, so a mismatched query discards pending diffs -- confirm intended.
+ */
+static bool dit_hal_get_forwarded_stats(struct forward_stats *stats)
+{
+ unsigned long flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&dhc->stats_lock, flags);
+ if (strncmp(stats->iface, dhc->stats.iface, IFNAMSIZ))
+ goto exit;
+
+ stats->rx_bytes = dhc->stats.rx_bytes;
+ stats->tx_bytes = dhc->stats.tx_bytes;
+ stats->rx_diff = dhc->stats.rx_diff;
+ stats->tx_diff = dhc->stats.tx_diff;
+
+ ret = true;
+
+exit:
+ dhc->stats.rx_diff = 0;
+ dhc->stats.tx_diff = 0;
+ spin_unlock_irqrestore(&dhc->stats_lock, flags);
+ return ret;
+}
+
+/*
+ * Configure the accounted iface and thresholds, then zero the byte
+ * counters. Exactly one of @stats (HAL V1.0: limit only, warning
+ * disabled) or @limit (V1.1: warning + limit) must be non-NULL;
+ * returns false when both are NULL.
+ */
+static bool dit_hal_set_data_limit(struct forward_stats *stats,
+ struct forward_limit *limit)
+{
+ unsigned long flags;
+
+ if (!stats && !limit)
+ return false;
+
+ spin_lock_irqsave(&dhc->stats_lock, flags);
+ if (stats) {
+ strlcpy(dhc->stats.iface, stats->iface, IFNAMSIZ);
+ /* V1.0 has no warning threshold: disable it */
+ dhc->stats.data_warning = DIT_HAL_STATS_MAX;
+ dhc->stats.data_limit = stats->data_limit;
+ } else {
+ strlcpy(dhc->stats.iface, limit->iface, IFNAMSIZ);
+ dhc->stats.data_warning = limit->data_warning;
+ dhc->stats.data_limit = limit->data_limit;
+ }
+ dhc->stats.rx_bytes = 0;
+ dhc->stats.tx_bytes = 0;
+ spin_unlock_irqrestore(&dhc->stats_lock, flags);
+
+ return true;
+}
+
+/*
+ * Offload may start only when: HAL is enabled, the data limit has not
+ * been reached, the upstream iface (ring 0) is set, and at least one
+ * downstream iface (ring 1..MAX-1) is set.
+ */
+static bool dit_hal_check_ready_to_start(void)
+{
+ int i;
+
+ spin_lock(&dhc->hal_lock);
+ if (!dhc->hal_enabled) {
+ spin_unlock(&dhc->hal_lock);
+ return false;
+ }
+ spin_unlock(&dhc->hal_lock);
+
+ if (dit_hal_check_data_limit_reached())
+ return false;
+
+ if (!dhc->dst_iface[DIT_DST_DESC_RING_0].iface_set)
+ return false;
+
+ for (i = DIT_DST_DESC_RING_1; i < DIT_DST_DESC_RING_MAX; i++) {
+ if (dhc->dst_iface[i].iface_set)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Bind @info->iface to a dst ring. Upstream always (re)uses ring 0;
+ * downstream reuses a slot already holding the same name, otherwise
+ * takes the first free slot in rings 1..MAX-1. Grabs a reference with
+ * dev_get_by_name() and marks the slot set only if the netdev exists.
+ * Returns the ring index (also stored in @info->dst_ring) or -1 when
+ * the name is empty or no slot is free.
+ * NOTE(review): the duplication path re-takes a netdev reference
+ * without releasing the one held by the slot -- confirm no ref leak.
+ */
+static int dit_hal_add_dst_iface(bool is_upstream,
+ struct iface_info *info)
+{
+ enum dit_desc_ring dst_min;
+ enum dit_desc_ring dst_max;
+ int i;
+
+ if (is_upstream) {
+ dst_min = DIT_DST_DESC_RING_0;
+ dst_max = DIT_DST_DESC_RING_1;
+
+ /* set upstream always */
+ dhc->dst_iface[dst_min].iface_set = false;
+ } else {
+ dst_min = DIT_DST_DESC_RING_1;
+ dst_max = DIT_DST_DESC_RING_MAX;
+
+ /* check duplication */
+ for (i = dst_min; i < dst_max; i++) {
+ if (strncmp(info->iface, dhc->dst_iface[i].iface, IFNAMSIZ))
+ continue;
+
+ dhc->dst_iface[i].netdev =
+ dev_get_by_name(&init_net, info->iface);
+ if (dhc->dst_iface[i].netdev)
+ dhc->dst_iface[i].iface_set = true;
+
+ info->dst_ring = i;
+ return i;
+ }
+ }
+
+ /* empty name: used to remove upstream, nothing to add */
+ if (strlen(info->iface) == 0)
+ return -1;
+
+ /* find empty space */
+ for (i = dst_min; i < dst_max; i++) {
+ if (dhc->dst_iface[i].iface_set)
+ continue;
+
+ strlcpy(dhc->dst_iface[i].iface, info->iface, IFNAMSIZ);
+ dhc->dst_iface[i].netdev =
+ dev_get_by_name(&init_net, info->iface);
+ if (dhc->dst_iface[i].netdev)
+ dhc->dst_iface[i].iface_set = true;
+
+ info->dst_ring = i;
+ return i;
+ }
+
+ return -1;
+}
+
+/*
+ * Unbind a dst iface. Upstream just clears the ring-0 flag; downstream
+ * finds the slot matching @info->iface, clears it and drops the netdev
+ * reference taken in dit_hal_add_dst_iface().
+ * NOTE(review): the upstream path does not dev_put() the held netdev --
+ * confirm its reference is released elsewhere.
+ */
+static void dit_hal_remove_dst_iface(bool is_upstream,
+ struct iface_info *info)
+{
+ enum dit_desc_ring dst_min;
+ enum dit_desc_ring dst_max;
+ int i;
+
+ if (is_upstream) {
+ dhc->dst_iface[DIT_DST_DESC_RING_0].iface_set = false;
+ return;
+ }
+
+ dst_min = DIT_DST_DESC_RING_1;
+ dst_max = DIT_DST_DESC_RING_MAX;
+
+ for (i = dst_min; i < dst_max; i++) {
+ if (!dhc->dst_iface[i].iface_set ||
+ strncmp(info->iface, dhc->dst_iface[i].iface, IFNAMSIZ))
+ continue;
+
+ dhc->dst_iface[i].iface_set = false;
+ dev_put(dhc->dst_iface[i].netdev);
+ break;
+ }
+}
+
+/*
+ * Queue register writes (under dc->src_lock) that program one NAT local
+ * address slot: the IPv4 address, the DST MAC from @local_addr, the SRC
+ * MAC taken from the dst ring's netdev, and the IPv4 ether type.
+ * An address of 0 only clears the slot. Returns true when every write
+ * was queued, false otherwise.
+ */
+static bool dit_hal_set_local_addr(struct nat_local_addr *local_addr)
+{
+ struct net_device *netdev;
+ struct dev_addr_map *devaddr;
+ unsigned long flags;
+ int ret = false;
+
+ spin_lock_irqsave(&dc->src_lock, flags);
+ /* local IP addr */
+ if (dit_enqueue_reg_value_with_ext_lock(local_addr->addr,
+ DIT_REG_NAT_LOCAL_ADDR +
+ (local_addr->index * DIT_REG_NAT_LOCAL_INTERVAL)) < 0)
+ goto exit;
+
+ /* addr can be 0 when remove */
+ if (!local_addr->addr) {
+ ret = true;
+ goto exit;
+ }
+
+ /* DST dev addr */
+ if (dit_enqueue_reg_value_with_ext_lock(local_addr->dev_addr_l,
+ DIT_REG_NAT_ETHERNET_DST_MAC_ADDR_0 +
+ (local_addr->index * DIT_REG_ETHERNET_MAC_INTERVAL)) < 0)
+ goto exit;
+ if (dit_enqueue_reg_value_with_ext_lock((u32) local_addr->dev_addr_h,
+ DIT_REG_NAT_ETHERNET_DST_MAC_ADDR_1 +
+ (local_addr->index * DIT_REG_ETHERNET_MAC_INTERVAL)) < 0)
+ goto exit;
+
+ /* SRC dev addr */
+ netdev = dit_hal_get_dst_netdev(local_addr->dst_ring);
+ if (!netdev) {
+ mif_err("failed to get local dev addr for 0x%08X\n",
+ ntohl(local_addr->addr));
+ goto exit;
+ }
+
+ /* reinterpret the netdev MAC bytes as the two register halves */
+ devaddr = (struct dev_addr_map *) netdev->dev_addr;
+ if (dit_enqueue_reg_value_with_ext_lock(devaddr->dev_addr_l,
+ DIT_REG_NAT_ETHERNET_SRC_MAC_ADDR_0 +
+ (local_addr->index * DIT_REG_ETHERNET_MAC_INTERVAL)) < 0)
+ goto exit;
+ if (dit_enqueue_reg_value_with_ext_lock((u32) devaddr->dev_addr_h,
+ DIT_REG_NAT_ETHERNET_SRC_MAC_ADDR_1 +
+ (local_addr->index * DIT_REG_ETHERNET_MAC_INTERVAL)) < 0)
+ goto exit;
+
+ /* IPv4 only */
+ if (dit_enqueue_reg_value_with_ext_lock((u32) htons(ETH_P_IP),
+ DIT_REG_NAT_ETHERNET_TYPE +
+ (local_addr->index * DIT_REG_ETHERNET_MAC_INTERVAL)) < 0)
+ goto exit;
+
+ ret = true;
+
+exit:
+ spin_unlock_irqrestore(&dc->src_lock, flags);
+ return ret;
+}
+
+/*
+ * Queue a write of the HW-encoded port value into the Rx port table
+ * slot indexed by reply_port_dst_l. Rejected (false) when HAL is not
+ * enabled; the enable check and the enqueue run under hal_lock.
+ */
+static bool dit_hal_set_local_port(struct nat_local_port *local_port)
+{
+ spin_lock(&dhc->hal_lock);
+ if (!dhc->hal_enabled) {
+ spin_unlock(&dhc->hal_lock);
+ mif_err("hal is not enabled\n");
+ return false;
+ }
+
+ dit_enqueue_reg_value(local_port->hw_val,
+ DIT_REG_NAT_RX_PORT_TABLE_SLOT +
+ (local_port->reply_port_dst_l * DIT_REG_NAT_LOCAL_INTERVAL));
+ spin_unlock(&dhc->hal_lock);
+
+ return true;
+}
+
+/*
+ * Program one CLAT index: the IPv4 TUN filter address, the local IPv6
+ * source address and the PLAT /96 prefix, all queued as register writes
+ * under dc->src_lock. When ipv4_iface is set, the clat netdev is bound
+ * to the io-devices while the lock is held; when it is being cleared,
+ * the unbind happens after unlock with a 100 ms delay, so this function
+ * must be callable from sleepable context. Returns true on success.
+ * NOTE(review): set_iod_clat_netdev runs under a spinlock on the set
+ * path -- confirm that callback never sleeps.
+ */
+bool dit_hal_set_clat_info(struct mem_link_device *mld, struct clat_info *clat)
+{
+ unsigned int offset;
+ unsigned long flags;
+ bool ret = false;
+
+ if (!dc->use_clat || !dc->ld)
+ return false;
+
+ spin_lock_irqsave(&dc->src_lock, flags);
+ /* IPv4 addr of TUN device */
+ offset = clat->clat_index * DIT_REG_CLAT_TX_FILTER_INTERVAL;
+ if (dit_enqueue_reg_value_with_ext_lock(clat->ipv4_local_subnet.s_addr,
+ DIT_REG_CLAT_TX_FILTER + offset) < 0)
+ goto exit;
+
+ /* IPv6 addr for TUN device */
+ offset = clat->clat_index * DIT_REG_CLAT_TX_CLAT_SRC_INTERVAL;
+ if (dit_enqueue_reg_value_with_ext_lock(clat->ipv6_local_subnet.s6_addr32[0],
+ DIT_REG_CLAT_TX_CLAT_SRC_0 + offset) < 0)
+ goto exit;
+ if (dit_enqueue_reg_value_with_ext_lock(clat->ipv6_local_subnet.s6_addr32[1],
+ DIT_REG_CLAT_TX_CLAT_SRC_1 + offset) < 0)
+ goto exit;
+ if (dit_enqueue_reg_value_with_ext_lock(clat->ipv6_local_subnet.s6_addr32[2],
+ DIT_REG_CLAT_TX_CLAT_SRC_2 + offset) < 0)
+ goto exit;
+ if (dit_enqueue_reg_value_with_ext_lock(clat->ipv6_local_subnet.s6_addr32[3],
+ DIT_REG_CLAT_TX_CLAT_SRC_3 + offset) < 0)
+ goto exit;
+
+ /* PLAT prefix */
+ offset = clat->clat_index * DIT_REG_CLAT_TX_PLAT_PREFIX_INTERVAL;
+ if (dit_enqueue_reg_value_with_ext_lock(clat->plat_subnet.s6_addr32[0],
+ DIT_REG_CLAT_TX_PLAT_PREFIX_0 + offset) < 0)
+ goto exit;
+ if (dit_enqueue_reg_value_with_ext_lock(clat->plat_subnet.s6_addr32[1],
+ DIT_REG_CLAT_TX_PLAT_PREFIX_1 + offset) < 0)
+ goto exit;
+ if (dit_enqueue_reg_value_with_ext_lock(clat->plat_subnet.s6_addr32[2],
+ DIT_REG_CLAT_TX_PLAT_PREFIX_2 + offset) < 0)
+ goto exit;
+
+ ret = true;
+
+ /* set clat_ndev with clat registers */
+ if (clat->ipv4_iface[0])
+ iodevs_for_each(dc->ld->msd, mld->tc->set_iod_clat_netdev, clat);
+
+exit:
+ spin_unlock_irqrestore(&dc->src_lock, flags);
+
+ /* clear clat_ndev but take a delay to prevent null ndev */
+ if (ret && !clat->ipv4_iface[0]) {
+ msleep(100);
+ iodevs_for_each(dc->ld->msd, mld->tc->set_iod_clat_netdev, clat);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dit_hal_set_clat_info);
+
+/*
+ * Gate Rx enqueue on HAL state: only takes effect when the device is
+ * configured with hal_enqueue_rx (i.e. HAL drives the Rx path).
+ */
+static void dit_hal_try_stop_enqueue_rx(bool stop)
+{
+ if (unlikely(!dc))
+ return;
+
+ if (dc->hal_enqueue_rx)
+ dc->stop_enqueue[DIT_DIR_RX] = stop;
+}
+
+/*
+ * Push the current upstream netdev (ring 0, or NULL when unset) to the
+ * version-specific set_reg_upstream op via DIT_INDIRECT_CALL.
+ */
+static void dit_hal_set_reg_upstream(void)
+{
+ struct net_device *netdev = NULL;
+
+ if (unlikely(!dc))
+ return;
+
+ if (dhc->dst_iface[DIT_DST_DESC_RING_0].iface_set)
+ netdev = dhc->dst_iface[DIT_DST_DESC_RING_0].netdev;
+
+ DIT_INDIRECT_CALL(dc, set_reg_upstream, netdev);
+}
+
/*
 * ioctl entry point for the DIT tethering-offload HAL device node.
 * Serves both .unlocked_ioctl and .compat_ioctl. Returns 0 on success,
 * -EPERM when the driver state is not ready, -EFAULT on user copy
 * failures, and -EINVAL/-ENOSPC per command.
 */
static long dit_hal_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iface_info info;
	struct forward_stats stats;
	struct forward_limit limit;
	struct nat_local_addr local_addr;
	struct nat_local_port local_port;
	struct hw_info hw;
	int ret;

	/* reject everything until dit_hal_create()/probe completed */
	if (unlikely(!dc) || unlikely(!dc->ld))
		return -EPERM;

	if (!dhc)
		return -EPERM;

	switch (cmd) {
	case OFFLOAD_IOCTL_INIT_OFFLOAD:
		mif_info("hal init\n");

		ret = dit_hal_init();
		if (ret) {
			mif_err("hal init failed. ret: %d\n", ret);
			return ret;
		}
		/* ioctl_lock serializes buffer fill/free against STOP_OFFLOAD */
		mutex_lock(&dhc->ioctl_lock);
		ret = dit_manage_rx_dst_data_buffers(true);
		mutex_unlock(&dhc->ioctl_lock);
		if (ret) {
			mif_err("hal buffer fill failed. ret: %d\n", ret);
			return ret;
		}

		dit_hal_try_stop_enqueue_rx(false);
		spin_lock(&dhc->hal_lock);
		dhc->hal_enabled = true;
		spin_unlock(&dhc->hal_lock);
		break;
	case OFFLOAD_IOCTL_STOP_OFFLOAD:
		mif_info("hal stopped\n");

		/* disable first so no new work is accepted while tearing down */
		spin_lock(&dhc->hal_lock);
		dhc->hal_enabled = false;
		spin_unlock(&dhc->hal_lock);
		dit_hal_try_stop_enqueue_rx(true);

		dit_hal_set_event(INTERNAL_OFFLOAD_STOPPED);

		/* init port table and take a delay for the prior kick */
		dit_init(NULL, DIT_INIT_NORMAL, DIT_STORE_NONE);
		msleep(100);
		mutex_lock(&dhc->ioctl_lock);
		ret = dit_manage_rx_dst_data_buffers(false);
		mutex_unlock(&dhc->ioctl_lock);
		if (ret)
			mif_err("hal buffer free. ret: %d\n", ret);

		/* don't call dit_hal_init() here for the last event delivery */
		break;
	case OFFLOAD_IOCTL_GET_FORWD_STATS:
		/* read-modify-write: iface name comes from user, counters go back */
		if (copy_from_user(&stats, (const void __user *)arg,
				   sizeof(struct forward_stats)))
			return -EFAULT;

		if (!dit_hal_get_forwarded_stats(&stats))
			return -EINVAL;

		if (copy_to_user((void __user *)arg, (void *)&stats,
				 sizeof(struct forward_stats)))
			return -EFAULT;
		break;
	case OFFLOAD_IOCTL_SET_DATA_LIMIT:
		if (copy_from_user(&stats, (const void __user *)arg,
				   sizeof(struct forward_stats)))
			return -EFAULT;

		if (!dit_hal_set_data_limit(&stats, NULL))
			return -EINVAL;
		break;
	case OFFLOAD_IOCTL_SET_DATA_WARNING_LIMIT:
		/* HAL V1.1: warning threshold in addition to the hard limit */
		if (copy_from_user(&limit, (const void __user *)arg,
				   sizeof(struct forward_limit)))
			return -EFAULT;

		if (!dit_hal_set_data_limit(NULL, &limit))
			return -EINVAL;
		break;
	case OFFLOAD_IOCTL_SET_UPSTRM_PARAM:
		if (copy_from_user(&info, (const void __user *)arg,
				   sizeof(struct iface_info)))
			return -EFAULT;

		/* hal can remove upstream by null iface name */
		ret = dit_hal_add_dst_iface(true, &info);
		dit_hal_set_reg_upstream();
		if (ret < 0) {
			dit_hal_set_event(OFFLOAD_STOPPED_ERROR);
			break;
		}

		if (dit_hal_check_ready_to_start())
			dit_hal_set_event(OFFLOAD_STARTED);
		break;
	case OFFLOAD_IOCTL_ADD_DOWNSTREAM:
		if (copy_from_user(&info, (const void __user *)arg,
				   sizeof(struct iface_info)))
			return -EFAULT;

		if (dit_hal_add_dst_iface(false, &info) < 0)
			return -ENOSPC;
		if (dit_hal_check_ready_to_start())
			dit_hal_set_event(OFFLOAD_STARTED);

		/* info.dst_ring is filled in by add_dst_iface; return it */
		if (copy_to_user((void __user *)arg, (void *)&info,
				 sizeof(struct iface_info)))
			return -EFAULT;
		break;
	case OFFLOAD_IOCTL_REMOVE_DOWNSTRM:
		if (copy_from_user(&info, (const void __user *)arg,
				   sizeof(struct iface_info)))
			return -EFAULT;

		/* delay so in-flight traffic on the iface drains first */
		msleep(100);
		dit_hal_remove_dst_iface(false, &info);
		if (!dit_hal_check_ready_to_start())
			dit_hal_set_event(OFFLOAD_STOPPED_ERROR);
		break;
	/* ToDo: need to implement */
	case OFFLOAD_IOCTL_SET_LOCAL_PRFIX:
		break;
	case OFFLOAD_IOCTL_SET_NAT_LOCAL_ADDR:
		if (copy_from_user(&local_addr, (const void __user *)arg,
				   sizeof(struct nat_local_addr)))
			return -EFAULT;

		if (!dit_hal_set_local_addr(&local_addr))
			return -EINVAL;
		break;
	case OFFLOAD_IOCTL_SET_NAT_LOCAL_PORT:
		if (copy_from_user(&local_port, (const void __user *)arg,
				   sizeof(struct nat_local_port)))
			return -EFAULT;

		if (!dit_hal_set_local_port(&local_port))
			return -EINVAL;
		break;
	case OFFLOAD_IOCTL_GET_HW_INFO:
		hw.version = dc->hw_version;
		hw.capabilities = dc->hw_capabilities;
		if (copy_to_user((void __user *)arg, (void *)&hw,
				 sizeof(struct hw_info)))
			return -EFAULT;
		break;
	default:
		mif_err("unknown command: 0x%X\n", cmd);
		return -EINVAL;
	}

	return 0;
}
+
/* file_operations for the DIT HAL misc device node. */
static const struct file_operations dit_hal_fops = {
	.owner = THIS_MODULE,
	.open = dit_hal_open,
	.poll = dit_hal_poll,
	.read = dit_hal_read,
	.release = dit_hal_release,
	/* NOTE(review): the native handler is reused for the compat path;
	 * this assumes no pointer translation is needed for 32-bit
	 * userspace (compat_ptr_ioctl is the usual choice) — confirm for
	 * the target ABI.
	 */
	.compat_ioctl = dit_hal_ioctl,
	.unlocked_ioctl = dit_hal_ioctl,
};

/* misc char device ("dit2"), registered in dit_hal_create(). */
static struct miscdevice dit_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DIT_HAL_DEV_NAME,
	.fops = &dit_hal_fops,
};
+
+int dit_hal_create(struct dit_ctrl_t *dc_ptr)
+{
+ int ret = 0;
+
+ if (!dc_ptr) {
+ mif_err("dc not valid\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ dc = dc_ptr;
+
+ dhc = devm_kzalloc(dc->dev, sizeof(struct dit_hal_ctrl_t), GFP_KERNEL);
+ if (!dhc) {
+ mif_err("dit hal ctrl alloc failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ init_waitqueue_head(&dhc->wq);
+ INIT_LIST_HEAD(&dhc->event_q);
+ spin_lock_init(&dhc->hal_lock);
+ spin_lock_init(&dhc->event_lock);
+ spin_lock_init(&dhc->stats_lock);
+ mutex_init(&dhc->ioctl_lock);
+
+ ret = misc_register(&dit_misc);
+ if (ret) {
+ mif_err("misc register error\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (dhc && dc) {
+ devm_kfree(dc->dev, dhc);
+ dhc = NULL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dit_hal_create);
diff --git a/dit/dit_hal.h b/dit/dit_hal.h
new file mode 100644
index 0000000..6d88865
--- /dev/null
+++ b/dit/dit_hal.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS DIT(Direct IP Translator) Driver support
+ *
+ */
+
+#ifndef __DIT_HAL_H__
+#define __DIT_HAL_H__
+
+#include "dit_common.h"
+
+#define DIT_HAL_DEV_NAME "dit2"
+
/*
 * Userspace ABI for the tethering-offload HAL.
 * Every structure below crosses the ioctl boundary and is __packed;
 * do not reorder, resize, or retype fields.
 */

/* An interface name and the DIT destination ring it maps to. */
struct iface_info {
	char iface[IFNAMSIZ];
	u16 dst_ring;
} __packed;

/* Forwarded-traffic counters exchanged with the HAL (bytes). */
struct forward_stats {
	char iface[IFNAMSIZ];
	u64 data_limit;
	u64 rx_bytes;
	u64 tx_bytes;
	u64 rx_diff;
	u64 tx_diff;
} __packed;

/* For tetheroffload hal service V1.1: warning + hard data limits. */
struct forward_limit {
	char iface[IFNAMSIZ];
	u64 data_warning;
	u64 data_limit;
} __packed;

/* NAT local address table entry: IPv4 address plus the MAC, which the
 * hardware consumes as a low-32/high-16 split of the 6-byte address.
 */
struct nat_local_addr {
	u16 index;
	u8 dst_ring;
	__be32 addr;
	union {
		u8 dev_addr[ETH_ALEN];
		struct {
			u32 dev_addr_l;
			u16 dev_addr_h;
		};
	};
} __packed;

/* NAT local port table entry; the bitfield view mirrors the raw
 * hardware register value (hw_val).
 */
struct nat_local_port {
	u16 reply_port_dst_l; /* an index of table */
	union {
		struct {
			u32 enable:1,
				reply_port_dst_h:8,
				origin_port_src:16,
				addr_index:4,
				dst_ring:2,
				is_udp:1;
		};
		u32 hw_val;
	};
} __packed;

/* DIT hardware version/capability bits reported to the HAL. */
struct hw_info {
	u32 version;
	u32 capabilities;
} __packed;

/* ioctl command space for the offload HAL ('D' magic). */
#define OFFLOAD_IOC_MAGIC	('D')

#define OFFLOAD_IOCTL_INIT_OFFLOAD	_IO(OFFLOAD_IOC_MAGIC, 0x00)
#define OFFLOAD_IOCTL_STOP_OFFLOAD	_IO(OFFLOAD_IOC_MAGIC, 0x01)
#define OFFLOAD_IOCTL_SET_LOCAL_PRFIX	_IO(OFFLOAD_IOC_MAGIC, 0x02)
#define OFFLOAD_IOCTL_GET_FORWD_STATS	_IOWR(OFFLOAD_IOC_MAGIC, 0x03, struct forward_stats)
#define OFFLOAD_IOCTL_SET_DATA_LIMIT	_IOW(OFFLOAD_IOC_MAGIC, 0x04, struct forward_stats)
#define OFFLOAD_IOCTL_SET_UPSTRM_PARAM	_IOW(OFFLOAD_IOC_MAGIC, 0x05, struct iface_info)
#define OFFLOAD_IOCTL_ADD_DOWNSTREAM	_IOWR(OFFLOAD_IOC_MAGIC, 0x06, struct iface_info)
#define OFFLOAD_IOCTL_REMOVE_DOWNSTRM	_IOW(OFFLOAD_IOC_MAGIC, 0x07, struct iface_info)
#define OFFLOAD_IOCTL_SET_DATA_WARNING_LIMIT	_IOW(OFFLOAD_IOC_MAGIC, 0x08, struct forward_limit)

#define OFFLOAD_IOCTL_SET_NAT_LOCAL_ADDR	_IOW(OFFLOAD_IOC_MAGIC, 0x20, struct nat_local_addr)
#define OFFLOAD_IOCTL_SET_NAT_LOCAL_PORT	_IOW(OFFLOAD_IOC_MAGIC, 0x21, struct nat_local_port)

/* mandatory */
#define OFFLOAD_IOCTL_GET_HW_INFO	_IOR(OFFLOAD_IOC_MAGIC, 0xE0, struct hw_info)
+
/* Events reported to the HAL through the device's read()/poll()
 * interface. Values 1..6 match the tetheroffload HAL definitions;
 * values >= 5000 are OEM-private.
 */
enum offload_event_num {
	OFFLOAD_STARTED = 1,
	OFFLOAD_STOPPED_ERROR = 2,
	OFFLOAD_STOPPED_UNSUPPORTED = 3,
	OFFLOAD_SUPPORT_AVAILABLE = 4,
	OFFLOAD_STOPPED_LIMIT_REACHED = 5,
	OFFLOAD_WARNING_REACHED = 6,

	/* OEM defined event */
	INTERNAL_OFFLOAD_STOPPED = 5000,
	OFFLOAD_MAX = S32_MAX,
};

/* Wire format of one event as read by userspace. */
struct offload_event {
	s32 event_num;
} __packed;

/* Kernel-side queue node carrying a pending event (dhc->event_q). */
struct offload_event_item {
	struct list_head list;
	enum offload_event_num event_num;
};

/* MAC address in the hardware's low-32/high-16 split layout. */
struct dev_addr_map {
	u32 dev_addr_l;
	u16 dev_addr_h;
} __packed;

/* One destination ring's bound interface; netdev is resolved from
 * iface when iface_set is true.
 */
struct dit_hal_dst_iface {
	bool iface_set;
	char iface[IFNAMSIZ];
	struct net_device *netdev;
};

/* Forwarding counters tracked per upstream iface (protected by
 * stats_lock in dit_hal_ctrl_t).
 */
struct dit_hal_stats {
	char iface[IFNAMSIZ];
	u64 data_warning;
	u64 data_limit;
	u64 rx_bytes;
	u64 tx_bytes;
	u64 rx_diff;
	u64 tx_diff;
};

/* Top-level DIT HAL state, allocated once in dit_hal_create(). */
struct dit_hal_ctrl_t {
	bool hal_enabled;	/* protected by hal_lock */
	spinlock_t hal_lock;

	struct dit_hal_dst_iface dst_iface[DIT_DST_DESC_RING_MAX];

	struct dit_hal_stats stats;
	spinlock_t stats_lock;

	enum offload_event_num last_event_num;
	wait_queue_head_t wq;	/* readers block here for events */
	struct list_head event_q;	/* protected by event_lock */
	spinlock_t event_lock;
	struct mutex ioctl_lock;	/* serializes buffer fill/free */
};
+
+int dit_hal_create(struct dit_ctrl_t *dc_ptr);
+struct net_device *dit_hal_get_dst_netdev(enum dit_desc_ring ring_num);
+void dit_hal_add_data_bytes(u64 rx_bytes, u64 tx_bytes);
+
+#endif /* __DIT_HAL_H__ */
+
diff --git a/dit/dit_net.c b/dit/dit_net.c
new file mode 100644
index 0000000..2fdd83d
--- /dev/null
+++ b/dit/dit_net.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS DIT(Direct IP Translator) Driver support
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+
+#include "modem_utils.h"
+#include "dit_net.h"
+
/*
 * RX hook for skbs destined to the DIT virtual netdev.
 * Currently a stub that accepts and ignores every skb — presumably a
 * placeholder for a future receive path; confirm before relying on it.
 */
int dit_net_receive_skb(struct sk_buff *skb)
{
	return 0;
}
+
/* ndo_open: let the stack start queueing TX packets on the device. */
static int dit_net_open(struct net_device *dev)
{
	netif_start_queue(dev);

	return 0;
}

/* ndo_stop: stop the TX queue when the device is brought down. */
static int dit_net_stop(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}

/* net_device_ops: note there is no ndo_start_xmit yet — the TX path
 * is intentionally not wired up (see the commented hook below).
 */
static const struct net_device_ops dit_net_ops = {
	.ndo_open = dit_net_open,
	.ndo_stop = dit_net_stop,
	// .ndo_start_xmit = vnet_xmit,
};
+
+static void dit_net_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &dit_net_ops;
+ dev->header_ops = 0;
+ dev->type = ARPHRD_RAWIP;
+ dev->flags = 0;
+ dev->addr_len = 0;
+ dev->hard_header_len = 0;
+ dev->tx_queue_len = 1000;
+ dev->mtu = MIF_BUFF_DEFAULT_CELL_SIZE;
+ dev->watchdog_timeo = 1000;
+}
+
+int dit_net_init(struct dit_ctrl_t *dc)
+{
+ struct net_device *dev;
+ struct dit_net_priv *priv;
+ int ret;
+
+ if (dc->netdev)
+ return 0;
+
+ dev = alloc_netdev(sizeof(struct dit_net_priv),
+ DIT_NET_DEV_NAME,
+ NET_NAME_UNKNOWN,
+ dit_net_setup);
+ if (!dev) {
+ mif_err("dit net dev alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ret = register_netdev(dev);
+ if (ret) {
+ mif_err("unable to register dit netdev rc=%d\n", ret);
+ return ret;
+ }
+
+ dc->netdev = dev;
+ priv = netdev_priv(dev);
+ priv->dc = dc;
+
+ netif_napi_add_weight(dc->netdev, &dc->napi, dit_read_rx_dst_poll, NAPI_POLL_WEIGHT);
+ napi_enable(&dc->napi);
+
+ return 0;
+}
+EXPORT_SYMBOL(dit_net_init);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung DIT Net Driver");
+
diff --git a/dit/dit_net.h b/dit/dit_net.h
new file mode 100644
index 0000000..2b0d135
--- /dev/null
+++ b/dit/dit_net.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS DIT(Direct IP Translator) Driver support
+ *
+ */
+
+#ifndef __DIT_NET_H__
+#define __DIT_NET_H__
+
+#define DIT_NET_DEV_NAME "dit%d"
+
+#include "dit_common.h"
+
+struct dit_net_priv {
+ struct dit_ctrl_t *dc;
+};
+
+int dit_net_init(struct dit_ctrl_t *dc);
+
+#endif /* __DIT_NET_H__ */
+
diff --git a/hook.c b/hook.c
new file mode 100644
index 0000000..0b931d4
--- /dev/null
+++ b/hook.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * tracepoint hook handling
+ *
+ * Copyright (C) 2021 Samsung Electronics Co., Ltd
+ *
+ */
+
+#include <trace/hooks/sched.h>
+#include "../../../kernel/sched/sched.h"
+
+#define TASK_VENDOR 0x2000
+/******************************************************************************
+ * tracepoint of Android vendor hook *
+ ******************************************************************************/
/*
 * Vendor hook: override the default sync wake-up on a wait queue.
 * Sets *done so the core skips its own wake-up, then performs the
 * wake-up with the TASK_VENDOR bit OR-ed into the mode; the companion
 * hook below translates that bit into WF_ANDROID_VENDOR wake flags.
 * Compiled out when the kernel lacks WF_ANDROID_VENDOR.
 */
static void cpif_hook_do_wake_up_sync(void *data,
	struct wait_queue_head *wq_head, int *done)
{
#ifdef WF_ANDROID_VENDOR
	*done = 1;
	__wake_up_sync_key(wq_head, TASK_INTERRUPTIBLE | TASK_VENDOR,
			   poll_to_key(EPOLLIN | EPOLLPRI | EPOLLRDNORM | EPOLLRDBAND));
#endif
}
+
/*
 * Vendor hook: consume the TASK_VENDOR marker smuggled through the
 * wake mode by cpif_hook_do_wake_up_sync() — strip the bit from the
 * mode and convert it into the WF_ANDROID_VENDOR wake flag for the
 * scheduler. Compiled out when WF_ANDROID_VENDOR is unavailable.
 */
static void cpif_hook_set_wake_flags(void *data,
	int *wake_flags, unsigned int *mode)
{
#ifdef WF_ANDROID_VENDOR
	if (*mode & TASK_VENDOR) {
		*mode &= ~TASK_VENDOR;
		*wake_flags = WF_ANDROID_VENDOR;
	}
#endif
}
+
+int hook_init(void)
+{
+ int ret;
+
+ ret = register_trace_android_vh_do_wake_up_sync(cpif_hook_do_wake_up_sync, NULL);
+ if (ret)
+ return ret;
+
+ ret = register_trace_android_vh_set_wake_flags(cpif_hook_set_wake_flags, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(hook_init);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung CPIF vendor hook driver");
diff --git a/include/circ_queue.h b/include/circ_queue.h
new file mode 100644
index 0000000..acaa6fe
--- /dev/null
+++ b/include/circ_queue.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * @file circ_queue.h
+ * @brief header file for general circular queue operations
+ * @date 2014/02/18
+ * @author Hankook Jang (hankook.jang@samsung.com)
+ */
+
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#ifndef __MODEM_CIRCULAR_QUEUE_H__
+#define __MODEM_CIRCULAR_QUEUE_H__
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
/*
 * @brief the structure for a circular queue in a memory-type interface
 */
struct circ_queue {
	spinlock_t lock;

	/*
	 * the flag and counter for checking busy status of a circular queue
	 */
	atomic_t busy;

	/*
	 * the start address of the data buffer in a circular queue
	 * (lives in device/shared memory, hence __iomem)
	 */
	void __iomem *buff;

	/*
	 * the size of the data buffer in a circular queue, in bytes
	 */
	unsigned int size;

	/*
	 * the pointer to the "HEAD (IN)" variable that contains a byte offset
	 * from @b @@buff
	 */
	void __iomem *head;

	/*
	 * the pointer to the "TAIL (OUT)" variable that contains a byte offset
	 * from @b @@buff
	 */
	void __iomem *tail;
};
+
/*
 * @brief get the start address of the data buffer in a circular queue
 * @param q the pointer to a circular queue
 * @return the start address of the data buffer in the @e @@q
 * NOTE(review): returns the __iomem buffer as a plain char * — sparse
 * would flag the dropped address-space qualifier; confirm callers
 * treat it as shared memory.
 */
static inline char *get_buff(struct circ_queue *q)
{
	return q->buff;
}

/*
 * @brief get the size of the data buffer in a circular queue
 * @param q the pointer to a circular queue
 * @return the size of the data buffer in the @e @@q
 */
static inline unsigned int get_size(struct circ_queue *q)
{
	return q->size;
}

/*
 * @brief get the "HEAD (IN)" pointer value of a circular queue
 * @param q the pointer to a circular queue
 * @return the "HEAD (IN)" pointer value of the @e @@q
 */
static inline unsigned int get_head(struct circ_queue *q)
{
	return ioread32(q->head);
}

/*
 * @brief set the "HEAD (IN)" pointer value of a circular queue with @b
 * @@in
 * @param q the pointer to a circular queue
 * @param in the value to be stored into the "HEAD (IN)" pointer
 */
static inline void set_head(struct circ_queue *q, unsigned int in)
{
	iowrite32(in, q->head);
}

/*
 * @brief get the "TAIL (OUT)" pointer value of a circular queue
 * @param q the pointer to a circular queue
 * @return the "TAIL (OUT)" pointer value of the @e @@q
 */
static inline unsigned int get_tail(struct circ_queue *q)
{
	return ioread32(q->tail);
}

/*
 * @brief set the "TAIL (OUT)" pointer value of a circular queue with @e
 * @@out
 * @param q the pointer to a circular queue
 * @param out the value to be stored into the "TAIL (OUT)" pointer
 */
static inline void set_tail(struct circ_queue *q, unsigned int out)
{
	iowrite32(out, q->tail);
}
+
+/*
+ * @brief check whether or not both "IN" and "OUT" pointer values are valid
+ * @param qsize the size of the data buffer in a circular queue
+ * @param in the value of the "HEAD (IN)" pointer
+ * @param out the value of the "TAIL (OUT)" pointer
+ * @retval "true" if all pointer values are valid
+ * @retval "false" if either IN or OUT pointer value is NOT valid
+ */
+static inline bool circ_valid(unsigned int qsize,
+ unsigned int in,
+ unsigned int out)
+{
+ if (unlikely(in >= qsize))
+ return false;
+
+ if (unlikely(out >= qsize))
+ return false;
+
+ return true;
+}
+
/*
 * @brief check whether or not a circular queue is empty
 * @param in the value of the "HEAD (IN)" pointer
 * @param out the value of the "TAIL (OUT)" pointer
 * @retval "true" if a circular queue is empty
 * @retval "false" if a circular queue is NOT empty
 */
static inline bool circ_empty(unsigned int in, unsigned int out)
{
	/* empty exactly when the read pointer has caught the write pointer */
	return in == out;
}
+
/*
 * @brief get the size of free space in a circular queue
 * @param qsize the size of the data buffer in a circular queue
 * @param in the value of the "HEAD (IN)" pointer
 * @param out the value of the "TAIL (OUT)" pointer
 * @return the size of free space in a circular queue
 *         (one slot is always kept unused to distinguish full/empty)
 */
static inline unsigned int circ_get_space(unsigned int qsize,
					  unsigned int in,
					  unsigned int out)
{
	if (in < out)
		return out - in - 1;

	return qsize + out - in - 1;
}
+
/*
 * @brief check whether a circular queue is full
 * @param qsize the size of the data buffer in a circular queue
 * @param in the value of the "HEAD (IN)" pointer
 * @param out the value of the "TAIL (OUT)" pointer
 * @retval "true" when no free space remains (the write pointer sits
 *         exactly one slot behind the read pointer)
 */
static inline bool circ_full(unsigned int qsize, unsigned int in,
			     unsigned int out)
{
	return ((in + 1) % qsize) == out;
}
+
/*
 * @brief get the size of data in a circular queue
 * @param qsize the size of the data buffer in a circular queue
 * @param in the value of the "HEAD (IN)" pointer
 * @param out the value of the "TAIL (OUT)" pointer
 * @return the number of bytes currently stored in the queue
 */
static inline unsigned int circ_get_usage(unsigned int qsize,
					  unsigned int in,
					  unsigned int out)
{
	if (in >= out)
		return in - out;

	return qsize - out + in;
}
+
/*
 * @brief calculate a new pointer value for a circular queue
 * @param qsize the size of the data buffer in a circular queue
 * @param p the old value of a queue pointer
 * @param len the length to be added to the @e @@p pointer value
 * @return the advanced pointer, wrapped into [0, qsize)
 */
static inline unsigned int circ_new_ptr(unsigned int qsize,
					unsigned int p,
					unsigned int len)
{
	/* equivalent to repeatedly subtracting qsize from (p + len) */
	return (p + len) % qsize;
}
+
/*
 * @brief calculate a previous pointer value for a circular queue
 * @param qsize the size of the data buffer in a circular queue
 * @param p the old value of a queue pointer
 * @param len the length to be prior to the @e @@p pointer value
 * @return the rewound pointer, wrapped into [0, qsize)
 */
static inline unsigned int circ_prev_ptr(unsigned int qsize,
					 unsigned int p,
					 unsigned int len)
{
	int pos = (int)p - (int)len;

	/* step forward by whole queue lengths until back in range */
	while (pos < 0)
		pos += (int)qsize;

	return (unsigned int)pos;
}
+
+/*
+ * @brief copy the data in a circular queue to a local buffer
+ * @param dst the start address of the local buffer
+ * @param src the start address of the data buffer in a circular queue
+ * @param qsize the size of the data buffer in a circular queue
+ * @param out the offset in the data buffer to be read
+ * @param len the length of data to be read
+ * @remark This function should be invoked after checking the data length.
+ */
+static inline void circ_read(u8 *dst, u8 *src, unsigned int qsize,
+ unsigned int out, unsigned int len)
+{
+ if ((out + len) <= qsize) {
+ /* ----- (out) (in) ----- */
+ /* ----- 7f 00 00 7e ----- */
+ memcpy(dst, (src + out), len);
+ } else {
+ unsigned int len1;
+
+ /* (in) ----------- (out) */
+ /* 00 7e ----------- 7f 00 */
+
+ /* 1) data start (out) ~ buffer end */
+ len1 = qsize - out;
+ memcpy(dst, (src + out), len1);
+
+ /* 2) buffer start ~ data end (in?) */
+ memcpy((dst + len1), src, (len - len1));
+ }
+}
+
+/*
+ * @brief copy the data in a local buffer to a circular queue
+ * @param dst the start address of the data buffer in a circular queue
+ * @param src the start address of the data in a local buffer
+ * @param qsize the size of the data buffer in a circular queue
+ * @param in the offset in the data buffer for the data to be stored
+ * @param len the length of data to be stored
+ * @remark This function should be invoked after checking the free space.
+ */
+static inline void circ_write(u8 *dst, u8 *src, unsigned int qsize,
+ unsigned int in, unsigned int len)
+{
+ if ((in + len) < qsize) {
+ /* (in) ----------- (out) */
+ /* 00 7e ----------- 7f 00 */
+ memcpy((dst + in), src, len);
+ } else {
+ unsigned int space;
+
+ /* ----- (out) (in) ----- */
+ /* ----- 7f 00 00 7e ----- */
+
+ /* 1) space start (in) ~ buffer end */
+ space = qsize - in;
+ memcpy((dst + in), src, ((len > space) ? space : len));
+
+ /* 2) buffer start ~ data end */
+ if (len > space)
+ memcpy(dst, (src + space), (len - space));
+ }
+}
+
+#endif
diff --git a/include/exynos_ipc.h b/include/exynos_ipc.h
new file mode 100644
index 0000000..d1b6228
--- /dev/null
+++ b/include/exynos_ipc.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#ifndef __EXYNOS_IPC_H__
+#define __EXYNOS_IPC_H__
+
+#include <linux/types.h>
+#include "modem_v1.h"
+
+#define SHM_BOOT_MAGIC 0xBDBD
+#define SHM_DUMP_MAGIC 0xBDBD
+#define SHM_IPC_MAGIC 0xAA
+
+#define EXYNOS_SINGLE_MASK (0x3<<6)
+#define EXYNOS_MULTI_START_MASK (0x1<<7)
+#define EXYNOS_MULTI_LAST_MASK (0x1<<6)
+
+#define EXYNOS_START_MASK 0xABCD
+#define EXYNOS_START_OFFSET 0
+#define EXYNOS_START_SIZE 2
+
+#define EXYNOS_FRAME_SEQ_OFFSET 2
+#define EXYNOS_FRAME_SIZE 2
+
+#define EXYNOS_FRAG_CONFIG_OFFSET 4
+#define EXYNOS_FRAG_CONFIG_SIZE 2
+
+#define EXYNOS_LEN_OFFSET 6
+#define EXYNOS_LEN_SIZE 2
+
+#define EXYNOS_CH_ID_OFFSET 8
+#define EXYNOS_CH_SIZE 1
+
+#define EXYNOS_CH_SEQ_OFFSET 9
+#define EXYNOS_CH_SEQ_SIZE 1
+
+#define EXYNOS_HEADER_SIZE 12
+
+#define EXYNOS_FMT_NUM 1
+#define EXYNOS_RFS_NUM 10
+
/* EXYNOS link-layer header prepended to every frame on the link;
 * layout matches the EXYNOS_*_OFFSET/SIZE constants above.
 */
struct __packed exynos_link_header {
	u16 sync;	/* EXYNOS_START_MASK (0xABCD) */
	u16 seq;	/* link-wide frame sequence number */
	u16 cfg;	/* fragmentation config bits */
	u16 len;	/* frame length */
	u8 ch_id;	/* logical channel id */
	u8 ch_seq;	/* per-channel sequence number */
};

/* Per-link sequence bookkeeping: one counter per channel id. */
struct __packed exynos_seq_num {
	u16 frame_cnt;
	u8 ch_cnt[255];
};
+
/*
 * Frame-header inspectors. Each takes a pointer to the start of a raw
 * EXYNOS frame and reads a 16-bit field at a fixed offset.
 * NOTE(review): these cast u8* to u16* — they assume the frame buffer
 * is at least 2-byte aligned and little-endian; confirm for the
 * platforms this runs on.
 */

/* True when the frame begins with the 0xABCD start marker. */
static inline bool exynos_start_valid(u8 *frm)
{
	u16 cfg = *(u16 *)(frm + EXYNOS_START_OFFSET);

	return cfg == EXYNOS_START_MASK ? true : false;
}

/* True for the first fragment of a multi-fragment packet. */
static inline bool exynos_multi_start_valid(u8 *frm)
{
	u16 cfg = *(u16 *)(frm + EXYNOS_FRAG_CONFIG_OFFSET);

	return ((cfg >> 8) & EXYNOS_MULTI_START_MASK) == EXYNOS_MULTI_START_MASK;
}

/* True for the last fragment of a multi-fragment packet. */
static inline bool exynos_multi_last_valid(u8 *frm)
{
	u16 cfg = *(u16 *)(frm + EXYNOS_FRAG_CONFIG_OFFSET);

	return ((cfg >> 8) & EXYNOS_MULTI_LAST_MASK) == EXYNOS_MULTI_LAST_MASK;
}

/* True when the frame carries a whole packet (both single bits set). */
static inline bool exynos_single_frame(u8 *frm)
{
	u16 cfg = *(u16 *)(frm + EXYNOS_FRAG_CONFIG_OFFSET);

	return ((cfg >> 8) & EXYNOS_SINGLE_MASK) == EXYNOS_SINGLE_MASK;
}

/* True when the frame is one fragment of a multi-fragment packet. */
static inline bool exynos_multi_frame(u8 *frm)
{
	u16 cfg = *(u16 *)(frm + EXYNOS_FRAG_CONFIG_OFFSET);

	return ((cfg >> 8) & EXYNOS_SINGLE_MASK) != EXYNOS_SINGLE_MASK;
}
+
/* True for the RCS channels. */
static inline bool exynos_rcs_ch(u8 ch)
{
	return (ch == EXYNOS_CH_ID_RCS_0 || ch == EXYNOS_CH_ID_RCS_1) ? true : false;
}

/* Extract the logical channel id from a frame header. */
static inline u8 exynos_get_ch(u8 *frm)
{
	return frm[EXYNOS_CH_ID_OFFSET];
}

/* Extract the link-wide frame sequence number. */
static inline unsigned int exynos_get_frame_seq(u8 *frm)
{
	u16 cfg = *(u16 *)(frm + EXYNOS_FRAME_SEQ_OFFSET);

	return cfg;
}

/* Extract the per-channel sequence number. */
static inline unsigned int exynos_get_ch_seq(u8 *frm)
{
	return frm[EXYNOS_CH_SEQ_OFFSET];
}
+
/* Number of pad bytes needed to round len up to an 8-byte boundary
 * (0 when len is already aligned).
 */
static inline unsigned int exynos_calc_padding_size(unsigned int len)
{
	unsigned int rem = len % 8;

	return rem ? 8 - rem : 0;
}
+
/* Extract the 16-bit frame length field (excludes trailing padding). */
static inline unsigned int exynos_get_frame_len(u8 *frm)
{
	return (unsigned int)*(u16 *)(frm + EXYNOS_LEN_OFFSET);
}
+
/*
 * Channel-id classifiers: map a logical channel id onto the service
 * it carries (FMT/RFS/boot/dump/PS/...). Channel ranges come from
 * modem_v1.h.
 */

/* Formatted (control) IPC channels. */
static inline bool exynos_fmt_ch(u8 ch)
{
	return (ch == EXYNOS_CH_ID_FMT_0 || ch == EXYNOS_CH_ID_FMT_1) ? true : false;
}

/* Remote file system channels. */
static inline bool exynos_rfs_ch(u8 ch)
{
	return (ch >= EXYNOS_CH_ID_RFS_0 && ch <= EXYNOS_CH_ID_RFS_9) ?
		true : false;
}

/* Boot (firmware download) channel. */
static inline bool exynos_boot_ch(u8 ch)
{
	return (ch == EXYNOS_CH_ID_BOOT) ? true : false;
}

/* NOTE(review): despite the name this matches the LOOPBACK channel,
 * not EXYNOS_CH_ID_DUMP — confirm this is intentional.
 */
static inline bool exynos_dump_ch(u8 ch)
{
	return (ch == EXYNOS_CH_ID_LOOPBACK) ? true : false;
}

/* Boot or crash-dump channels. */
static inline bool exynos_bootdump_ch(u8 ch)
{
	return (ch == EXYNOS_CH_ID_BOOT || ch == EXYNOS_CH_ID_DUMP) ?
		true : false;
}

/* Any regular IPC channel (everything except boot/dump/0). */
static inline bool exynos_ipc_ch(u8 ch)
{
	return (ch > 0 && (ch != EXYNOS_CH_ID_BOOT && ch != EXYNOS_CH_ID_DUMP)) ?
		true : false;
}

/* Packet-switched (PDP data) channels; range depends on the extended
 * channel-id config.
 */
static inline bool exynos_ps_ch(u8 ch)
{
#if IS_ENABLED(CONFIG_CH_EXTENSION)
	return (ch >= EXYNOS_CH_EX_ID_PDP_0 && ch <= EXYNOS_CH_EX_ID_PDP_MAX) ?
#else
	return (ch >= EXYNOS_CH_ID_PDP_0 && ch <= EXYNOS_CH_ID_PDP_9) ?
#endif
		true : false;
}

/* CP log channel. */
static inline bool exynos_log_ch(u8 ch)
{
	return (ch == EXYNOS_CH_ID_CPLOG) ? true : false;
}

/* Router (BT DUN) channel. */
static inline bool exynos_router_ch(u8 ch)
{
	return (ch == EXYNOS_CH_ID_BT_DUN) ? true : false;
}

/* eMBMS broadcast channels. */
static inline bool exynos_embms_ch(u8 ch)
{
	return (ch == EXYNOS_CH_ID_EMBMS_0 || ch == EXYNOS_CH_ID_EMBMS_1) ? true : false;
}

/* UTS channel. */
static inline bool exynos_uts_ch(u8 ch)
{
	return (ch == EXYNOS_CH_ID_UTS) ? true : false;
}

/* Wi-Fi service channel 0. */
static inline bool exynos_wfs0_ch(u8 ch)
{
	return (ch == EXYNOS_CH_ID_WFS_0) ? true : false;
}

/* Wi-Fi service channel 1. */
static inline bool exynos_wfs1_ch(u8 ch)
{
	return (ch == EXYNOS_CH_ID_WFS_1) ? true : false;
}

/* OEM-defined channels. */
static inline bool exynos_oem_ch(u8 ch)
{
	return (ch >= EXYNOS_CH_ID_OEM_0 && ch <= EXYNOS_CH_ID_OEM_7) ?
		true : false;
}
+
+static inline unsigned int exynos_get_total_len(u8 *frm)
+{
+ unsigned int len;
+ unsigned int pad;
+
+ len = exynos_get_frame_len(frm);
+ pad = exynos_calc_padding_size(len) ? exynos_calc_padding_size(len) : 0;
+ return len + pad;
+}
+
/*
 * Decrement the remaining-frame counter in a frag-config word and
 * mark bit 14 when the count reaches zero, preserving the high byte
 * of the config.
 * NOTE(review): frame_index is u8, so the 0x3fff mask is effectively
 * truncated to 8 bits and fr_cfg's own masking (fr_cfg &= 0x3fff /
 * |= 0x4000) is discarded by the final (fr_cfg & 0xff00) — confirm
 * frame counts never exceed 255 and bit 14 is intentionally dropped.
 */
static inline u16 modify_next_frame(u16 fr_cfg)
{
	u8 frame_index;

	frame_index = fr_cfg & 0x3fff;

	if (!(--frame_index)) {
		fr_cfg &= 0x3fff;
		fr_cfg |= 0x4000;
	}

	return (fr_cfg & 0xff00) | frame_index;
}
+
/* True when the frame length requires trailing alignment padding. */
static inline bool exynos_padding_exist(u8 *frm)
{
	return exynos_calc_padding_size(exynos_get_frame_len(frm)) ? true : false;
}

/* Packet index (bits 13:8) of a multi-fragment control word. */
static inline u32 exynos_multi_packet_index(u16 ctrl)
{
	return ((ctrl >> 8) & 0x3f);
}

/* Fragment index (low byte) of a multi-fragment control word. */
static inline u32 exynos_multi_frame_index(u16 ctrl)
{
	return ctrl & 0xff;
}

/* True when the control word marks the last fragment. */
static inline bool exynos_multi_last(u16 ctrl)
{
	return (ctrl >> 8) & EXYNOS_MULTI_LAST_MASK ? true : false;
}

/* Header length is fixed for this protocol revision. */
static inline unsigned int exynos_get_hdr_len(u8 *frm)
{
	return EXYNOS_HEADER_SIZE;
}
/* No extended-length support in this protocol revision. */
static inline bool exynos_ext_len(u8 *frm)
{
	return 0;
}
/* No control byte in this protocol revision. */
static inline u8 exynos_get_ctrl(u8 *frm)
{
	return 0;
}
+#endif
diff --git a/include/legacy.h b/include/legacy.h
new file mode 100644
index 0000000..8e04de2
--- /dev/null
+++ b/include/legacy.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BOOT_H__
+#define __BOOT_H__
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/kfifo.h>
+#include <linux/netdevice.h>
+
+#include "circ_queue.h"
+#include "sipc5.h"
+
+#define BAD_MSG_BUFFER_SIZE 32
+
/* Indices of the legacy shared-memory IPC regions; HPRIO_RAW exists
 * only when legacy QoS is enabled, so the numeric values shift with
 * the config.
 */
enum legacy_ipc_map {
	IPC_MAP_FMT = 0,	/* formatted (control) IPC */
#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
	IPC_MAP_HPRIO_RAW,	/* high-priority raw data */
#endif
	IPC_MAP_NORM_RAW,	/* normal-priority raw data */
	IPC_MAP_MAX,
};
+
/* One legacy IPC region: a TX/RX circular-queue pair in shared memory
 * plus the interrupt masks and socket-buffer staging queues for it.
 */
struct legacy_ipc_device {
	enum legacy_ipc_map id;
	char name[16];

	struct circ_queue txq;
	struct circ_queue rxq;

	/* interrupt mask bits exchanged with the CP for this region */
	u16 msg_mask;
	u16 req_ack_mask;
	u16 res_ack_mask;

	struct sk_buff_head *skb_txq;
	struct sk_buff_head *skb_rxq;

	/* outstanding REQ_ACKs, indexed by direction (TX/RX) */
	unsigned int req_ack_cnt[MAX_DIR];

	spinlock_t tx_lock;
};
+
/* Legacy shared-memory link: the magic/access control words mapped in
 * shared memory and the per-region IPC devices.
 */
struct legacy_link_device {

	struct link_device *ld;

	atomic_t active;	/* link usable flag */

	u32 __iomem *magic;	/* shared-memory magic word */
	u32 __iomem *mem_access;	/* CP memory-access grant word */

	struct legacy_ipc_device *dev[IPC_MAP_MAX];
};
+
/* Build/tear-down and data-path entry points for the legacy link;
 * implemented in the legacy link source file.
 */
int create_legacy_link_device(struct mem_link_device *mld);
int init_legacy_link(struct legacy_link_device *bl);
int xmit_to_legacy_link(struct mem_link_device *mld, u8 ch,
			struct sk_buff *skb, enum legacy_ipc_map legacy_buffer_index);
struct sk_buff *recv_from_legacy_link(struct mem_link_device *mld,
				      struct legacy_ipc_device *dev, unsigned int in, int *ret);
bool check_legacy_tx_pending(struct mem_link_device *mld);
+
+#endif /* end of __BOOT_H__ */
diff --git a/include/sbd.h b/include/sbd.h
new file mode 100644
index 0000000..f5cd4d9
--- /dev/null
+++ b/include/sbd.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#ifndef __MODEM_INCLUDE_SBD_H__
+#define __MODEM_INCLUDE_SBD_H__
+
+#include <linux/types.h>
+#include <linux/kfifo.h>
+#include "modem_v1.h"
+#include "circ_queue.h"
+
+/*
+ * Abbreviations
+ * =============
+ * SB, sb Shared Buffer
+ * SBD, sbd Shared Buffer Descriptor
+ * SBDV, sbdv Shared Buffer Descriptor Vector (Array)
+ * --------
+ * RB, rb Ring Buffer
+ * RBO, rbo Ring Buffer Offset (offset of an RB)
+ * RBD, rbd Ring Buffer Descriptor (descriptor of an RB)
+ * RBDO, rbdo Ring Buffer Descriptor Offset (offset of an RBD)
+ * --------
+ * RBP, rbp Ring Buffer Pointer (read pointer, write pointer)
+ * RBPS, rbps Ring Buffer Pointer Set (set of RBPs)
+ * --------
+ * CH, ch Channel
+ * CHD, chd Channel Descriptor
+ * --------
+ * desc descriptor
+ */
+
+#define CMD_DESC_RGN_OFFSET 0
+#define CMD_DESC_RGN_SIZE SZ_64K
+
+#define CTRL_RGN_OFFSET (CMD_DESC_RGN_OFFSET)
+#define CTRL_RGN_SIZE (1 * SZ_1K)
+
+#define CMD_RGN_OFFSET (CTRL_RGN_OFFSET + CTRL_RGN_SIZE)
+#define CMD_RGN_SIZE (1 * SZ_1K)
+
+#define DESC_RGN_OFFSET (CMD_RGN_OFFSET + CMD_RGN_SIZE)
+#define DESC_RGN_SIZE (CMD_DESC_RGN_SIZE - CTRL_RGN_SIZE - CMD_RGN_SIZE)
+
+#define BUFF_RGN_OFFSET (CMD_DESC_RGN_SIZE)
+
+#define MAX_SBD_SIPC_CHANNELS IOD_CH_ID_MAX /* 2 ^ 8 */
+#define MAX_SBD_LINK_IDS 32 /* up to 32 ids */
+
/*
 * @brief Priority for QoS(Quality of Service)
 * @remark the values double as SBD channel numbers: PS traffic is remapped
 *	   to QOS_HIPRIO/QOS_NORMAL in sbd_ch2rb_with_skb()
 */
enum qos_prio {
	QOS_HIPRIO = 10,	/* high-priority PS channel */
	QOS_NORMAL,		/* normal-priority PS channel */
	QOS_MAX_PRIO,		/* sentinel; not a valid channel */
};
+
/*
 * @brief SBD Ring Buffer Descriptor
 * (i.e. IPC Channel Descriptor Structure in MIPI LLI_IPC_AN)
 * N.B. __packed: this layout lives in SHMEM and is parsed by the CP, so
 * field sizes and offsets must not change.
 */
struct __packed sbd_rb_desc {
	/*
	 * ch Channel number defined in the Samsung IPC specification
	 * --------
	 * reserved
	 */
	u16 ch;
	u16 reserved;

	/* direction 0 (UL, TX, AP-to-CP), 1 (DL, RX, CP-to-AP)
	 * --------
	 * signaling 0 (polling), 1 (interrupt)
	 */
	u16 direction;
	u16 signaling;

	/*
	 * Mask to be written to the signal register (viz. 1 << @@id)
	 * (i.e. write_signal)
	 */
	u32 sig_mask;

	/*
	 * length Length of an SBD Ring Buffer
	 * --------
	 * id (1) ID for a link channel that consists of an RB pair
	 * (2) Index into each array in the set of RBP arrays
	 * N.B. set of RBP arrays =
	 * {[ul_rp], [ul_wp], [dl_rp], [dl_wp]}
	 */
	u16 length;
	u16 id;

	/*
	 * buff_size Size of each data buffer in an SBD RB (default 2048)
	 * --------
	 * payload_offset Offset of the payload in each data buffer(default 0)
	 */
	u16 buff_size;
	u16 payload_offset;
};
+
/*
 * @brief SBD Channel Descriptor
 * Both members are byte offsets from the start of SHMEM (__packed: shared
 * with the CP).
 */
struct __packed sbd_rb_channel {
	u32 rb_desc_offset; /* RB Offset for Descriptor */
	u32 buff_pos_array_offset; /* RB Offset for Array of Each Buffer Position */
};
+
/*
 * @brief SBD Global Descriptor
 * Root descriptor of the SBD region in SHMEM (__packed: shared with the CP).
 * All per-channel tables are indexed [link id][UL/DL].
 */
struct __packed sbd_global_desc {
	/* Version */
	u32 version;

	/* Number of link channels */
	u32 num_channels;

	/* Offset of the array of "SBD Ring Buffer Pointers Set" in SHMEM */
	u32 rpwp_array_offset;

	/* Array of SBD channel descriptors */
	struct sbd_rb_channel rb_ch[MAX_SBD_LINK_IDS][ULDL];

	/* Array of SBD ring buffer descriptor pairs */
	struct sbd_rb_desc rb_desc[MAX_SBD_LINK_IDS][ULDL];
};
+
/*
 * @brief SBD ring buffer (with logical view)
 * @remark physical SBD ring buffer
 * = {length, *rp, *wp, offset array, size array}
 * Kernel-side (not shared) mirror of one direction of an SBD channel; the
 * shared state is reached through the rp/wp/buff_pos_array pointers below.
 */
struct sbd_ring_buffer {
	/* Spin-lock for each SBD RB */
	spinlock_t lock;

	/* Pointer to the "SBD link" device instance to which an RB belongs */
	struct sbd_link_device *sl;

	/* UL/DL socket buffer queues */
	struct sk_buff_head skb_q;

	/* Whether or not link-layer header is used */
	bool lnk_hdr;

	/*
	 * Variables for receiving a frame with the SIPC5 "EXT_LEN" attribute
	 * (With SBD architecture, a frame with EXT_LEN can be scattered into
	 * consecutive multiple data buffer slots.)
	 * more  — mid-reassembly flag; total/rcvd track progress (see
	 * check_more())
	 */
	bool more;
	unsigned int total;
	unsigned int rcvd;

	/* Link ID, SIPC channel, and IPC direction */
	u16 id; /* for @desc->id */
	u16 ch; /* for @desc->ch */
	u16 dir; /* for @desc->direction */
	u16 len; /* for @desc->length */
	u16 buff_size; /* for @desc->buff_size */
	u16 payload_offset; /* for @desc->payload_offset */

	/* Pointer to the array of pointers to each data buffer */
	u8 **buff;

	/* Pointer to the data buffer region in SHMEM */
	u8 *buff_rgn;

	/* Pointers to variables in the shared region for a physical SBD RB */
	u16 *rp; /* sl->rp[@dir][@id] */
	u16 *wp; /* sl->wp[@dir][@id] */
	u32 *buff_pos_array; /* Array of Each Buffer Position */
	u32 *buff_len_array; /* Array of Each Buffer Length */
	struct sbd_rb_channel *rb_ch; /* desc->rb_ch[@id][@dir] */
	struct sbd_rb_desc *rb_desc; /* desc->rb_desc[@id][@dir] */

	/* Pointer to the IO device and the link device to which an SBD RB belongs */
	struct io_device *iod;
	struct link_device *ld;

	/* Flow control */
	atomic_t busy;
};
+
/* Static configuration for one SBD link channel (filled before RB setup) */
struct sbd_link_attr {
	/* Link ID and SIPC channel number */
	u16 id;
	u16 ch;

	/* Whether or not link-layer header is used */
	bool lnk_hdr;

	/* Length of an SBD RB */
	unsigned int rb_len[ULDL];

	/* Size of the data buffer for each SBD in an SBD RB */
	unsigned int buff_size[ULDL];
};
+
/* One logical SBD IPC device: an UL/DL ring-buffer pair bound to an io-device */
struct sbd_ipc_device {
	/* Pointer to the IO device to which an SBD IPC device belongs */
	struct io_device *iod;

	/* SBD IPC device ID == Link ID --> rb.id */
	u16 id;

	/* SIPC Channel ID --> rb.ch */
	u16 ch;

	/* UL/DL SBD RB pair in the kernel space */
	struct sbd_ring_buffer rb[ULDL];
};
+
/* Top-level state of the SBD shared-memory link */
struct sbd_link_device {
	/* Pointer to the link device to which an SBD link belongs */
	struct link_device *ld;

	/* Flag for checking whether or not an SBD link is active */
	atomic_t active;

	/* Version of SBD IPC */
	unsigned int version;

	/*
	 * Start address of SHMEM
	 * @shmem = SHMEM.VA
	 */
	u8 *shmem;
	unsigned int shmem_size;
	unsigned int zmb_offset;	/* zero-memcpy buffer region offset — TODO confirm */

	/* The number of link channels for AP-CP IPC */
	unsigned int num_channels;

	/* Table of link attributes */
	struct sbd_link_attr link_attr[MAX_SBD_LINK_IDS];

	/* Logical IPC devices */
	struct sbd_ipc_device ipc_dev[MAX_SBD_LINK_IDS];

	/*
	 * (1) Conversion tables from "Link ID (ID)" to "SIPC Channel Number (CH)"
	 * (2) Conversion tables from "SIPC Channel Number (CH)" to "Link ID (ID)"
	 */
	u16 id2ch[MAX_SBD_LINK_IDS];
	u16 ch2id[MAX_SBD_SIPC_CHANNELS];

	/*
	 * Pointers to each array of arrays of SBD RB Pointers,
	 * viz. rp[UL] = pointer to ul_rp[]
	 * rp[DL] = pointer to dl_rp[]
	 * wp[UL] = pointer to ul_wp[]
	 * wp[DL] = pointer to dl_wp[]
	 */
	u16 *rp[ULDL];
	u16 *wp[ULDL];

	/*
	 * Above are variables for managing and controlling SBD IPC
	 * ========================================================================
	 * Below are pointers to the descriptor sections in SHMEM
	 */

	/* Pointer to the SBD global descriptor header */
	struct sbd_global_desc *g_desc;

	/* Bitmask of link IDs with completed RX — TODO confirm against users */
	unsigned long rxdone_mask;
};
+
+static inline void sbd_activate(struct sbd_link_device *sl)
+{
+ if (sl)
+ atomic_set(&sl->active, 1);
+}
+
+static inline void sbd_deactivate(struct sbd_link_device *sl)
+{
+ if (sl)
+ atomic_set(&sl->active, 0);
+}
+
+static inline bool sbd_active(struct sbd_link_device *sl)
+{
+ if (!sl)
+ return false;
+ return atomic_read(&sl->active) ? true : false;
+}
+
+static inline u16 sbd_ch2id(struct sbd_link_device *sl, u16 ch)
+{
+ return sl->ch2id[ch];
+}
+
+static inline u16 sbd_id2ch(struct sbd_link_device *sl, u16 id)
+{
+ return sl->id2ch[id];
+}
+
+static inline struct sbd_ipc_device *sbd_ch2dev(struct sbd_link_device *sl,
+ u16 ch)
+{
+ u16 id = sbd_ch2id(sl, ch);
+
+ return (id < MAX_SBD_LINK_IDS) ? &sl->ipc_dev[id] : NULL;
+}
+
+static inline struct sbd_ipc_device *sbd_id2dev(struct sbd_link_device *sl,
+ u16 id)
+{
+ return (id < MAX_SBD_LINK_IDS) ? &sl->ipc_dev[id] : NULL;
+}
+
+static inline struct sbd_ring_buffer *sbd_ch2rb(struct sbd_link_device *sl,
+ unsigned int ch,
+ enum direction dir)
+{
+ u16 id = sbd_ch2id(sl, ch);
+
+ return (id < MAX_SBD_LINK_IDS) ? &sl->ipc_dev[id].rb[dir] : NULL;
+}
+
+static inline struct sbd_ring_buffer *sbd_ch2rb_with_skb(struct sbd_link_device *sl,
+ unsigned int ch,
+ enum direction dir,
+ struct sk_buff *skb)
+{
+ u16 id;
+
+ if (sipc_ps_ch(ch))
+ ch = (skb && skb->queue_mapping == 1) ? QOS_HIPRIO : QOS_NORMAL;
+
+ id = sbd_ch2id(sl, ch);
+ return (id < MAX_SBD_LINK_IDS) ? &sl->ipc_dev[id].rb[dir] : NULL;
+}
+
+static inline struct sbd_ring_buffer *sbd_id2rb(struct sbd_link_device *sl,
+ unsigned int id,
+ enum direction dir)
+{
+ return (id < MAX_SBD_LINK_IDS) ? &sl->ipc_dev[id].rb[dir] : NULL;
+}
+
+static inline bool rb_empty(struct sbd_ring_buffer *rb)
+{
+ WARN_ON(!rb);
+
+ return circ_empty(*rb->rp, *rb->wp);
+}
+
+static inline unsigned int rb_space(struct sbd_ring_buffer *rb)
+{
+ WARN_ON(!rb);
+
+ return circ_get_space(rb->len, *rb->wp, *rb->rp);
+}
+
+static inline unsigned int rb_usage(struct sbd_ring_buffer *rb)
+{
+ WARN_ON(!rb);
+
+ return circ_get_usage(rb->len, *rb->wp, *rb->rp);
+}
+
+static inline void set_lnk_hdr(struct sbd_ring_buffer *rb, struct sk_buff *skb)
+{
+ skbpriv(skb)->lnk_hdr = rb->lnk_hdr && !rb->more;
+}
+
+static inline void check_more(struct sbd_ring_buffer *rb, struct sk_buff *skb)
+{
+ if (rb->lnk_hdr) {
+ if (!rb->more) {
+ if (sipc5_get_frame_len(skb->data) > rb->buff_size) {
+ rb->more = true;
+ rb->total = sipc5_get_frame_len(skb->data);
+ rb->rcvd = skb->len;
+ }
+ } else {
+ rb->rcvd += skb->len;
+ if (rb->rcvd >= rb->total) {
+ rb->more = false;
+ rb->total = 0;
+ rb->rcvd = 0;
+ }
+ }
+ }
+}
+
#if IS_ENABLED(CONFIG_LINK_DEVICE_WITH_SBD_ARCH)
/* Real SBD implementation (provided by the SBD link source file) */
int create_sbd_link_device(struct link_device *ld, struct sbd_link_device *sl,
	u8 *shmem_base, unsigned int shmem_size);

int init_sbd_link(struct sbd_link_device *sl);
bool check_sbd_tx_pending(struct mem_link_device *mld);

/* Programmed-I/O transmit/receive of one skb on an SBD ring buffer */
int sbd_pio_tx(struct sbd_ring_buffer *rb, struct sk_buff *skb);
int sbd_pio_rx(struct sbd_ring_buffer *rb, struct sk_buff **skb);
#else
/* Stubs: keep callers building when SBD support is compiled out */
static inline int create_sbd_link_device(struct link_device *ld, struct sbd_link_device *sl,
	u8 *shmem_base, unsigned int shmem_size) { return 0; }

static inline int init_sbd_link(struct sbd_link_device *sl) { return 0; }
static inline bool check_sbd_tx_pending(struct mem_link_device *mld)
	{ return false; }

static inline int sbd_pio_tx(struct sbd_ring_buffer *rb,
	struct sk_buff *skb) { return 0; }
static inline int sbd_pio_rx(struct sbd_ring_buffer *rb, struct sk_buff **skb)
	{ return 0; }
#endif
+
+#define SBD_UL_LIMIT 16 /* Uplink burst limit */
+
+#endif
diff --git a/include/sipc5.h b/include/sipc5.h
new file mode 100644
index 0000000..2bfd250
--- /dev/null
+++ b/include/sipc5.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#ifndef __SIPC5_H__
+#define __SIPC5_H__
+
+#include <linux/types.h>
+#include "modem_v1.h"
+
/* SIPC5 link-layer header
 * Wire layout (__packed): cfg(1) | ch(1) | len(2) | ctrl(1) or ext_len(2),
 * the tail union depending on the EXT field bits in @cfg.
 */
struct __packed sipc5_link_header {
	u8 cfg;		/* start pattern + config flags (SIPC5_*_MASK) */
	u8 ch;		/* SIPC channel number */
	u16 len;	/* frame length (low 16 bits) */
	union {
		struct multi_frame_control ctrl;	/* multi-frame control */
		u16 ext_len;				/* length extension */
	};
};
+
+#define SIPC5_START_MASK (0xF8)
+#define SIPC5_CONFIG_MASK (0x07)
+#define SIPC5_EXT_FIELD_MASK (0x03)
+
+#define SIPC5_PADDING_EXIST (0x04)
+#define SIPC5_EXT_FIELD_EXIST (0x02)
+#define SIPC5_CTL_FIELD_EXIST (0x01)
+
+#define SIPC5_MULTI_FRAME_CFG (0x03)
+#define SIPC5_EXT_LENGTH_CFG (0x02)
+
+#define SIPC5_CONFIG_OFFSET 0
+#define SIPC5_CONFIG_SIZE 1
+
+#define SIPC5_CH_ID_OFFSET 1
+#define SIPC5_CH_ID_SIZE 1
+
+#define SIPC5_LEN_OFFSET 2
+#define SIPC5_LEN_SIZE 2
+
+#define SIPC5_CTRL_OFFSET 4
+#define SIPC5_CTRL_SIZE 1
+
+#define SIPC5_EXT_LEN_OFFSET 4
+#define SIPC5_EXT_LEN_SIZE 2
+
+#define SIPC5_MIN_HEADER_SIZE 4
+#define SIPC5_HEADER_SIZE_WITH_CTL_FLD 5
+#define SIPC5_HEADER_SIZE_WITH_EXT_LEN 6
+#define SIPC5_MAX_HEADER_SIZE SIPC5_HEADER_SIZE_WITH_EXT_LEN
+
+static inline bool sipc5_start_valid(u8 *frm)
+{
+ return (*frm & SIPC5_START_MASK) == SIPC5_START_MASK;
+}
+
+static inline bool sipc5_padding_exist(u8 *frm)
+{
+ return (*frm & SIPC5_PADDING_EXIST) ? true : false;
+}
+
+static inline bool sipc5_multi_frame(u8 *frm)
+{
+ return (*frm & SIPC5_EXT_FIELD_MASK) == SIPC5_MULTI_FRAME_CFG;
+}
+
+static inline bool sipc5_ext_len(u8 *frm)
+{
+ return (*frm & SIPC5_EXT_FIELD_MASK) == SIPC5_EXT_LENGTH_CFG;
+}
+
+static inline u8 sipc5_get_ch(u8 *frm)
+{
+ return frm[SIPC5_CH_ID_OFFSET];
+}
+
+static inline u8 sipc5_get_ctrl(u8 *frm)
+{
+ return frm[SIPC5_CTRL_OFFSET];
+}
+
+static inline unsigned int sipc5_calc_padding_size(unsigned int len)
+{
+ unsigned int residue = len & (BITS_PER_LONG / 8 - 1);
+
+ return residue ? (BITS_PER_LONG / 8 - residue) : 0;
+}
+
+/*
+ * @brief get the length of the header of an SIPC5 link frame
+ * @param frm the pointer to an SIPC5 link frame
+ * @return the size of the header of an SIPC5 link frame
+ */
+static inline unsigned int sipc5_get_hdr_len(u8 *frm)
+{
+ if (unlikely(frm[0] & SIPC5_EXT_FIELD_EXIST)) {
+ if (sipc5_multi_frame(frm))
+ return SIPC5_HEADER_SIZE_WITH_CTL_FLD;
+ else
+ return SIPC5_HEADER_SIZE_WITH_EXT_LEN;
+ } else {
+ return SIPC5_MIN_HEADER_SIZE;
+ }
+}
+
/*
 * @brief get the real length of an SIPC5 link frame WITHOUT padding
 * @param frm the pointer to an SIPC5 link frame
 * @return the real length of an SIPC5 link frame WITHOUT padding
 */
static inline unsigned int sipc5_get_frame_len(u8 *frm)
{
	/* NOTE(review): the u32 read below starts at offset 2, i.e. it takes
	 * the 16-bit len field together with the following ext_len field as
	 * one 32-bit length — presumably a little-endian CPU and tolerance
	 * for unaligned loads are assumed; confirm for new platforms.
	 */
	u16 *sz16 = (u16 *)(frm + SIPC5_LEN_OFFSET);
	u32 *sz32 = (u32 *)(frm + SIPC5_LEN_OFFSET);

	if (unlikely(frm[0] & SIPC5_EXT_FIELD_EXIST)) {
		if (sipc5_multi_frame(frm))
			return *sz16;	/* multi-frame: 16-bit length */
		else
			return *sz32;	/* EXT_LEN: 32-bit combined length */
	} else {
		return *sz16;
	}
}
+
+/*
+ * @brief get the total length of an SIPC5 link frame with padding
+ * @param frm the pointer to an SIPC5 link frame
+ * @return the total length of an SIPC5 link frame with padding
+ */
+static inline unsigned int sipc5_get_total_len(u8 *frm)
+{
+ unsigned int len;
+ unsigned int pad;
+
+ len = sipc5_get_frame_len(frm);
+ pad = sipc5_padding_exist(frm) ? sipc5_calc_padding_size(len) : 0;
+ return len + pad;
+}
+
+/*
+ * @param ch the channel ID
+ * @return true if the channel ID is for FMT channel
+ */
+static inline bool sipc5_fmt_ch(u8 ch)
+{
+ return (ch >= SIPC5_CH_ID_FMT_0 && ch <= SIPC5_CH_ID_FMT_9) ?
+ true : false;
+}
+
+/*
+ * @param ch the channel ID
+ * @return true if the channel ID is for RFS channel
+ */
+static inline bool sipc5_rfs_ch(u8 ch)
+{
+ return (ch >= SIPC5_CH_ID_RFS_0 && ch <= SIPC5_CH_ID_RFS_9) ?
+ true : false;
+}
+
+/*
+ * @param ch the channel ID
+ * @return true if the channel ID is for BOOT channel
+ */
+static inline bool sipc5_boot_ch(u8 ch)
+{
+ return (ch >= SIPC5_CH_ID_BOOT_0 && ch <= SIPC5_CH_ID_BOOT_9) ?
+ true : false;
+}
+
+/*
+ * @param ch the channel ID
+ * @return true if the channel ID is for DUMP channel
+ */
+static inline bool sipc5_dump_ch(u8 ch)
+{
+ return (ch >= SIPC5_CH_ID_DUMP_0 && ch <= SIPC5_CH_ID_DUMP_9) ?
+ true : false;
+}
+
+/*
+ * @param ch the channel ID
+ * @return true if the channel ID is for BOOT/DUMP channel
+ */
+static inline bool sipc5_bootdump_ch(u8 ch)
+{
+ return (ch >= SIPC5_CH_ID_BOOT_0 && ch <= SIPC5_CH_ID_DUMP_9) ?
+ true : false;
+}
+
+/*
+ * @param ch the channel ID
+ * @return true if the channel ID is for IPC channel
+ */
+static inline bool sipc5_ipc_ch(u8 ch)
+{
+ return (ch > 0 && (ch < SIPC5_CH_ID_BOOT_0 || ch > SIPC5_CH_ID_DUMP_9))
+ ? true : false;
+}
+
+static inline bool sipc_ps_ch(u8 ch)
+{
+#if IS_ENABLED(CONFIG_CH_EXTENSION)
+ return (ch >= SIPC_CH_EX_ID_PDP_0 && ch <= SIPC_CH_EX_ID_PDP_MAX) ?
+#else
+ return (ch >= SIPC_CH_ID_PDP_0 && ch <= SIPC_CH_ID_PDP_14) ?
+#endif
+ true : false;
+}
+
+static inline bool sipc_csd_ch(u8 ch)
+{
+ return (ch >= SIPC_CH_ID_CS_VT_DATA && ch <= SIPC_CH_ID_CS_VT_VIDEO) ?
+ true : false;
+}
+
+static inline bool sipc_log_ch(u8 ch)
+{
+ return (ch >= SIPC_CH_ID_CPLOG1 && ch <= SIPC_CH_ID_CPLOG2) ?
+ true : false;
+}
+
+static inline bool sipc_router_ch(u8 ch)
+{
+ return (ch == SIPC_CH_ID_BT_DUN) ?
+ true : false;
+}
+
+static inline bool sipc_misc_ch(u8 ch)
+{
+ return (ch == SIPC_CH_ID_CASS) ? true : false;
+}
+
+#endif
diff --git a/ipc_io_device.c b/ipc_io_device.c
new file mode 100644
index 0000000..ef289c4
--- /dev/null
+++ b/ipc_io_device.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Samsung Electronics.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <trace/events/napi.h>
+#include <net/ip.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "modem_dump.h"
+
/*
 * open() handler for an IPC io-device: bumps the open count and runs
 * init_comm() on every connected link device.
 * Returns 0 on success or the first init_comm() error.
 */
static int ipc_open(struct inode *inode, struct file *filp)
{
	struct io_device *iod = to_io_device(inode->i_cdev);
	struct modem_shared *msd = iod->msd;
	struct link_device *ld;
	int ret;

	filp->private_data = (void *)iod;

	atomic_inc(&iod->opened);

	list_for_each_entry(ld, &msd->link_dev_list, list) {
		if (IS_CONNECTED(iod, ld) && ld->init_comm) {
			ret = ld->init_comm(ld, iod);
			if (ret < 0) {
				/* NOTE(review): links whose init_comm()
				 * already succeeded earlier in this loop are
				 * not terminated on this error path — confirm
				 * whether terminate_comm() should be called
				 * for them here.
				 */
				mif_err("%s<->%s: ERR! init_comm fail(%d)\n",
					iod->name, ld->name, ret);
				atomic_dec(&iod->opened);
				return ret;
			}
		}
	}

	mif_info("%s (opened %d) by %s\n",
		 iod->name, atomic_read(&iod->opened), current->comm);

	return 0;
}
+
+static int ipc_release(struct inode *inode, struct file *filp)
+{
+ struct io_device *iod = (struct io_device *)filp->private_data;
+ struct modem_shared *msd = iod->msd;
+ struct link_device *ld;
+ int i;
+
+ if (atomic_dec_and_test(&iod->opened)) {
+ skb_queue_purge(&iod->sk_rx_q);
+
+ /* purge multi_frame queue */
+ for (i = 0; i < NUM_SIPC_MULTI_FRAME_IDS; i++)
+ skb_queue_purge(&iod->sk_multi_q[i]);
+ }
+
+ list_for_each_entry(ld, &msd->link_dev_list, list) {
+ if (IS_CONNECTED(iod, ld) && ld->terminate_comm)
+ ld->terminate_comm(ld, iod);
+ }
+
+ mif_info("%s (opened %d) by %s\n",
+ iod->name, atomic_read(&iod->opened), current->comm);
+
+ return 0;
+}
+
/*
 * poll() handler: readable when RX data is queued and the modem is up;
 * POLLHUP when the modem has crashed (FMT devices) or has been reset
 * (devices opted in via IO_ATTR_STATE_RESET_NOTI).
 */
static unsigned int ipc_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct io_device *iod = (struct io_device *)filp->private_data;
	struct modem_ctl *mc;
	struct sk_buff_head *rxq;

	if (!iod)
		return POLLERR;

	mc = iod->mc;
	rxq = &iod->sk_rx_q;

	/* register for wakeup only when there is nothing to read right now */
	if (skb_queue_empty(rxq))
		poll_wait(filp, &iod->wq, wait);

	switch (mc->phone_state) {
	case STATE_BOOTING:
	case STATE_ONLINE:
		/* data may have arrived while registering the wait */
		if (!skb_queue_empty(rxq))
			return POLLIN | POLLRDNORM;
		break;
	case STATE_CRASH_EXIT:
	case STATE_CRASH_RESET:
	case STATE_NV_REBUILDING:
	case STATE_CRASH_WATCHDOG:
		mif_err_limited("%s: %s.state == %s\n", iod->name, mc->name, mc_state(mc));

		/* only FMT (control) devices are told about the crash */
		if (iod->format == IPC_FMT)
			return POLLHUP;
		break;
	case STATE_RESET:
		mif_err_limited("%s: %s.state == %s\n", iod->name, mc->name, mc_state(mc));

		if (iod->attrs & IO_ATTR_STATE_RESET_NOTI)
			return POLLHUP;
		break;
	default:
		break;
	}

	return 0;
}
+
/*
 * ioctl() handler for IPC io-devices.
 * Handles CP status queries, user-triggered CP crash, open-count queries
 * and GNSS image load/read; everything else is forwarded to the link
 * device's own ioctl hook when present.
 */
static long ipc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct io_device *iod = (struct io_device *)filp->private_data;
	struct link_device *ld = get_current_link(iod);
	struct modem_ctl *mc = iod->mc;
	enum modem_state p_state;
	int ret = 0, value;

	switch (cmd) {
	case IOCTL_GET_CP_STATUS:
		mif_debug("%s: IOCTL_GET_CP_STATUS\n", iod->name);

		p_state = mc->phone_state;

		if (p_state != STATE_ONLINE) {
			mif_debug("%s: IOCTL_GET_CP_STATUS (state %s)\n",
				iod->name, cp_state_str(p_state));
		}

		switch (p_state) {
		case STATE_NV_REBUILDING:
			/* NV rebuild is reported once, then the modem is
			 * considered online again
			 */
			mc->phone_state = STATE_ONLINE;
			break;
		/* Do not return an internal state */
		case STATE_RESET:
			p_state = STATE_OFFLINE;
			break;
		default:
			break;
		}

		return p_state;

	case IOCTL_TRIGGER_CP_CRASH:
	{
		char *buff = ld->crash_reason.string;
		void __user *user_buff = (void __user *)arg;

		/* record the crash reason per protocol before forcing the
		 * modem into crash dump
		 */
		switch (ld->protocol) {
		case PROTOCOL_SIPC:
			if (arg)
				ld->crash_reason.type = (u32)arg;
			mif_err("%s: IOCTL_TRIGGER_CP_CRASH (%lu)\n",
				iod->name, arg);
			break;

		case PROTOCOL_SIT:
			ld->crash_reason.type =
				CRASH_REASON_RIL_TRIGGER_CP_CRASH;

			/* @arg is a user pointer to a reason string here */
			if (arg) {
				if (copy_from_user(buff, user_buff, CP_CRASH_INFO_SIZE))
					mif_info("No argument from USER\n");
			} else
				mif_info("No argument from USER\n");

			mif_info("Crash Reason:%s\n", buff);
			break;

		default:
			mif_err("ERR - unknown protocol\n");
			break;
		}

		if (!mc->ops.trigger_cp_crash) {
			mif_err("%s: trigger_cp_crash is null\n", iod->name);
			return -EINVAL;
		}

		return mc->ops.trigger_cp_crash(mc);
	}

	case IOCTL_GET_OPENED_STATUS:
		mif_debug("%s: IOCTL_GET_OPENED_STATUS\n", iod->name);
		value = atomic_read(&iod->opened);
		ret = copy_to_user((void __user *)arg, &value, sizeof(value));
		if (ret)
			mif_err("IOCTL_GET_OPENED_STATUS error: %d\n", ret);
		return ret;

	case IOCTL_LOAD_GNSS_IMAGE:
		if (!ld->load_gnss_image) {
			mif_err("%s: load_gnss_image is null\n", iod->name);
			return -EINVAL;
		}

		mif_info_limited("%s: IOCTL_LOAD_GNSS_IMAGE\n", iod->name);

		return ld->load_gnss_image(ld, iod, arg);

	case IOCTL_READ_GNSS_IMAGE:
		if (!ld->read_gnss_image) {
			mif_err("%s: read_gnss_image is null\n", iod->name);
			return -EINVAL;
		}

		mif_info_limited("%s: IOCTL_READ_GNSS_IMAGE\n", iod->name);
		return ld->read_gnss_image(ld, iod, arg);

	default:
		/* If you need to handle the ioctl for specific link device,
		 * then assign the link ioctl handler to ld->ioctl
		 * It will be call for specific link ioctl
		 */
		if (ld->ioctl)
			return ld->ioctl(ld, iod, cmd, arg);

		mif_info("%s: ERR! undefined cmd 0x%X\n", iod->name, cmd);
		return -EINVAL;
	}

	return 0;
}
+
+#define INIT_END_WAIT_MS 150
+
+static ssize_t ipc_write(struct file *filp, const char __user *data,
+ size_t count, loff_t *fpos)
+{
+ struct io_device *iod = (struct io_device *)filp->private_data;
+ struct link_device *ld = get_current_link(iod);
+ struct mem_link_device *mld = to_mem_link_device(ld);
+ struct modem_ctl *mc = iod->mc;
+ u8 cfg = 0;
+ u16 cfg_sit = 0;
+ unsigned int headroom = 0;
+ unsigned int copied = 0, tot_frame = 0, copied_frm = 0;
+ /* 64bit prevent */
+ unsigned int cnt = (unsigned int)count;
+ struct timespec64 ts;
+ int curr_init_end_cnt;
+ int retry = 0;
+
+ /* Record the timestamp */
+ ktime_get_ts64(&ts);
+
+ if (iod->format <= IPC_RFS && iod->ch == 0)
+ return -EINVAL;
+
+ if (unlikely(!cp_online(mc)) && ld->is_ipc_ch(iod->ch)) {
+ mif_debug("%s: ERR! %s->state == %s\n",
+ iod->name, mc->name, mc_state(mc));
+ return -EPERM;
+ }
+
+ if (iod->link_header) {
+ switch (ld->protocol) {
+ case PROTOCOL_SIPC:
+ cfg = sipc5_build_config(iod, ld, cnt);
+ headroom = sipc5_get_hdr_len(&cfg);
+ break;
+ case PROTOCOL_SIT:
+ cfg_sit = exynos_build_fr_config(iod, ld, cnt);
+ headroom = EXYNOS_HEADER_SIZE;
+ break;
+ default:
+ mif_err("protocol error %d\n", ld->protocol);
+ return -EINVAL;
+ }
+ }
+
+ /* Wait for a while if a new CMD_INIT_END is sent */
+ while ((curr_init_end_cnt = atomic_read(&mld->init_end_cnt)) != mld->last_init_end_cnt &&
+ retry++ < 3) {
+ mif_info_limited("%s: wait for INIT_END done (%dms) cnt:%d last:%d cmd:0x%02X\n",
+ iod->name, INIT_END_WAIT_MS,
+ curr_init_end_cnt, mld->last_init_end_cnt,
+ mld->read_ap2cp_irq(mld));
+
+ if (atomic_inc_return(&mld->init_end_busy) > 1)
+ curr_init_end_cnt = -1;
+
+ msleep(INIT_END_WAIT_MS);
+ if (curr_init_end_cnt >= 0)
+ mld->last_init_end_cnt = curr_init_end_cnt;
+
+ atomic_dec(&mld->init_end_busy);
+ }
+
+ if (unlikely(!mld->last_init_end_cnt)) {
+ mif_err_limited("%s: INIT_END is not done\n", iod->name);
+ return -EAGAIN;
+ }
+
+ while (copied < cnt) {
+ struct sk_buff *skb;
+ char *buff;
+ unsigned int remains = cnt - copied;
+ unsigned int tailroom = 0;
+ unsigned int tx_bytes;
+ unsigned int alloc_size;
+ int ret;
+
+ if (check_add_overflow(remains, headroom, &alloc_size))
+ alloc_size = SZ_2K;
+
+ switch (ld->protocol) {
+ case PROTOCOL_SIPC:
+ if (iod->max_tx_size)
+ alloc_size = min_t(unsigned int, alloc_size,
+ iod->max_tx_size);
+ break;
+ case PROTOCOL_SIT:
+ alloc_size = min_t(unsigned int, alloc_size, SZ_2K);
+ break;
+ default:
+ mif_err("protocol error %d\n", ld->protocol);
+ return -EINVAL;
+ }
+
+ /* Calculate tailroom for padding size */
+ if (iod->link_header && ld->aligned)
+ tailroom = ld->calc_padding_size(alloc_size);
+
+ alloc_size += tailroom;
+
+ skb = alloc_skb(alloc_size, GFP_KERNEL);
+ if (!skb) {
+ mif_info("%s: ERR! alloc_skb fail (alloc_size:%d)\n",
+ iod->name, alloc_size);
+ return -ENOMEM;
+ }
+
+ tx_bytes = alloc_size - headroom - tailroom;
+
+ /* Reserve the space for a link header */
+ skb_reserve(skb, headroom);
+
+ /* Copy an IPC message from the user space to the skb */
+ buff = skb_put(skb, tx_bytes);
+ if (copy_from_user(buff, data + copied, tx_bytes)) {
+ mif_err("%s->%s: ERR! copy_from_user fail(count %lu)\n",
+ iod->name, ld->name, (unsigned long)count);
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
+ /* Update size of copied payload */
+ copied += tx_bytes;
+ /* Update size of total frame included hdr, pad size */
+ tot_frame += alloc_size;
+
+ /* Store the IO device, the link device, etc. */
+ skbpriv(skb)->iod = iod;
+ skbpriv(skb)->ld = ld;
+
+ skbpriv(skb)->lnk_hdr = iod->link_header;
+ skbpriv(skb)->sipc_ch = iod->ch;
+
+ /* Copy the timestamp to the skb */
+ skbpriv(skb)->ts = ts;
+#ifdef DEBUG_MODEM_IF_IODEV_TX
+ mif_pkt(iod->ch, "IOD-TX", skb);
+#endif
+
+ /* Build SIPC5 link header*/
+ if (cfg || cfg_sit) {
+ buff = skb_push(skb, headroom);
+
+ switch (ld->protocol) {
+ case PROTOCOL_SIPC:
+ sipc5_build_header(iod, buff, cfg,
+ tx_bytes, cnt - copied);
+ break;
+ case PROTOCOL_SIT:
+ exynos_build_header(iod, ld, buff, cfg_sit, 0, tx_bytes);
+ /* modify next link header for multiframe */
+ if (((cfg_sit >> 8) & EXYNOS_SINGLE_MASK) != EXYNOS_SINGLE_MASK)
+ cfg_sit = modify_next_frame(cfg_sit);
+ break;
+ default:
+ mif_err("protocol error %d\n", ld->protocol);
+ return -EINVAL;
+ }
+ }
+
+ /* Apply padding */
+ if (tailroom)
+ skb_put(skb, tailroom);
+
+ /**
+ * Send the skb with a link device
+ */
+ ret = ld->send(ld, iod, skb);
+ if (ret < 0) {
+ mif_err("%s->%s: %s->send fail(%d, tx:%d len:%lu)\n",
+ iod->name, mc->name, ld->name,
+ ret, tx_bytes, (unsigned long)count);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ copied_frm += ret;
+ }
+
+ if (copied_frm != tot_frame) {
+ mif_info("%s->%s: WARN! %s->send ret:%d (len:%lu)\n",
+ iod->name, mc->name, ld->name,
+ copied_frm, (unsigned long)count);
+ }
+
+ return count;
+}
+
+static ssize_t ipc_read(struct file *filp, char *buf, size_t count,
+ loff_t *fpos)
+{
+ struct io_device *iod = (struct io_device *)filp->private_data;
+ struct sk_buff_head *rxq = &iod->sk_rx_q;
+ struct sk_buff *skb;
+ int copied;
+
+ if (skb_queue_empty(rxq)) {
+ long tmo = msecs_to_jiffies(100);
+
+ wait_event_timeout(iod->wq, !skb_queue_empty(rxq), tmo);
+ }
+
+ skb = skb_dequeue(rxq);
+ if (unlikely(!skb)) {
+ mif_info("%s: NO data in RXQ\n", iod->name);
+ return 0;
+ }
+
+ copied = skb->len > count ? count : skb->len;
+
+ if (copy_to_user(buf, skb->data, copied)) {
+ mif_err("%s: ERR! copy_to_user fail\n", iod->name);
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
+ if (iod->ch == SIPC_CH_ID_CPLOG1) {
+ struct net_device *ndev = iod->ndev;
+
+ if (!ndev) {
+ mif_err("%s: ERR! no iod->ndev\n", iod->name);
+ } else {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += copied;
+ }
+ }
+
+#ifdef DEBUG_MODEM_IF_IODEV_RX
+ mif_pkt(iod->ch, "IOD-RX", skb);
+#endif
+ mif_debug("%s: data:%d copied:%d qlen:%d\n",
+ iod->name, skb->len, copied, rxq->qlen);
+
+ if (skb->len > copied) {
+ skb_pull(skb, copied);
+ skb_queue_head(rxq, skb);
+ } else {
+ dev_consume_skb_any(skb);
+ }
+
+ return copied;
+}
+
+
/* Character-device operations for SIPC IPC io-devices */
const struct file_operations ipc_io_fops = {
	.owner = THIS_MODULE,
	.open = ipc_open,
	.release = ipc_release,
	.poll = ipc_poll,
	.unlocked_ioctl = ipc_ioctl,
	.compat_ioctl = ipc_ioctl,	/* same handler; args are compat-safe */
	.write = ipc_write,
	.read = ipc_read,
};
+
/* Accessor used by the cdev registration code to obtain the IPC fops table */
const struct file_operations *get_ipc_io_fops(void)
{
	return &ipc_io_fops;
}
+
diff --git a/link_device.c b/link_device.c
new file mode 100644
index 0000000..a94dad2
--- /dev/null
+++ b/link_device.c
@@ -0,0 +1,4444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/kallsyms.h>
+#include <linux/shm_ipc.h>
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include <net/icmp.h>
+#include <net/xfrm.h>
+#if IS_ENABLED(CONFIG_ECT)
+#include <soc/google/ect_parser.h>
+#endif
+#include <soc/google/cal-if.h>
+#include <soc/google/modem_notifier.h>
+#include <linux/soc/samsung/exynos-smc.h>
+#include <trace/events/napi.h>
+#include "mcu_ipc.h"
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "link_device.h"
+#include "modem_dump.h"
+#include "modem_ctrl.h"
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+#include "s51xx_pcie.h"
+#endif
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+#include "dit.h"
+#endif
+#include "direct_dm.h"
+
+#define MIF_TX_QUOTA 64
+
+/*
+ * Status codes returned by the secure monitor (SMC) calls that manage
+ * the CP memory region.  The values form an ABI with the EL3 monitor
+ * firmware — do not renumber.
+ */
+enum smc_error_flag {
+ CP_NO_ERROR = 0,
+ CP_NOT_ALIGN_64KB,
+ CP_MEM_TOO_BIG,
+ CP_FLAG_OUT_RANGE,
+ CP_WRONG_TZASC_REGION_NUM,
+ CP_WRONG_BL_SIZE = 5,
+ CP_MEM_OUT_OF_RANGE,
+ CP_NOT_ALIGN_16B,
+ CP_MEM_IN_SECURE_DRAM,
+ CP_ASP_ENABLE_FAIL,
+ CP_ASP_DISABLE_FAIL = 10,
+ CP_NOT_WORKING,
+ CP_ALREADY_WORKING,
+ CP_ALREADY_DUMP_MODE,
+ CP_NOT_VALID_MAGIC,
+ CP_SHOULD_BE_DISABLE = 15,
+ CP_ALREADY_ENABLE_CPMEM_ON,
+ CP_ALREADY_SET_WND,
+ CP_FAIL_TO_SET_WND,
+ CP_INVALID_CP_BASE,
+ CP_CORRUPTED_CP_MEM_INFO = 20,
+ CP_WHILE_CHECKING_SIGN,
+ CP_NOT_WHILE_CHECKING_SIGN,
+ CP_IS_IN_INVALID_STATE,
+ CP_IS_IN_INVALID_STATE2,
+ CP_ERR_WHILE_CP_SIGN_CHECK,
+
+ /* Sign check still running; caller should poll again */
+ CP_CHECK_SIGN_NOT_FINISH = 0x100
+};
+
+static inline void start_tx_timer(struct mem_link_device *mld,
+ struct hrtimer *timer);
+
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_IOMMU)
+#define SYSMMU_BAAW_SIZE 0x8000000
+#endif
+
+/*============================================================================*/
+/*
+ * Drop every skb still waiting for TX, covering both the SBD ring
+ * buffers (when SBD IPC is enabled) and the legacy IPC devices.
+ */
+static inline void purge_txq(struct mem_link_device *mld)
+{
+	struct link_device *ld = &mld->link_dev;
+	int idx;
+
+	/* SBD link: flush the TX skb queue of every ring buffer */
+	if (ld->sbd_ipc) {
+		struct sbd_link_device *sl = &mld->sbd_link_dev;
+
+		for (idx = 0; idx < sl->num_channels; idx++)
+			skb_queue_purge(&sbd_id2rb(sl, idx, TX)->skb_q);
+	}
+
+	/*
+	 * Legacy link: flush the TX skb queue of every IPC map
+	 * (IPC_MAP_FMT, IPC_MAP_NORM_RAW, etc.)
+	 */
+	for (idx = 0; idx < IPC_MAP_MAX; idx++)
+		skb_queue_purge(mld->legacy_link_dev.dev[idx]->skb_txq);
+}
+
+/*============================================================================*/
+/*
+ * Common teardown path once a CP crash is detected or forced.
+ * Invalidates the shared-memory magic so normal IPC stops, halts the
+ * network interfaces, drops all pending TX traffic, and broadcasts the
+ * modem event matching @state before moving the modem state machine.
+ */
+static void shmem_handle_cp_crash(struct mem_link_device *mld,
+ enum modem_state state)
+{
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+
+ /* Disable normal IPC */
+ iowrite32(ld->magic_crash, mld->legacy_link_dev.magic);
+ iowrite32(0, mld->legacy_link_dev.mem_access);
+
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+ tpmon_stop();
+#endif
+
+ stop_net_ifaces(ld, 0);
+ purge_txq(mld);
+
+ /* Translate the crash state into the matching modem notifier event */
+ if (cp_online(mc)) {
+ switch (state) {
+ case STATE_CRASH_RESET:
+ modem_notify_event(MODEM_EVENT_RESET, mc);
+ break;
+ case STATE_CRASH_EXIT:
+ modem_notify_event(MODEM_EVENT_EXIT, mc);
+ break;
+ case STATE_CRASH_WATCHDOG:
+ modem_notify_event(MODEM_EVENT_WATCHDOG, mc);
+ break;
+ default:
+ mif_err("Invalid state to notify\n");
+ break;
+ }
+ }
+
+ if (cp_online(mc) || cp_booting(mc))
+ change_modem_state(mc, state);
+
+ /* Allow a new forced-crash request once this one is fully handled */
+ atomic_set(&mld->forced_cp_crash, 0);
+}
+
+/*
+ * Timer callback armed by link_trigger_cp_crash(): fires when the CP
+ * has not acknowledged a CRASH_EXIT request within the timeout and
+ * forces the crash handling locally.
+ */
+static void handle_no_cp_crash_ack(struct timer_list *t)
+{
+ struct mem_link_device *mld = from_timer(mld, t, crash_ack_timer);
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE) && IS_ENABLED(CONFIG_CP_WRESET_WA)
+ if (ld->link_type == LINKDEV_PCIE) {
+ /* CP_ACTIVE low means the CP is already down: no warm reset needed */
+ if (mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_ACTIVE], true) == 0)
+ mc->s5100_cp_reset_required = false;
+ else
+ mc->s5100_cp_reset_required = true;
+
+ mif_info("Set s5100_cp_reset_required to %u\n", mc->s5100_cp_reset_required);
+ }
+#endif
+
+ if (cp_crashed(mc))
+ mif_debug("%s: STATE_CRASH_EXIT without CRASH_ACK\n", ld->name);
+ else {
+ mif_err("%s: ERR! No CRASH_ACK from CP\n", ld->name);
+ shmem_handle_cp_crash(mld, STATE_CRASH_EXIT);
+ }
+}
+
+/*
+ * Request a forced CP crash/dump.
+ *
+ * Records @crash_type (and, protocol permitting, the @reason string)
+ * in ld->crash_reason, disables normal IPC, arms a no-ACK timer, and
+ * signals the CP via mailbox IRQ (SHMEM) or GPIO (S5100/PCIe).
+ * Ignored unless the CP is online or booting; re-entry while a forced
+ * crash is already in flight is also ignored.
+ */
+static void link_trigger_cp_crash(struct mem_link_device *mld, u32 crash_type,
+ char *reason)
+{
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ bool reason_done = false;
+
+ if (!cp_online(mc) && !cp_booting(mc)) {
+ mif_err("%s: %s.state %s != ONLINE <%ps>\n",
+ ld->name, mc->name, mc_state(mc), CALLER);
+ return;
+ }
+
+ /* Only one forced crash may be in progress at a time */
+ if (atomic_inc_return(&mld->forced_cp_crash) > 1) {
+ mif_err("%s: ALREADY in progress <%ps>\n", ld->name, CALLER);
+ return;
+ }
+
+ /* Disable normal IPC */
+ iowrite32(ld->magic_crash, mld->legacy_link_dev.magic);
+ iowrite32(0, mld->legacy_link_dev.mem_access);
+
+ mif_stop_logging();
+
+ /* Only known protocols may carry a textual crash reason */
+ switch (ld->protocol) {
+ case PROTOCOL_SIPC:
+ case PROTOCOL_SIT:
+ break;
+ default:
+ mif_err("ERR - unknown protocol\n");
+ goto set_type;
+ }
+
+ /* Filter crash types per protocol; some never carry a string */
+ switch (crash_type) {
+ case CRASH_REASON_MIF_TX_ERR:
+ case CRASH_REASON_MIF_RIL_BAD_CH:
+ case CRASH_REASON_MIF_RX_BAD_DATA:
+ case CRASH_REASON_MIF_FORCED:
+ break;
+ case CRASH_REASON_USER:
+ case CRASH_REASON_CLD:
+ if (ld->protocol != PROTOCOL_SIPC)
+ goto set_type;
+ break;
+ case CRASH_REASON_RIL_TRIGGER_CP_CRASH:
+ if (ld->protocol != PROTOCOL_SIT)
+ goto set_type;
+ /* RIL already wrote the reason string elsewhere */
+ reason_done = true;
+ break;
+ default:
+ goto set_type;
+ }
+
+ if (!reason_done && reason && reason[0] != '\0') {
+ strlcpy(ld->crash_reason.string, reason, CP_CRASH_INFO_SIZE);
+ reason_done = true;
+ }
+
+set_type:
+ if (!reason_done)
+ memset(ld->crash_reason.string, 0, CP_CRASH_INFO_SIZE);
+
+ ld->crash_reason.type = crash_type;
+
+ stop_net_ifaces(ld, 0);
+
+ if (mld->debug_info)
+ mld->debug_info();
+
+ /**
+ * If there is no CRASH_ACK from CP in a timeout,
+ * handle_no_cp_crash_ack() will be executed.
+ */
+ mif_add_timer(&mld->crash_ack_timer, FORCE_CRASH_ACK_TIMEOUT,
+ handle_no_cp_crash_ack);
+
+ /* Expose the crash type to the CP via the shared status word */
+ update_ctrl_msg(&mld->ap2cp_united_status, ld->crash_reason.type,
+ mc->sbi_crash_type_mask, mc->sbi_crash_type_pos);
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_SHMEM)
+ if (ld->interrupt_types == INTERRUPT_MAILBOX) {
+ /* Send CRASH_EXIT command to a CP */
+ send_ipc_irq_debug(mld, cmd2int(CMD_CRASH_EXIT));
+ }
+#endif
+
+#if IS_ENABLED(CONFIG_SEC_MODEM_S5100)
+ if (ld->interrupt_types == INTERRUPT_GPIO)
+ /* Raise DUMP_NOTI GPIO to CP */
+ s5100_force_crash_exit_ext(crash_type);
+#endif
+
+ mif_err("%s->%s: CP_CRASH_REQ by %d, %s <%ps>\n",
+ ld->name, mc->name, ld->crash_reason.type,
+ ld->crash_reason.string, CALLER);
+}
+
+/*
+ * Decide whether the user-space telephony daemon is ready for IPC.
+ * The SIT protocol needs no open check; otherwise both the FMT and the
+ * RFS io_devices must have been opened from user space.
+ */
+static bool rild_ready(struct link_device *ld)
+{
+	struct io_device *fmt;
+	struct io_device *rfs;
+	int fmt_open_cnt;
+	int rfs_open_cnt;
+
+	if (ld->protocol == PROTOCOL_SIT)
+		return true;
+
+	fmt = link_get_iod_with_channel(ld, ld->chid_fmt_0);
+	if (!fmt) {
+		mif_err("%s: No FMT io_device\n", ld->name);
+		return false;
+	}
+
+	rfs = link_get_iod_with_channel(ld, ld->chid_rfs_0);
+	if (!rfs) {
+		mif_err("%s: No RFS io_device\n", ld->name);
+		return false;
+	}
+
+	fmt_open_cnt = atomic_read(&fmt->opened);
+	rfs_open_cnt = atomic_read(&rfs->opened);
+	mif_err_limited("%s: %s.opened=%d, %s.opened=%d\n", ld->name,
+			fmt->name, fmt_open_cnt, rfs->name, rfs_open_cnt);
+
+	return fmt_open_cnt > 0 && rfs_open_cnt > 0;
+}
+
+/*
+ * Serialize the AP clock/frequency tables (MIF, CP_CPU, CP, CP_EM,
+ * CP_MCW) into the shared-memory clock table region so the CP can read
+ * them.  Layout: header + table_info[] directory, immediately followed
+ * by the raw u32 frequency values in directory order.
+ */
+static void write_clk_table_to_shmem(struct mem_link_device *mld)
+{
+ struct clock_table *clk_tb;
+ u32 *clk_data;
+ int i, j;
+
+ if (mld->clk_table == NULL) {
+ mif_err("clk_table is not defined\n");
+ return;
+ }
+
+ clk_tb = (struct clock_table *)mld->clk_table;
+
+ strcpy(clk_tb->parser_version, "CT1");
+ clk_tb->total_table_count = mld->total_freq_table_count;
+
+ /* 4-byte table names; NUL-padded where shorter */
+ memcpy(clk_tb->table_info[0].table_name, "MIF\0", 4);
+ clk_tb->table_info[0].table_count = mld->mif_table.num_of_table;
+
+ memcpy(clk_tb->table_info[1].table_name, "CP_C", 4);
+ clk_tb->table_info[1].table_count = mld->cp_cpu_table.num_of_table;
+
+ memcpy(clk_tb->table_info[2].table_name, "CP\0", 4);
+ clk_tb->table_info[2].table_count = mld->cp_table.num_of_table;
+
+ memcpy(clk_tb->table_info[3].table_name, "CP_E", 4);
+ clk_tb->table_info[3].table_count = mld->cp_em_table.num_of_table;
+
+ memcpy(clk_tb->table_info[4].table_name, "CP_M", 4);
+ clk_tb->table_info[4].table_count = mld->cp_mcw_table.num_of_table;
+
+
+ /* Frequency data begins right after the directory entries */
+ clk_data = (u32 *)&(clk_tb->table_info[clk_tb->total_table_count]);
+
+ /* MIF */
+ for (i = 0; i < mld->mif_table.num_of_table; i++) {
+ *clk_data = mld->mif_table.freq[i];
+ clk_data++;
+ }
+
+ /* CP_CPU */
+ for (i = 0; i < mld->cp_cpu_table.num_of_table; i++) {
+ *clk_data = mld->cp_cpu_table.freq[i];
+ clk_data++;
+ }
+
+ /* CP */
+ for (i = 0; i < mld->cp_table.num_of_table; i++) {
+ *clk_data = mld->cp_table.freq[i];
+ clk_data++;
+ }
+
+ /* CP_EM */
+ for (i = 0; i < mld->cp_em_table.num_of_table; i++) {
+ *clk_data = mld->cp_em_table.freq[i];
+ clk_data++;
+ }
+
+ /* CP_MCW */
+ for (i = 0; i < mld->cp_mcw_table.num_of_table; i++) {
+ *clk_data = mld->cp_mcw_table.freq[i];
+ clk_data++;
+ }
+
+ /* Dump what was written, for bring-up debugging */
+ mif_info("PARSER_VERSION: %s\n", clk_tb->parser_version);
+ mif_info("TOTAL_TABLE_COUNT: %d\n", clk_tb->total_table_count);
+
+ for (i = 0; i < clk_tb->total_table_count; i++) {
+ mif_info("TABLE_NAME[%d] : %s\n", i+1,
+ clk_tb->table_info[i].table_name);
+ mif_info("TABLE_COUNT[%d]: %d\n", i+1,
+ clk_tb->table_info[i].table_count);
+ }
+
+ clk_data = (u32 *)&(clk_tb->table_info[clk_tb->total_table_count]);
+
+ for (i = 0; i < clk_tb->total_table_count; i++) {
+ for (j = 0; j < clk_tb->table_info[i].table_count; j++) {
+ mif_info("CLOCK_TABLE[%d][%d] : %d\n",
+ i+1, j+1, *clk_data);
+ clk_data++;
+ }
+ }
+}
+
+/*
+ * Publish the AP-side capability bitmaps to shared memory so the CP can
+ * inspect them during the boot handshake (see cmd_phone_start_handler).
+ */
+static void set_ap_capabilities(struct mem_link_device *mld)
+{
+ int part;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+ cpif_set_bit(mld->ap_capability[0], AP_CAP_0_PKTPROC_UL_BIT);
+#endif
+#if IS_ENABLED(CONFIG_CH_EXTENSION)
+ cpif_set_bit(mld->ap_capability[0], AP_CAP_0_CH_EXTENSION_BIT);
+#endif
+ if (mld->pktproc_use_36bit_addr)
+ cpif_set_bit(mld->ap_capability[0], AP_CAP_0_PKTPROC_36BIT_ADDR_BIT);
+
+ for (part = 0; part < AP_CP_CAP_PARTS; part++) {
+ iowrite32(mld->ap_capability[part], mld->ap_capability_offset[part]);
+
+ mif_info("capability part:%d AP:0x%08x\n", part, mld->ap_capability[part]);
+ }
+}
+
+/*
+ * Run the initialization required by every capability bit that is set
+ * in part @part of the AP capability bitmap.  Returns 0 on success or
+ * the first sub-init error; a set bit without a matching handler yields
+ * -EINVAL.
+ */
+static int init_ap_capabilities(struct mem_link_device *mld, int part)
+{
+ int cap;
+ int ret = 0;
+
+ if (!mld->ap_capability[part])
+ goto out;
+
+ for (cap = 0; cap < AP_CP_CAP_BIT_MAX; cap++) {
+ if (!cpif_check_bit(mld->ap_capability[part], cap))
+ continue;
+
+ /* should handle the matched capability */
+ ret = -EINVAL;
+
+ if (part == 0) {
+ switch (cap) {
+ case AP_CAP_0_PKTPROC_UL_BIT:
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+ ret = pktproc_init_ul(&mld->pktproc_ul);
+ if (ret)
+ mif_err("pktproc_init_ul() ret:%d\n", ret);
+#endif
+ break;
+ case AP_CAP_0_CH_EXTENSION_BIT:
+ case AP_CAP_0_PKTPROC_36BIT_ADDR_BIT:
+ /* No extra init needed for these capabilities */
+ ret = 0;
+ break;
+ default:
+ mif_err("unsupported capability part:%d cap:%d\n", part, cap);
+ break;
+ }
+ }
+
+ if (ret)
+ break;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * Handler for the INIT_START command from the CP: brings up the
+ * data-path sub-blocks (pktproc, DIT, direct DM, tpmon, TOE), publishes
+ * the AP capabilities, and initializes/activates the SBD link before
+ * answering the CP with PIF_INIT_DONE.
+ */
+static void cmd_init_start_handler(struct mem_link_device *mld)
+{
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ int err;
+
+ mif_info("%s: INIT_START <- %s (%s.state:%s init_end_cnt:%d)\n",
+ ld->name, mc->name, mc->name, mc_state(mc),
+ atomic_read(&mld->init_end_cnt));
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+ err = pktproc_init(&mld->pktproc);
+ if (err < 0) {
+ mif_err("pktproc_init() error %d\n", err);
+ return;
+ }
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+ /* -EPERM means DIT is not used on this platform; not fatal */
+ err = dit_init(ld, DIT_INIT_NORMAL, DIT_STORE_NONE);
+ if ((err < 0) && (err != -EPERM)) {
+ mif_err("dit_init() error %d\n", err);
+ return;
+ }
+#endif
+
+#if IS_ENABLED(CONFIG_CPIF_DIRECT_DM)
+ err = direct_dm_init(ld);
+ if (err < 0) {
+ mif_err("direct_dm_init() error %d\n", err);
+ return;
+ }
+#endif
+
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+ tpmon_init();
+#endif
+
+ toe_dev_init(mld);
+
+ if (ld->capability_check)
+ set_ap_capabilities(mld);
+
+ if (!ld->sbd_ipc) {
+ mif_err("%s: LINK_ATTR_SBD_IPC is NOT set\n", ld->name);
+ goto init_exit;
+ }
+
+ err = init_sbd_link(&mld->sbd_link_dev);
+ if (err < 0) {
+ mif_err("%s: init_sbd_link fail(%d)\n", ld->name, err);
+ return;
+ }
+
+ if (mld->attrs & LINK_ATTR_IPC_ALIGNED)
+ ld->aligned = true;
+ else
+ ld->aligned = false;
+
+ sbd_activate(&mld->sbd_link_dev);
+
+init_exit:
+ /* Tell the CP that AP-side PIF initialization is complete */
+ send_ipc_irq(mld, cmd2int(CMD_PIF_INIT_DONE));
+
+ mif_info("%s: PIF_INIT_DONE -> %s\n", ld->name, mc->name);
+}
+
+/* Repeated PHONE_STARTs tolerated before ACKing and, eventually, crashing CP */
+#define PHONE_START_IRQ_MARGIN 4
+#define PHONE_START_ACK_MARGIN 5
+/*
+ * Handler for the PHONE_START (CP_START) command from the CP.
+ *
+ * First reception: verifies the AP/CP capability exchange, initializes
+ * the legacy link, answers INIT_END once the telephony daemon is ready,
+ * and moves the link to LINK_STATE_IPC.  Repeated receptions while
+ * already in IPC state re-send INIT_END; an excessive storm of abnormal
+ * PHONE_STARTs forces a CP crash.
+ */
+static void cmd_phone_start_handler(struct mem_link_device *mld)
+{
+ static int phone_start_count;
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ unsigned long flags;
+ int err;
+
+ mif_info_limited("%s: PHONE_START <- %s (%s.state:%s init_end_cnt:%d)\n",
+ ld->name, mc->name, mc->name, mc_state(mc),
+ atomic_read(&mld->init_end_cnt));
+
+ if (mld->state == LINK_STATE_OFFLINE)
+ phone_start_count = 0;
+
+ /* INIT_END already sent at least once: this PHONE_START is abnormal */
+ if (atomic_read(&mld->init_end_cnt)) {
+ mif_err_limited("Abnormal PHONE_START from CP\n");
+
+ if (++phone_start_count > PHONE_START_IRQ_MARGIN) {
+ int ack_count = phone_start_count -
+ PHONE_START_IRQ_MARGIN;
+
+ if (ack_count > PHONE_START_ACK_MARGIN) {
+ link_trigger_cp_crash(mld,
+ CRASH_REASON_CP_RSV_0,
+ "Abnormal PHONE_START from CP");
+ return;
+ }
+
+ mif_err("%s: CMD(0x%x) -> %s\n", ld->name,
+ cmd2int(ack_count), mc->name);
+ send_ipc_irq_debug(mld, cmd2int(ack_count));
+
+ return;
+ }
+ }
+
+ spin_lock_irqsave(&mld->state_lock, flags);
+
+ if (mld->state == LINK_STATE_IPC) {
+ /*
+ * If there is no INIT_END command from AP, CP sends a CP_START
+ * command to AP periodically until it receives INIT_END from AP
+ * even though it has already been in ONLINE state.
+ */
+ if (rild_ready(ld)) {
+ mif_info("%s: INIT_END -> %s\n", ld->name, mc->name);
+ atomic_inc(&mld->init_end_cnt);
+ send_ipc_irq_debug(mld, cmd2int(CMD_INIT_END));
+ }
+ goto exit;
+ }
+
+ if (ld->capability_check) {
+ int part;
+
+ for (part = 0; part < AP_CP_CAP_PARTS; part++) {
+ /* get cp_capability */
+ mld->cp_capability[part] = ioread32(mld->cp_capability_offset[part]);
+
+ if ((mld->ap_capability[part] ^ mld->cp_capability[part]) &
+ mld->ap_capability[part]) {
+ /* if at least one feature is owned by AP only, crash CP */
+ mif_err("ERR! capability part:%d AP:0x%08x CP:0x%08x\n",
+ part, mld->ap_capability[part], mld->cp_capability[part]);
+ goto capability_failed;
+ }
+
+ mif_info("capability part:%d AP:0x%08x CP:0x%08x\n",
+ part, mld->ap_capability[part], mld->cp_capability[part]);
+
+ err = init_ap_capabilities(mld, part);
+ if (err) {
+ mif_err("%s: init_ap_capabilities part:%d fail(%d)\n",
+ ld->name, part, err);
+ goto exit;
+ }
+ }
+ }
+
+ err = init_legacy_link(&mld->legacy_link_dev);
+ if (err) {
+ mif_err("%s: init_legacy_link fail(%d)\n", ld->name, err);
+ goto exit;
+ }
+
+ /* Fresh boot: clear any TX flow control left from the previous run */
+ atomic_set(&ld->netif_stopped, 0);
+ ld->tx_flowctrl_mask = 0;
+
+ if (rild_ready(ld)) {
+ mif_info("%s: INIT_END -> %s\n", ld->name, mc->name);
+ atomic_inc(&mld->init_end_cnt);
+ send_ipc_irq_debug(mld, cmd2int(CMD_INIT_END));
+ }
+
+#if IS_ENABLED(CONFIG_MCU_IPC)
+ if (mld->ap2cp_msg.type == MAILBOX_SR)
+ cp_mbox_dump_sr();
+#endif
+
+ ld->crash_reason.type = CRASH_REASON_NONE;
+ memset(ld->crash_reason.string, 0, CP_CRASH_INFO_SIZE);
+ mif_info("Set crash_reason type:%d\n", ld->crash_reason.type);
+
+ if ((ld->protocol == PROTOCOL_SIT) && (ld->link_type == LINKDEV_SHMEM))
+ write_clk_table_to_shmem(mld);
+
+ mld->state = LINK_STATE_IPC;
+ complete_all(&mc->init_cmpl);
+ modem_notify_event(MODEM_EVENT_ONLINE, mc);
+
+exit:
+ if (ld->sbd_ipc)
+ start_tx_timer(mld, &mld->sbd_print_timer);
+ spin_unlock_irqrestore(&mld->state_lock, flags);
+ return;
+
+capability_failed:
+ /* Drop the lock before forcing a crash; the crash path re-locks */
+ spin_unlock_irqrestore(&mld->state_lock, flags);
+ link_trigger_cp_crash(mld, CRASH_REASON_MIF_FORCED, "CP lacks capability\n");
+ return;
+}
+
+/*
+ * Handler for CP_CRASH_RESET: mark the link offline, default the crash
+ * reason to an active CP crash, and run the common crash teardown.
+ */
+static void cmd_crash_reset_handler(struct mem_link_device *mld)
+{
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mld->state_lock, flags);
+ mld->state = LINK_STATE_OFFLINE;
+ if (ld->crash_reason.type == CRASH_REASON_NONE)
+ ld->crash_reason.type = CRASH_REASON_CP_ACT_CRASH;
+ spin_unlock_irqrestore(&mld->state_lock, flags);
+
+ mif_err("%s<-%s: ERR! CP_CRASH_RESET\n", ld->name, mc->name);
+
+ shmem_handle_cp_crash(mld, STATE_CRASH_RESET);
+}
+
+/*
+ * Handler for CP_CRASH_EXIT — also serves as the CRASH_ACK to a forced
+ * crash request: stop logging, mark the link crashed, cancel the no-ACK
+ * timer, and run the common crash teardown.
+ */
+static void cmd_crash_exit_handler(struct mem_link_device *mld)
+{
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ unsigned long flags;
+
+ mif_stop_logging();
+
+ spin_lock_irqsave(&mld->state_lock, flags);
+ mld->state = LINK_STATE_CP_CRASH;
+ if (ld->crash_reason.type == CRASH_REASON_NONE)
+ ld->crash_reason.type = CRASH_REASON_CP_ACT_CRASH;
+ spin_unlock_irqrestore(&mld->state_lock, flags);
+
+ /* CP acknowledged: handle_no_cp_crash_ack() is no longer needed */
+ if (timer_pending(&mld->crash_ack_timer))
+ del_timer(&mld->crash_ack_timer);
+
+ if (atomic_read(&mld->forced_cp_crash))
+ mif_err("%s<-%s: CP_CRASH_ACK\n", ld->name, mc->name);
+ else
+ mif_err("%s<-%s: ERR! CP_CRASH_EXIT\n", ld->name, mc->name);
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE) && IS_ENABLED(CONFIG_CP_WRESET_WA)
+ if (ld->link_type == LINKDEV_PCIE) {
+ /* CP_ACTIVE low means the CP is already down: no warm reset needed */
+ if (mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_ACTIVE], true) == 0)
+ mc->s5100_cp_reset_required = false;
+ else
+ mc->s5100_cp_reset_required = true;
+
+ mif_info("Set s5100_cp_reset_required to %u\n", mc->s5100_cp_reset_required);
+ }
+#endif
+
+ shmem_handle_cp_crash(mld, STATE_CRASH_EXIT);
+}
+
+/* Dispatch one command interrupt received from the CP. */
+static void shmem_cmd_handler(struct mem_link_device *mld, u16 cmd)
+{
+ struct link_device *ld = &mld->link_dev;
+
+ switch (cmd) {
+ case CMD_INIT_START:
+ cmd_init_start_handler(mld);
+ break;
+
+ case CMD_PHONE_START:
+ cmd_phone_start_handler(mld);
+ break;
+
+ case CMD_CRASH_RESET:
+ cmd_crash_reset_handler(mld);
+ break;
+
+ case CMD_CRASH_EXIT:
+ cmd_crash_exit_handler(mld);
+ break;
+
+ default:
+ mif_err("%s: Unknown command 0x%04X\n", ld->name, cmd);
+ break;
+ }
+}
+
+/*============================================================================*/
+/*
+ * Return the free space (in bytes) of a legacy TX circular queue, or a
+ * negative errno: -EIO when the head/tail pointers are invalid, -ENOSPC
+ * when fewer than @count bytes are available.
+ */
+static inline int check_txq_space(struct mem_link_device *mld,
+				  struct legacy_ipc_device *dev,
+				  unsigned int qsize, unsigned int in,
+				  unsigned int out, unsigned int count)
+{
+	struct link_device *ld = &mld->link_dev;
+	struct modem_ctl *mc = ld->mc;
+	unsigned int usable;
+
+	if (!circ_valid(qsize, in, out)) {
+		mif_err_limited("%s: ERR! Invalid %s_TXQ{qsize:%d in:%d out:%d}\n",
+				ld->name, dev->name, qsize, in, out);
+		return -EIO;
+	}
+
+	usable = circ_get_space(qsize, in, out);
+	if (likely(usable >= count))
+		return usable;
+
+	/* Not enough room; only worth logging while the CP is up */
+	if (cp_online(mc)) {
+		mif_err_limited("%s: NOSPC %s_TX{qsize:%d in:%d out:%d space:%d len:%d}\n",
+				ld->name, dev->name, qsize,
+				in, out, usable, count);
+	}
+
+	return -ENOSPC;
+}
+
+/*
+ * Copy one skb into the legacy TX circular buffer of @dev.
+ * Returns the number of bytes written, or a negative errno from
+ * check_txq_space().  The barriers order the data copy against the head
+ * update that makes the data visible to the CP.
+ */
+static int txq_write(struct mem_link_device *mld, struct legacy_ipc_device *dev,
+ struct sk_buff *skb)
+{
+ char *src = skb->data;
+ unsigned int count = skb->len;
+ char *dst = get_txq_buff(dev);
+ unsigned int qsize = get_txq_buff_size(dev);
+ unsigned int in = get_txq_head(dev);
+ unsigned int out = get_txq_tail(dev);
+ int space;
+
+ space = check_txq_space(mld, dev, qsize, in, out, count);
+ if (unlikely(space < 0))
+ return space;
+
+ barrier();
+
+ circ_write(dst, src, qsize, in, count);
+
+ barrier();
+
+ set_txq_head(dev, circ_new_ptr(qsize, in, count));
+
+ /* Commit the item before incrementing the head */
+ smp_mb();
+
+ return count;
+}
+
+/*
+ * Drain the skb TX queue of one legacy IPC device into its circular
+ * buffer.  Returns the total bytes written, or the negative errno of
+ * the first failing write (that skb is requeued at the head for retry).
+ */
+static int tx_frames_to_dev(struct mem_link_device *mld,
+ struct legacy_ipc_device *dev)
+{
+ struct sk_buff_head *skb_txq = dev->skb_txq;
+ int tx_bytes = 0;
+ int ret = 0;
+
+ while (1) {
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(skb_txq);
+ if (unlikely(!skb))
+ break;
+
+ ret = txq_write(mld, dev, skb);
+ if (unlikely(ret < 0)) {
+ /* Take the skb back to the skb_txq */
+ skb_queue_head(skb_txq, skb);
+ break;
+ }
+
+ tx_bytes += ret;
+
+#ifdef DEBUG_MODEM_IF_LINK_TX
+ mif_pkt(skbpriv(skb)->sipc_ch, "LNK-TX", skb);
+#endif
+
+ dev_consume_skb_any(skb);
+ }
+
+ return (ret < 0) ? ret : tx_bytes;
+}
+
+/*
+ * hrtimer callback that drains the legacy-link TX skb queues into the
+ * shared-memory ring buffers and interrupts the CP for every device
+ * that received data.  Re-arms itself while any queue remains busy,
+ * full, or non-empty.  Runs entirely under mc->lock.
+ */
+static enum hrtimer_restart tx_timer_func(struct hrtimer *timer)
+{
+ struct mem_link_device *mld;
+ struct link_device *ld;
+ struct modem_ctl *mc;
+ int i;
+ bool need_schedule;
+ u16 mask;
+ unsigned long flags;
+
+ mld = container_of(timer, struct mem_link_device, tx_timer);
+ ld = &mld->link_dev;
+ mc = ld->mc;
+
+ need_schedule = false;
+ mask = 0;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ if (unlikely(!ipc_active(mld)))
+ goto exit;
+
+ for (i = 0; i < IPC_MAP_MAX; i++) {
+ struct legacy_ipc_device *dev = mld->legacy_link_dev.dev[i];
+ int ret;
+
+ ret = txq_check_busy(mld, dev);
+ if (ret) {
+ need_schedule = true;
+ continue;
+ }
+
+ ret = tx_frames_to_dev(mld, dev);
+ if (unlikely(ret < 0)) {
+ if (ret == -EBUSY || ret == -ENOSPC) {
+ need_schedule = true;
+ txq_stop(mld, dev);
+ /* If txq has 2 or more packet and 2nd packet
+ * has -ENOSPC return, It request irq to consume
+ * the TX ring-buffer from CP
+ */
+ mask |= msg_mask(dev);
+ continue;
+ } else {
+ /* Unrecoverable TX error: force a CP dump */
+ link_trigger_cp_crash(mld, CRASH_REASON_MIF_TX_ERR,
+ "tx_frames_to_dev error");
+ need_schedule = false;
+ goto exit;
+ }
+ }
+
+ if (ret > 0)
+ mask |= msg_mask(dev);
+
+ if (!skb_queue_empty(dev->skb_txq))
+ need_schedule = true;
+ }
+
+ /* One interrupt covers all devices that got new data */
+ if (mask)
+ send_ipc_irq(mld, mask2int(mask));
+
+exit:
+ if (need_schedule) {
+ ktime_t ktime = ktime_set(0, mld->tx_period_ns);
+
+ hrtimer_start(timer, ktime, HRTIMER_MODE_REL);
+ }
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * Direct TX of one skb to a legacy IPC device ring buffer.
+ * Returns 1 when handling is complete (sent, or dropped because IPC is
+ * inactive / a fatal error occurred) and -1 when the skb was requeued
+ * and @timer was scheduled to retry later.
+ */
+static int tx_func(struct mem_link_device *mld, struct hrtimer *timer,
+ struct legacy_ipc_device *dev, struct sk_buff *skb)
+{
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ struct sk_buff_head *skb_txq = dev->skb_txq;
+ bool need_schedule = false;
+ u16 mask = msg_mask(dev);
+ unsigned long flags;
+ int ret = 0;
+
+ /* IPC down: drop the skb silently */
+ spin_lock_irqsave(&mc->lock, flags);
+ if (unlikely(!ipc_active(mld))) {
+ spin_unlock_irqrestore(&mc->lock, flags);
+ dev_kfree_skb_any(skb);
+ goto exit;
+ }
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ ret = txq_write(mld, dev, skb);
+ if (unlikely(ret < 0)) {
+ if (ret == -EBUSY || ret == -ENOSPC) {
+ /* Queue full: park the skb and let the timer retry */
+ skb_queue_head(skb_txq, skb);
+ need_schedule = true;
+ txq_stop(mld, dev);
+ /* If txq has 2 or more packet and 2nd packet
+ * has -ENOSPC return, It request irq to consume
+ * the TX ring-buffer from CP
+ */
+ send_ipc_irq(mld, mask2int(mask));
+ } else {
+ link_trigger_cp_crash(mld, CRASH_REASON_MIF_TX_ERR,
+ "tx_frames_to_dev error");
+ need_schedule = false;
+ }
+ goto exit;
+ }
+
+#ifdef DEBUG_MODEM_IF_LINK_TX
+ mif_pkt(skbpriv(skb)->sipc_ch, "LNK-TX", skb);
+#endif
+
+ dev_consume_skb_any(skb);
+
+ /* Notify the CP that new data is in the ring buffer */
+ send_ipc_irq(mld, mask2int(mask));
+
+exit:
+ if (need_schedule) {
+ ktime_t ktime = ktime_set(0, mld->tx_period_ns);
+
+ hrtimer_start(timer, ktime, HRTIMER_MODE_REL);
+
+ return -1;
+ } else
+ return 1;
+}
+
+/*
+ * Arm @timer with @interval nanoseconds unless it is already queued.
+ * No-op while the CP is offline.
+ */
+static inline void start_tx_timer_custom(struct mem_link_device *mld,
+ struct hrtimer *timer, unsigned int interval)
+{
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ if (unlikely(cp_offline(mc))) {
+ spin_unlock_irqrestore(&mc->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ /* tx_timer_lock serializes arming against cancel_tx_timer() */
+ spin_lock_irqsave(&mc->tx_timer_lock, flags);
+ if (!hrtimer_is_queued(timer)) {
+ ktime_t ktime = ktime_set(0, interval);
+
+ hrtimer_start(timer, ktime, HRTIMER_MODE_REL);
+ }
+ spin_unlock_irqrestore(&mc->tx_timer_lock, flags);
+}
+
+/* Arm @timer with the link's default TX period. */
+static inline void start_tx_timer(struct mem_link_device *mld,
+ struct hrtimer *timer)
+{
+ start_tx_timer_custom(mld, timer, mld->tx_period_ns);
+}
+
+/* Start every TX-drain timer that applies to the active link setup. */
+static inline void shmem_start_timers(struct mem_link_device *mld)
+{
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+ start_tx_timer(mld, &mld->pktproc_tx_timer);
+#endif
+ if (sbd_active(&mld->sbd_link_dev))
+ start_tx_timer(mld, &mld->sbd_tx_timer);
+ start_tx_timer(mld, &mld->tx_timer);
+}
+
+/* Cancel @timer under the TX-timer lock if it is currently active. */
+static inline void cancel_tx_timer(struct mem_link_device *mld,
+ struct hrtimer *timer)
+{
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->tx_timer_lock, flags);
+ if (hrtimer_active(timer))
+ hrtimer_cancel(timer);
+ spin_unlock_irqrestore(&mc->tx_timer_lock, flags);
+}
+
+/* Stop every TX-drain timer started by shmem_start_timers(). */
+static inline void shmem_stop_timers(struct mem_link_device *mld)
+{
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+ cancel_tx_timer(mld, &mld->pktproc_tx_timer);
+#endif
+ if (sbd_active(&mld->sbd_link_dev))
+ cancel_tx_timer(mld, &mld->sbd_tx_timer);
+ cancel_tx_timer(mld, &mld->tx_timer);
+}
+
+/*
+ * Move every queued skb of one SBD ring buffer into shared memory.
+ * Returns the total bytes written, or the negative errno of the first
+ * failing write (that skb is requeued at the head for retry).
+ */
+static int tx_frames_to_rb(struct sbd_ring_buffer *rb)
+{
+ struct sk_buff_head *skb_txq = &rb->skb_q;
+ int tx_bytes = 0;
+ int ret = 0;
+
+ while (1) {
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(skb_txq);
+ if (unlikely(!skb))
+ break;
+
+ ret = sbd_pio_tx(rb, skb);
+ if (unlikely(ret < 0)) {
+ /* Take the skb back to the skb_txq */
+ skb_queue_head(skb_txq, skb);
+ break;
+ }
+
+ tx_bytes += ret;
+#ifdef DEBUG_MODEM_IF_LINK_TX
+ mif_pkt(rb->ch, "LNK-TX", skb);
+#endif
+ dev_consume_skb_any(skb);
+ }
+
+ return (ret < 0) ? ret : tx_bytes;
+}
+
+/*
+ * hrtimer callback that drains every SBD TX ring buffer and sends a
+ * single SEND_DATA interrupt to the CP when anything was written.
+ * Re-arms itself while any ring remains busy, full, or non-empty.
+ */
+static enum hrtimer_restart sbd_tx_timer_func(struct hrtimer *timer)
+{
+ struct mem_link_device *mld =
+ container_of(timer, struct mem_link_device, sbd_tx_timer);
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ struct sbd_link_device *sl = &mld->sbd_link_dev;
+ int i;
+ bool need_schedule = false;
+ u16 mask = 0;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ if (unlikely(!ipc_active(mld))) {
+ spin_unlock_irqrestore(&mc->lock, flags);
+ goto exit;
+ }
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ for (i = 0; i < sl->num_channels; i++) {
+ struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, TX);
+ int ret;
+
+ ret = sbd_txq_check_busy(rb);
+ if (ret) {
+ need_schedule = true;
+ continue;
+ }
+
+ ret = tx_frames_to_rb(rb);
+ if (unlikely(ret < 0)) {
+ if (ret == -EBUSY || ret == -ENOSPC) {
+ /* Ring full: pause this queue, retry via timer */
+ need_schedule = true;
+ sbd_txq_stop(rb);
+ mask = MASK_SEND_DATA;
+ continue;
+ } else {
+ link_trigger_cp_crash(mld, CRASH_REASON_MIF_TX_ERR,
+ "tx_frames_to_rb error");
+ need_schedule = false;
+ goto exit;
+ }
+ }
+
+ if (ret > 0)
+ mask = MASK_SEND_DATA;
+
+ if (!skb_queue_empty(&rb->skb_q))
+ need_schedule = true;
+ }
+
+ /* Re-check IPC state under the lock before interrupting the CP */
+ if (mask) {
+ spin_lock_irqsave(&mc->lock, flags);
+ if (unlikely(!ipc_active(mld))) {
+ spin_unlock_irqrestore(&mc->lock, flags);
+ need_schedule = false;
+ goto exit;
+ }
+ send_ipc_irq(mld, mask2int(mask));
+ spin_unlock_irqrestore(&mc->lock, flags);
+ }
+
+exit:
+ if (need_schedule) {
+ ktime_t ktime = ktime_set(0, mld->tx_period_ns);
+
+ hrtimer_start(timer, ktime, HRTIMER_MODE_REL);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * Direct TX of one skb to an SBD ring buffer.
+ * Returns 1 when handling is complete (sent, or dropped because IPC is
+ * inactive / a fatal error occurred) and -1 when the skb was requeued
+ * and @timer was scheduled to retry later.
+ */
+static int sbd_tx_func(struct mem_link_device *mld, struct hrtimer *timer,
+ struct sbd_ring_buffer *rb, struct sk_buff *skb)
+{
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ bool need_schedule = false;
+ u16 mask = MASK_SEND_DATA;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ /* IPC down: drop the skb silently */
+ spin_lock_irqsave(&mc->lock, flags);
+ if (unlikely(!ipc_active(mld))) {
+ spin_unlock_irqrestore(&mc->lock, flags);
+ dev_kfree_skb_any(skb);
+ goto exit;
+ }
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ ret = sbd_pio_tx(rb, skb);
+ if (unlikely(ret < 0)) {
+ if (ret == -EBUSY || ret == -ENOSPC) {
+ /* Ring full: park the skb and let the timer retry */
+ skb_queue_head(&rb->skb_q, skb);
+ need_schedule = true;
+ send_ipc_irq(mld, mask2int(mask));
+ } else {
+ link_trigger_cp_crash(mld, CRASH_REASON_MIF_TX_ERR,
+ "tx_frames_to_rb error");
+ need_schedule = false;
+ }
+ goto exit;
+ }
+
+#ifdef DEBUG_MODEM_IF_LINK_TX
+ mif_pkt(rb->ch, "LNK-TX", skb);
+#endif
+ dev_consume_skb_any(skb);
+
+ /* Re-check IPC state under the lock before interrupting the CP */
+ spin_lock_irqsave(&mc->lock, flags);
+ if (unlikely(!ipc_active(mld))) {
+ spin_unlock_irqrestore(&mc->lock, flags);
+ need_schedule = false;
+ goto exit;
+ }
+ send_ipc_irq(mld, mask2int(mask));
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+exit:
+ if (need_schedule) {
+ ktime_t ktime = ktime_set(0, mld->tx_period_ns);
+
+ hrtimer_start(timer, ktime, HRTIMER_MODE_REL);
+ return -1;
+ } else
+ return 1;
+}
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+/*
+ * hrtimer callback for PKTPROC UL: for each queue, exposes completed
+ * descriptors to the CP by advancing the fore pointer (or kicks DIT
+ * when DIT owns the queue's TX path) and raises a SEND_DATA interrupt
+ * when new data became visible.  Re-arms itself while work remains.
+ */
+static enum hrtimer_restart pktproc_tx_timer_func(struct hrtimer *timer)
+{
+ struct mem_link_device *mld = container_of(timer, struct mem_link_device, pktproc_tx_timer);
+ struct pktproc_adaptor_ul *ppa_ul = &mld->pktproc_ul;
+ struct modem_ctl *mc = mld->link_dev.mc;
+ bool need_schedule = false;
+ bool need_irq = false;
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+ bool need_dit = false;
+#endif
+ unsigned long flags;
+ unsigned int count;
+ int ret, i;
+
+ for (i = 0; i < ppa_ul->num_queue; i++) {
+ struct pktproc_queue_ul *q = ppa_ul->q[i];
+
+ ret = pktproc_ul_q_check_busy(q);
+ if (ret) {
+ need_schedule = true;
+ continue;
+ }
+
+ do {
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+ if (dit_check_dir_use_queue(DIT_DIR_TX, q->q_idx)) {
+ if (circ_empty(q->done_ptr, q->q_info->fore_ptr))
+ break;
+
+ need_dit = true;
+ } else
+#endif
+ {
+ count = circ_get_usage(q->q_info->num_desc,
+ q->done_ptr, q->q_info->fore_ptr);
+ if (count == 0)
+ break;
+
+ q->update_fore_ptr(q, count);
+ need_irq = true;
+ }
+ need_schedule = true;
+ } while (0);
+ }
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+ /* irq will be raised after dit_kick() */
+ if (need_dit) {
+ dit_kick(DIT_DIR_TX, false);
+ } else
+#endif
+ if (need_irq) {
+ spin_lock_irqsave(&mc->lock, flags);
+ if (ipc_active(mld))
+ send_ipc_irq(mld, mask2int(MASK_SEND_DATA));
+ spin_unlock_irqrestore(&mc->lock, flags);
+ }
+
+ if (need_schedule) {
+ spin_lock_irqsave(&mc->tx_timer_lock, flags);
+ if (!hrtimer_is_queued(timer)) {
+ ktime_t ktime = ktime_set(0, mld->tx_period_ns);
+
+ hrtimer_start(timer, ktime, HRTIMER_MODE_REL);
+ }
+ spin_unlock_irqrestore(&mc->tx_timer_lock, flags);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * Return true if @skb is an ICMP/ICMPv6 Echo Request ("ping").
+ *
+ * Used by the UL path to kick the TX timer immediately for pings so
+ * that ping latency is not inflated by the normal TX timer period.
+ *
+ * NOTE(review): the transport header is located with
+ * sizeof(struct iphdr) / sizeof(struct ipv6hdr), i.e. IPv4 options and
+ * IPv6 extension headers are assumed absent — same 20/40-byte offsets
+ * as before, now spelled as sizeof() instead of magic numbers, and the
+ * type values use ICMP_ECHO / ICMPV6_ECHO_REQUEST instead of 8 / 128.
+ */
+static inline bool is_icmp_pkt(struct sk_buff *skb)
+{
+	struct iphdr *iph = (struct iphdr *)skb->data;
+	struct icmphdr *icmph;
+
+	switch (ip_hdr(skb)->version) {
+	case 4:
+		if (iph->protocol == IPPROTO_ICMP) {
+			icmph = (struct icmphdr *)(skb->data + sizeof(struct iphdr));
+			if (icmph->type == ICMP_ECHO)
+				return true;
+		}
+		break;
+	case 6:
+		if (ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
+			icmph = (struct icmphdr *)(skb->data + sizeof(struct ipv6hdr));
+			if (icmph->type == ICMPV6_ECHO_REQUEST)
+				return true;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return false;
+}
+
+/*
+ * Queue one UL skb on a PKTPROC queue.
+ *
+ * Queue selection:
+ *  1) High priority (PKTPROC_UL_HIPRIO) by default.
+ *  2) With CONFIG_CP_PKTPROC_UL_SINGLE_QUEUE and a single queue,
+ *     everything goes to PKTPROC_UL_QUEUE_0.
+ *  3) skbs not marked high priority (queue_mapping != 1), oversized
+ *     high-priority skbs, and IPsec-transformed skbs (b/247006240)
+ *     fall back to PKTPROC_UL_NORM; packets too large even for that
+ *     queue are rejected with -EINVAL.
+ *
+ * Returns the value of q->send_packet() on success, -EINVAL for
+ * oversized packets, -EBUSY when the queue lock was contended, or a
+ * negative errno from the queue (fatal errors force a CP crash).
+ * The TX timer is (re)armed on every non-zero result; ICMP echo
+ * requests use a zero interval to minimize ping latency.
+ */
+static int xmit_ipc_to_pktproc(struct mem_link_device *mld, struct sk_buff *skb)
+{
+	struct pktproc_adaptor_ul *ppa_ul = &mld->pktproc_ul;
+	/* Set the ul queue to high priority by default. */
+	struct pktproc_queue_ul *q = ppa_ul->q[PKTPROC_UL_HIPRIO];
+	struct sec_path *sp = skb_sec_path(skb);
+	int len;
+	int ret = -EBUSY;
+	unsigned long flags;
+	bool icmp_pkt = false;
+
+	/* The CP may require the payload to be padded to its alignment */
+	if (ppa_ul->padding_required)
+		len = skb->len + CP_PADDING;
+	else
+		len = skb->len;
+
+	icmp_pkt = is_icmp_pkt(skb);
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL_SINGLE_QUEUE)
+	if (ppa_ul->num_queue == 1)
+		q = ppa_ul->q[PKTPROC_UL_QUEUE_0];
+	else
+#endif
+	if (skb->queue_mapping != 1 ||
+	    (skb->queue_mapping == 1 && len > q->max_packet_size) ||
+	    (sp && sp->len > 0)) {
+		q = ppa_ul->q[PKTPROC_UL_NORM];
+		if (len > q->max_packet_size) {
+			mif_err_limited("ERR!PKTPROC UL QUEUE:%d skb len:%d too large (max:%u)\n",
+					q->q_idx, len, q->max_packet_size);
+			return -EINVAL;
+		}
+	}
+
+	/* trylock: on contention just report -EBUSY and let the timer retry */
+	if (spin_trylock_irqsave(&q->lock, flags)) {
+		ret = q->send_packet(q, skb);
+		spin_unlock_irqrestore(&q->lock, flags);
+	}
+
+	if (unlikely(ret < 0)) {
+		if ((ret != -EBUSY) && (ret != -ENOSPC)) {
+			link_trigger_cp_crash(mld, CRASH_REASON_MIF_TX_ERR,
+					      "tx_frames_to_pktproc error");
+			return ret;
+		}
+
+		pktproc_ul_q_stop(q);
+		goto exit;
+	}
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+	/* DIT owns the skb when its TX path handles this queue */
+	if (!dit_check_dir_use_queue(DIT_DIR_TX, q->q_idx))
+#endif
+		dev_consume_skb_any(skb);
+
+exit:
+	/* start tx timer with 0 interval for icmp packets only to reduce the ping latency */
+	if (ret > 0 && icmp_pkt)
+		start_tx_timer_custom(mld, &mld->pktproc_tx_timer, 0);
+	else if (ret)
+		/* start timer even on error */
+		start_tx_timer(mld, &mld->pktproc_tx_timer);
+
+	return ret;
+}
+#endif /* CONFIG_CP_PKTPROC_UL */
+
+/*
+ * xmit_ipc_to_rb - queue an IPC frame onto the SBD TX ring for channel @ch.
+ *
+ * The skb is appended to the ring's software queue. If the SBD TX timer is
+ * already active, flushing is left to the timer; otherwise up to
+ * MIF_TX_QUOTA queued frames are flushed inline under the ring lock.
+ * Returns the (possibly clamped) skb length on success, -ENODEV when no
+ * ring exists for @ch, or -EBUSY when the software queue is full.
+ */
+static int xmit_ipc_to_rb(struct mem_link_device *mld, u8 ch,
+			  struct sk_buff *skb)
+{
+	int ret, ret2;
+	struct link_device *ld = &mld->link_dev;
+	struct io_device *iod = skbpriv(skb)->iod;
+	struct modem_ctl *mc = ld->mc;
+	struct sbd_ring_buffer *rb = sbd_ch2rb_with_skb(&mld->sbd_link_dev, ch, TX, skb);
+	struct sk_buff_head *skb_txq;
+	unsigned long flags = 0;
+	int quota = MIF_TX_QUOTA;
+
+	if (!rb) {
+		mif_err("%s: %s->%s: ERR! NO SBD RB {ch:%d}\n",
+			ld->name, iod->name, mc->name, ch);
+		return -ENODEV;
+	}
+
+	skb_txq = &rb->skb_q;
+
+	/* bound the software queue depth to cap memory use and latency */
+	if (unlikely(skb_txq->qlen >= MAX_SKB_TXQ_DEPTH)) {
+		mif_err_limited("%s: %s->%s: ERR! {ch:%d} skb_txq.len %d >= limit %d\n",
+				ld->name, iod->name, mc->name, ch,
+				skb_txq->qlen, MAX_SKB_TXQ_DEPTH);
+		ret = -EBUSY;
+	} else {
+		/* NOTE(review): frames longer than rb->buff_size are silently
+		 * truncated here — confirm callers never exceed the slot size.
+		 */
+		skb->len = min_t(int, skb->len, rb->buff_size);
+		ret = skb->len;
+
+		skb_queue_tail(skb_txq, skb);
+
+		if (hrtimer_active(&mld->sbd_tx_timer)) {
+			start_tx_timer(mld, &mld->sbd_tx_timer);
+		} else if (spin_trylock_irqsave(&rb->lock, flags)) {
+			/* opportunistic inline flush, bounded by quota */
+			do {
+				skb = skb_dequeue(skb_txq);
+				if (!skb)
+					break;
+
+				ret2 = sbd_tx_func(mld, &mld->sbd_tx_timer, rb, skb);
+				if (ret2 < 0)
+					break;
+			} while (--quota);
+
+			spin_unlock_irqrestore(&rb->lock, flags);
+		}
+	}
+
+	return ret;
+}
+
+/* Report whether any TX data is still queued toward CP, consulting the SBD
+ * path when SBD is active and the legacy shared-memory path otherwise.
+ */
+bool check_mem_link_tx_pending(struct mem_link_device *mld)
+{
+	struct sbd_link_device *sl = &mld->sbd_link_dev;
+
+	return sbd_active(sl) ? check_sbd_tx_pending(mld) :
+				check_legacy_tx_pending(mld);
+}
+
+/*
+ * xmit_ipc_to_dev - queue an IPC frame on a legacy shared-memory buffer.
+ * @legacy_buffer_index: which legacy map (FMT / HPRIO_RAW / NORM_RAW) to use
+ *
+ * Mirrors xmit_ipc_to_rb() for the non-SBD path: the skb is appended to the
+ * device's software TX queue, then either the TX timer is (re)armed or up to
+ * MIF_TX_QUOTA frames are flushed inline under the device TX lock.
+ * Returns the skb length on success, -ENODEV for a missing device, or
+ * -EBUSY when the software queue is full.
+ */
+static int xmit_ipc_to_dev(struct mem_link_device *mld, u8 ch, struct sk_buff *skb,
+			   enum legacy_ipc_map legacy_buffer_index)
+{
+	int ret, ret2;
+	struct link_device *ld = &mld->link_dev;
+	struct io_device *iod = skbpriv(skb)->iod;
+	struct modem_ctl *mc = ld->mc;
+	struct legacy_ipc_device *dev = mld->legacy_link_dev.dev[legacy_buffer_index];
+	struct sk_buff_head *skb_txq;
+	unsigned long flags = 0;
+	int quota = MIF_TX_QUOTA;
+
+	if (!dev) {
+		mif_err("%s: %s->%s: ERR! NO IPC DEV {ch:%d}\n",
+			ld->name, iod->name, mc->name, ch);
+		return -ENODEV;
+	}
+
+	skb_txq = dev->skb_txq;
+
+	/* bound the software queue depth to cap memory use and latency */
+	if (unlikely(skb_txq->qlen >= MAX_SKB_TXQ_DEPTH)) {
+		mif_err_limited("%s: %s->%s: ERR! %s TXQ.qlen %d >= limit %d\n",
+				ld->name, iod->name, mc->name, dev->name,
+				skb_txq->qlen, MAX_SKB_TXQ_DEPTH);
+		ret = -EBUSY;
+	} else {
+		ret = skb->len;
+
+		skb_queue_tail(skb_txq, skb);
+
+		if (hrtimer_active(&mld->tx_timer)) {
+			start_tx_timer(mld, &mld->tx_timer);
+		} else if (spin_trylock_irqsave(&dev->tx_lock, flags)) {
+			/* opportunistic inline flush, bounded by quota */
+			do {
+				skb = skb_dequeue(skb_txq);
+				if (!skb)
+					break;
+
+				ret2 = tx_func(mld, &mld->tx_timer, dev, skb);
+				if (ret2 < 0)
+					break;
+			} while (--quota);
+
+			spin_unlock_irqrestore(&dev->tx_lock, flags);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * xmit_to_cp - top-level TX dispatcher toward CP.
+ *
+ * Routing order:
+ *  - boot/dump channels go straight to the legacy link (link assumed ready,
+ *    no IPC interrupt needed);
+ *  - PS channels use the pktproc UL path when CONFIG_CP_PKTPROC_UL is on;
+ *  - SBD-capable channels use the SBD ring buffers when SBD is active;
+ *  - everything else lands on a legacy map: FMT for fmt/oem/wfs0 channels,
+ *    HPRIO_RAW for qos-marked skbs (legacy QoS), NORM_RAW otherwise.
+ * Returns a positive queued length or a negative errno.
+ */
+static int xmit_to_cp(struct mem_link_device *mld, struct io_device *iod,
+		      u8 ch, struct sk_buff *skb)
+{
+	struct link_device *ld = &mld->link_dev;
+
+	/* for boot/dump
+	 * 1) assume that link (ex. PCI) is ready
+	 * 2) do not need send_ipc_irq()
+	 */
+	if (ld->is_bootdump_ch(ch))
+		return xmit_to_legacy_link(mld, ch, skb, IPC_MAP_NORM_RAW);
+
+	if (unlikely(!ipc_active(mld)))
+		return -EIO;
+
+	/* the #if/#else below chooses which "if" heads this else-chain */
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+	if (ld->is_ps_ch(ch)) {
+		return xmit_ipc_to_pktproc(mld, skb);
+	} else if (ld->sbd_ipc && iod->sbd_ipc) {
+#else
+	if (ld->sbd_ipc && iod->sbd_ipc) {
+#endif
+		if (likely(sbd_active(&mld->sbd_link_dev)))
+			return xmit_ipc_to_rb(mld, ch, skb);
+		else
+			return -ENODEV;
+	} else {
+		if (ld->is_fmt_ch(ch) || ld->is_oem_ch(ch) ||
+		    (ld->is_wfs0_ch != NULL && ld->is_wfs0_ch(ch)))
+			return xmit_ipc_to_dev(mld, ch, skb, IPC_MAP_FMT);
+
+#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
+		if (skb->queue_mapping == 1)
+			return xmit_ipc_to_dev(mld, ch, skb, IPC_MAP_HPRIO_RAW);
+#endif
+		return xmit_ipc_to_dev(mld, ch, skb, IPC_MAP_NORM_RAW);
+	}
+}
+
+/*============================================================================*/
+/*
+ * pass_skb_to_demux - deliver a received non-network frame to its IO device.
+ *
+ * Uses the IO device recorded in skbpriv(skb) by the RX path. Marks the
+ * first frame after a wakeup (CPIF_WAKEPKT_SET_MARK) and hands the skb to
+ * iod->recv_skb_single(). Frees the skb and crashes CP when no IO device
+ * is attached; frees the skb on delivery failure. Returns the delivery
+ * result or a negative errno.
+ */
+static int pass_skb_to_demux(struct mem_link_device *mld, struct sk_buff *skb)
+{
+	struct link_device *ld = &mld->link_dev;
+	struct io_device *iod = skbpriv(skb)->iod;
+	int ret = 0;
+	u8 ch = skbpriv(skb)->sipc_ch;
+
+	if (unlikely(!iod)) {
+		mif_err("%s: ERR! No IOD for CH.%d\n", ld->name, ch);
+		dev_kfree_skb_any(skb);
+		link_trigger_cp_crash(mld, CRASH_REASON_MIF_RIL_BAD_CH,
+				      "ERR! No IOD for CH.XX");
+		return -EACCES;
+	}
+
+#ifdef DEBUG_MODEM_IF_LINK_RX
+	mif_pkt(ch, "LNK-RX", skb);
+#endif
+
+#if defined(CPIF_WAKEPKT_SET_MARK)
+	/* tag the first packet after an AP wakeup for accounting/netfilter */
+	if (atomic_xchg(&ld->mc->mark_skb_wakeup, 0) == 1) {
+		skb->mark |= CPIF_WAKEPKT_SET_MARK;
+		atomic_inc(&mld->misc_wakeup_count);
+	}
+#endif
+
+	ret = iod->recv_skb_single(iod, ld, skb);
+	if (unlikely(ret < 0)) {
+		struct modem_ctl *mc = ld->mc;
+
+		mif_err_limited("%s: %s<-%s: ERR! %s->recv_skb fail (%d)\n",
+				ld->name, iod->name, mc->name, iod->name, ret);
+		dev_kfree_skb_any(skb);
+	}
+
+	return ret;
+}
+
+/*
+ * pass_skb_to_net - deliver a received network (PS) frame to the net stack.
+ *
+ * Like pass_skb_to_demux() but validates the skb private area first and
+ * delivers via iod->recv_net_skb(). Missing private data or IO device is
+ * treated as corrupted RX data: the skb is freed and CP is crashed.
+ * Returns the delivery result or a negative errno.
+ */
+static int pass_skb_to_net(struct mem_link_device *mld, struct sk_buff *skb)
+{
+	struct link_device *ld = &mld->link_dev;
+	struct skbuff_private *priv;
+	struct io_device *iod;
+	int ret = 0;
+
+	priv = skbpriv(skb);
+	if (unlikely(!priv)) {
+		mif_err("%s: ERR! No PRIV in skb@%pK\n", ld->name, skb);
+		dev_kfree_skb_any(skb);
+		link_trigger_cp_crash(mld, CRASH_REASON_MIF_RX_BAD_DATA,
+				      "ERR! No PRIV");
+		return -EFAULT;
+	}
+
+	iod = priv->iod;
+	if (unlikely(!iod)) {
+		mif_err("%s: ERR! No IOD in skb@%pK\n", ld->name, skb);
+		dev_kfree_skb_any(skb);
+		link_trigger_cp_crash(mld, CRASH_REASON_MIF_RX_BAD_DATA,
+				      "ERR! No IOD");
+		return -EIO;
+	}
+
+#ifdef DEBUG_MODEM_IF_LINK_RX
+	mif_pkt(iod->ch, "LNK-RX", skb);
+#endif
+
+#if defined(CPIF_WAKEPKT_SET_MARK)
+	/* tag the first packet after an AP wakeup for accounting/netfilter */
+	if (atomic_xchg(&ld->mc->mark_skb_wakeup, 0) == 1) {
+		skb->mark |= CPIF_WAKEPKT_SET_MARK;
+		atomic_inc(&mld->net_wakeup_count);
+	}
+#endif
+
+	ret = iod->recv_net_skb(iod, ld, skb);
+	if (unlikely(ret < 0)) {
+		struct modem_ctl *mc = ld->mc;
+
+		mif_err_limited("%s: %s<-%s: ERR! %s->recv_net_skb fail (%d)\n",
+				ld->name, iod->name, mc->name, iod->name, ret);
+		dev_kfree_skb_any(skb);
+	}
+
+	return ret;
+}
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_WITH_SBD_ARCH)
+/*
+ * rx_net_frames_from_rb - receive up to @budget network frames from an SBD
+ * RX ring and pass them to the network stack.
+ * @rb: SBD RX ring buffer to drain
+ * @budget: NAPI budget (maximum frames to consume)
+ * @work_done: out-param, number of frames actually consumed
+ *
+ * Returns 0 on success or a negative errno from sbd_pio_rx() /
+ * pass_skb_to_net().
+ *
+ * Fixes vs. original: dropped the inner declaration that shadowed the outer
+ * @ld, and print the unsigned frame count with %u instead of %d.
+ */
+static int rx_net_frames_from_rb(struct sbd_ring_buffer *rb, int budget,
+		int *work_done)
+{
+	int rcvd = 0;
+	struct link_device *ld = rb->ld;
+	struct mem_link_device *mld = ld_to_mem_link_device(ld);
+	unsigned int num_frames;
+	int ret = 0;
+
+	num_frames = min_t(unsigned int, rb_usage(rb), budget);
+
+	while (rcvd < num_frames) {
+		struct sk_buff *skb = NULL;
+
+		ret = sbd_pio_rx(rb, &skb);
+		if (unlikely(ret))
+			return ret;
+
+		/* The $rcvd must be accumulated here, because $skb can be freed
+		 * in pass_skb_to_net().
+		 */
+		rcvd++;
+
+		ret = pass_skb_to_net(mld, skb);
+		if (ret < 0)
+			break;
+	}
+
+	if (ret != -EBUSY && rcvd < num_frames) {
+		struct io_device *iod = rb->iod;
+		struct modem_ctl *mc = ld->mc;
+
+		mif_err("%s: %s<-%s: WARN! rcvd %d < num_frames %u\n",
+			ld->name, iod->name, mc->name, rcvd, num_frames);
+	}
+
+	*work_done = rcvd;
+
+	return ret;
+}
+
+/*
+ * rx_ipc_frames_from_rb - drain all pending IPC (non-network) frames from an
+ * SBD RX ring and demux each one to its IO device.
+ *
+ * When the frame carries a link header, the channel encoded in the frame is
+ * cross-checked against the ring's channel; a mismatch indicates corrupted
+ * shared memory and crashes CP. Returns the number of frames consumed, or a
+ * negative errno from sbd_pio_rx().
+ */
+static int rx_ipc_frames_from_rb(struct sbd_ring_buffer *rb)
+{
+	int rcvd = 0;
+	struct link_device *ld = rb->ld;
+	struct mem_link_device *mld = ld_to_mem_link_device(ld);
+	unsigned int qlen = rb->len;
+	unsigned int in = *rb->wp;
+	unsigned int out = *rb->rp;
+	unsigned int num_frames = circ_get_usage(qlen, in, out);
+	int ret = 0;
+
+	while (rcvd < num_frames) {
+		struct sk_buff *skb = NULL;
+
+		ret = sbd_pio_rx(rb, &skb);
+		if (unlikely(ret))
+			return ret;
+
+		/* The $rcvd must be accumulated here, because $skb can be freed
+		 * in pass_skb_to_demux().
+		 */
+		rcvd++;
+
+		if (skbpriv(skb)->lnk_hdr) {
+			u8 ch = rb->ch;
+			u8 fch = ld->get_ch(skb->data);
+
+			if (fch != ch) {
+				mif_err("frm.ch:%d != rb.ch:%d\n", fch, ch);
+				pr_skb("CRASH", skb, ld);
+				dev_kfree_skb_any(skb);
+				link_trigger_cp_crash(mld, CRASH_REASON_MIF_RX_BAD_DATA,
+						      "frm.ch is not same with rb.ch");
+				continue;
+			}
+		}
+
+		pass_skb_to_demux(mld, skb);
+	}
+
+	/* NOTE(review): the loop above either consumes num_frames or returns
+	 * early, so this warning looks unreachable — kept for safety.
+	 */
+	if (rcvd < num_frames) {
+		struct io_device *iod = rb->iod;
+		struct modem_ctl *mc = ld->mc;
+
+		mif_err("%s: %s<-%s: WARN! rcvd %d < num_frames %d\n",
+			ld->name, iod->name, mc->name, rcvd, num_frames);
+	}
+	return rcvd;
+}
+
+/* NAPI entry for one PS channel: drain its SBD RX ring within @budget and
+ * report the number of frames consumed via @work_done.
+ */
+static int sbd_ipc_rx_func_napi(struct link_device *ld, struct io_device *iod,
+		int budget, int *work_done)
+{
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	struct sbd_ring_buffer *rb = sbd_ch2rb(&mld->sbd_link_dev, iod->ch, RX);
+	int frames = 0;
+	int err;
+
+	err = rx_net_frames_from_rb(rb, budget, &frames);
+	if (IS_ERR_VALUE((unsigned long)err) && (err != -EBUSY))
+		mif_err_limited("RX error (%d)\n", err);
+
+	*work_done = frames;
+	return err;
+}
+#endif //CONFIG_LINK_DEVICE_WITH_SBD_ARCH
+
+/*
+ * legacy_ipc_rx_func_napi - NAPI-aware RX from one legacy shared-memory
+ * IPC device.
+ * @budget: NAPI budget; decremented only for PS-channel frames
+ * @work_done: incremented only for PS-channel frames
+ *
+ * Validates the CP-provided head/tail pointers, syncs the (possibly
+ * wrapping) RX region for CPU access when the RAW buffer is cached, then
+ * pulls frames and demuxes them. Note that $rcvd accumulates BYTES (it is
+ * compared against the ring usage $size), while budget counts FRAMES.
+ * Returns 0 on success or a negative errno.
+ */
+static int legacy_ipc_rx_func_napi(struct mem_link_device *mld, struct legacy_ipc_device *dev,
+				   int budget, int *work_done)
+{
+	struct link_device *ld = &mld->link_dev;
+	struct modem_data *modem = ld->mdm_data;
+	unsigned int qsize = get_rxq_buff_size(dev);
+	unsigned int in = get_rxq_head(dev);
+	unsigned int out = get_rxq_tail(dev);
+	unsigned int size = circ_get_usage(qsize, in, out);
+	int rcvd = 0;
+	int err = 0;
+
+	/* Make sure the in, out pointers are within bounds to avoid overflow.
+	 * These pointers are passed from CP shared memory and must be
+	 * validated before dma_sync below. */
+	if ((in > qsize) || (out > qsize)) {
+		mif_err("OOB error! in:%u, out:%u, qsize:%u\n", in, out, qsize);
+		link_trigger_cp_crash(mld, CRASH_REASON_MIF_RX_BAD_DATA,
+				      "OOB error");
+		return -EINVAL;
+	}
+
+	if (unlikely(circ_empty(in, out)))
+		return 0;
+
+	if (modem->legacy_raw_rx_buffer_cached && dev->id == IPC_MAP_NORM_RAW) {
+		char *src = get_rxq_buff(dev);
+
+		if (!src) {
+			mif_err_limited("get_rxq_buff() error\n");
+			return -EINVAL;
+		}
+
+		/* sync in one or two pieces depending on ring wrap-around */
+		if ((out + size) <= qsize)
+			dma_sync_single_for_cpu(ld->dev, virt_to_phys(src + out), size,
+						DMA_FROM_DEVICE);
+		else {
+			dma_sync_single_for_cpu(ld->dev, virt_to_phys(src + out), qsize - out,
+						DMA_FROM_DEVICE);
+			dma_sync_single_for_cpu(ld->dev, virt_to_phys(src), size - (qsize - out),
+						DMA_FROM_DEVICE);
+		}
+	}
+
+	while ((budget != 0) && (rcvd < size)) {
+		struct sk_buff *skb;
+		u8 ch;
+		struct io_device *iod;
+
+		skb = recv_from_legacy_link(mld, dev, in, &err);
+		if (err)
+			return err;
+
+		ch = ld->get_ch(skb->data);
+		iod = link_get_iod_with_channel(ld, ch);
+		if (!iod) {
+			mif_err("%s: ERR! [%s]No IOD for CH.%d(out:%u)\n",
+				ld->name, dev->name, ch, get_rxq_tail(dev));
+			pr_skb("CRASH", skb, ld);
+			dev_kfree_skb_any(skb);
+			link_trigger_cp_crash(mld, CRASH_REASON_MIF_RX_BAD_DATA,
+					      "ERR! No IOD from CP");
+			break;
+		}
+
+		/* Record the IO device and the link device into the &skb->cb */
+		skbpriv(skb)->iod = iod;
+		skbpriv(skb)->ld = ld;
+
+		skbpriv(skb)->lnk_hdr = iod->link_header;
+		skbpriv(skb)->sipc_ch = ch;
+		skbpriv(skb)->napi = &mld->mld_napi;
+		/* The $rcvd must be accumulated here, because $skb can be freed
+		 * in pass_skb_to_demux().
+		 */
+		rcvd += skb->len;
+
+		/* only PS traffic is charged against the NAPI budget */
+		if (ld->is_ps_ch(ch)) {
+			budget--;
+			*work_done += 1;
+		}
+
+		pass_skb_to_demux(mld, skb);
+	}
+
+	/* fires only when the no-IOD break above left data unconsumed */
+	if ((budget != 0) && (rcvd < size)) {
+		struct link_device *ld = &mld->link_dev;
+
+		mif_err("%s: WARN! rcvd %d < size %d\n", ld->name, rcvd, size);
+	}
+
+	return 0;
+}
+
+/*
+ * legacy_ipc_rx_func - non-NAPI variant of legacy_ipc_rx_func_napi(): drain
+ * ALL pending bytes from one legacy IPC device (no budget), demuxing each
+ * frame. Used by the boot/dump RX path.
+ *
+ * Returns the number of bytes consumed, or a negative errno on failure
+ * (including -ENOMEM from recv_from_legacy_link, handled by the caller).
+ */
+static int legacy_ipc_rx_func(struct mem_link_device *mld, struct legacy_ipc_device *dev)
+{
+	struct link_device *ld = &mld->link_dev;
+	struct modem_data *modem = ld->mdm_data;
+	unsigned int qsize = get_rxq_buff_size(dev);
+	unsigned int in = get_rxq_head(dev);
+	unsigned int out = get_rxq_tail(dev);
+	unsigned int size = circ_get_usage(qsize, in, out);
+	int rcvd = 0;
+	int err = 0;
+
+	/* Make sure the in, out pointers are within bounds to avoid overflow.
+	 * These pointers are passed from CP shared memory and must be
+	 * validated before dma_sync below. */
+	if ((in > qsize) || (out > qsize)) {
+		mif_err("OOB error! in:%u, out:%u, qsize:%u\n", in, out, qsize);
+		link_trigger_cp_crash(mld, CRASH_REASON_MIF_RX_BAD_DATA,
+				      "OOB error");
+		return -EINVAL;
+	}
+
+	if (unlikely(circ_empty(in, out)))
+		return 0;
+
+	if (modem->legacy_raw_rx_buffer_cached && dev->id == IPC_MAP_NORM_RAW) {
+		char *src = get_rxq_buff(dev);
+
+		if (!src) {
+			mif_err_limited("get_rxq_buff() error\n");
+			return -EINVAL;
+		}
+
+		/* sync in one or two pieces depending on ring wrap-around */
+		if ((out + size) <= qsize)
+			dma_sync_single_for_cpu(ld->dev, virt_to_phys(src + out), size,
+						DMA_FROM_DEVICE);
+		else {
+			dma_sync_single_for_cpu(ld->dev, virt_to_phys(src + out), qsize - out,
+						DMA_FROM_DEVICE);
+			dma_sync_single_for_cpu(ld->dev, virt_to_phys(src), size - (qsize - out),
+						DMA_FROM_DEVICE);
+		}
+	}
+
+	while (rcvd < size) {
+		struct sk_buff *skb;
+		u8 ch;
+		struct io_device *iod;
+
+		skb = recv_from_legacy_link(mld, dev, in, &err);
+		if (err)
+			return err;
+
+		ch = ld->get_ch(skb->data);
+		iod = link_get_iod_with_channel(ld, ch);
+		if (!iod) {
+			mif_err("%s: ERR! [%s]No IOD for CH.%d(out:%u)\n",
+				ld->name, dev->name, ch, get_rxq_tail(dev));
+			pr_skb("CRASH", skb, ld);
+			dev_kfree_skb_any(skb);
+			link_trigger_cp_crash(mld, CRASH_REASON_MIF_RX_BAD_DATA,
+					      "ERR! No IOD from CP in rx_frames_from_dev()");
+			break;
+		}
+
+		/* Record the IO device and the link device into the &skb->cb */
+		skbpriv(skb)->iod = iod;
+		skbpriv(skb)->ld = ld;
+
+		skbpriv(skb)->lnk_hdr = iod->link_header;
+		skbpriv(skb)->sipc_ch = ch;
+
+		/* non-NAPI path: no NAPI context attached to the skb */
+		skbpriv(skb)->napi = NULL;
+
+		/* The $rcvd must be accumulated here, because $skb can be freed
+		 * in pass_skb_to_demux().
+		 */
+		rcvd += skb->len;
+		pass_skb_to_demux(mld, skb);
+	}
+
+	/* fires only when the no-IOD break above left data unconsumed */
+	if (rcvd < size) {
+		struct link_device *ld = &mld->link_dev;
+
+		mif_err("%s: WARN! rcvd %d < size %d\n", ld->name, rcvd, size);
+	}
+
+	return rcvd;
+}
+
+#if IS_ENABLED(CONFIG_MCU_IPC)
+/* Timestamps used to accumulate how long the CP2AP RX mailbox interrupt
+ * stays masked; see shmem_enable_rx_int()/shmem_disable_rx_int().
+ * NOTE(review): file-scope statics with no locking — presumably serialized
+ * by the NAPI/interrupt context; confirm.
+ */
+static ktime_t rx_int_enable_time;
+static ktime_t rx_int_disable_time;
+#endif
+
+/*
+ * shmem_enable_rx_int - unmask the CP2AP message mailbox interrupt and add
+ * the elapsed masked interval to mld->rx_int_disabled_time.
+ * Returns the mailbox-enable result, or 0 when mailbox IRQs are not in use.
+ */
+static int shmem_enable_rx_int(struct link_device *ld)
+{
+#if IS_ENABLED(CONFIG_MCU_IPC)
+	struct mem_link_device *mld = to_mem_link_device(ld);
+
+	if (ld->interrupt_types == INTERRUPT_MAILBOX) {
+		mld->rx_int_enable = 1;
+
+		if (rx_int_disable_time) {
+			rx_int_enable_time = ktime_get();
+			mld->rx_int_disabled_time +=
+				ktime_to_us(ktime_sub(rx_int_enable_time,
+						      rx_int_disable_time));
+			/* reset both stamps for the next mask interval */
+			rx_int_enable_time = 0;
+			rx_int_disable_time = 0;
+		}
+
+		return cp_mbox_enable_handler(CP_MBOX_IRQ_IDX_0, mld->irq_cp2ap_msg);
+	}
+#endif
+
+	return 0;
+}
+
+/*
+ * shmem_disable_rx_int - mask the CP2AP message mailbox interrupt and stamp
+ * the time the mask began (accounted when re-enabled).
+ * Returns the mailbox-disable result, or 0 when mailbox IRQs are not in use.
+ */
+static int shmem_disable_rx_int(struct link_device *ld)
+{
+#if IS_ENABLED(CONFIG_MCU_IPC)
+	struct mem_link_device *mld = to_mem_link_device(ld);
+
+	if (ld->interrupt_types == INTERRUPT_MAILBOX) {
+		mld->rx_int_enable = 0;
+		rx_int_disable_time = ktime_get();
+		return cp_mbox_disable_handler(CP_MBOX_IRQ_IDX_0, mld->irq_cp2ap_msg);
+	}
+#endif
+
+	return 0;
+}
+
+/*
+ * update_handover_block_info - ioctl helper: copy a t_handover_block_info
+ * from userspace into the AP2CP handover shared-memory block.
+ * Returns 0 on success, -EFAULT when the user copy fails.
+ */
+static int update_handover_block_info(struct link_device *ld, unsigned long arg)
+{
+	struct mem_link_device *mld = ld_to_mem_link_device(ld);
+	struct t_handover_block_info info;
+
+	if (copy_from_user(&info, (const void __user *)arg, sizeof(info))) {
+		mif_err("%s: ERR! handover_block_info copy_from_user fail\n",
+			ld->name);
+		return -EFAULT;
+	}
+
+	mif_info("%s: call send_handover_block_info (sku: %d, rev: %d)\n",
+		 ld->name, info.modem_sku, info.minor_id);
+
+	memcpy(mld->ap2cp_handover_block_info.addr, &info, sizeof(info));
+
+	return 0;
+}
+
+/*
+ * bootdump_rx_func - service boot/dump-time RX: dispatch any CP commands
+ * queued in msb_rxq, then drain bootdump frames from the legacy RAW buffer.
+ * On -ENOMEM, schedules the page-reclaim worker (which retries RX).
+ * Returns the legacy RX result.
+ */
+static int bootdump_rx_func(struct mem_link_device *mld)
+{
+	struct legacy_ipc_device *dev = mld->legacy_link_dev.dev[IPC_MAP_NORM_RAW];
+	u32 pending = mld->msb_rxq.qlen;
+	int ret;
+
+	while (pending-- > 0) {
+		struct mst_buff *msb;
+		u16 intr;
+
+		msb = msb_dequeue(&mld->msb_rxq);
+		if (!msb)
+			break;
+
+		intr = msb->snapshot.int2ap;
+		if (cmd_valid(intr))
+			mld->cmd_handler(mld, int2cmd(intr));
+		msb_free(msb);
+	}
+
+	/* recv frames from RAW buffer which should contain bootdump frames */
+	ret = legacy_ipc_rx_func(mld, dev);
+	if (ret == -ENOMEM && !work_pending(&mld->page_reclaim_work)) {
+		struct link_device *ld = &mld->link_dev;
+
+		mif_err_limited("Rx ENOMEM, try reclaim work\n");
+		queue_work(ld->rx_wq, &mld->page_reclaim_work);
+	}
+
+	return ret;
+}
+
+/*
+ * bootdump_oom_handler_work - workqueue handler for RX OOM during boot/dump:
+ * nudge the page allocator to reclaim via a GFP_KERNEL allocation, wait a
+ * little, then retry the bootdump RX path.
+ */
+static void bootdump_oom_handler_work(struct work_struct *ws)
+{
+	struct mem_link_device *mld =
+		container_of(ws, struct mem_link_device, page_reclaim_work);
+	struct sk_buff *probe;
+
+	/* try to page reclaim with GFP_KERNEL */
+	probe = alloc_skb(PAGE_SIZE - 512, GFP_KERNEL);
+	if (probe)
+		dev_kfree_skb_any(probe);
+
+	/* need to disable the RX irq ?? */
+	msleep(200);
+
+	mif_info("trigger the rx task again\n");
+	bootdump_rx_func(mld);
+}
+
+/* Delayed-work wrapper around bootdump_rx_func(). */
+static void bootdump_rx_work(struct work_struct *ws)
+{
+	struct mem_link_device *mld = container_of(ws, struct mem_link_device,
+						   bootdump_rx_dwork.work);
+
+	bootdump_rx_func(mld);
+}
+
+/*============================================================================*/
+/*
+ * shmem_init_comm - per-IOD open hook; sends CMD_INIT_END to CP once both
+ * sides of a FMT/RFS channel pair are open.
+ *
+ * No-op for the SIT protocol or once INIT_END has already been sent. A FMT
+ * channel checks its RFS counterpart (and may proceed without one); an RFS
+ * channel requires its FMT counterpart to be open. Always returns 0.
+ */
+static int shmem_init_comm(struct link_device *ld, struct io_device *iod)
+{
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	struct modem_ctl *mc = ld->mc;
+	struct io_device *check_iod = NULL;
+	bool allow_no_check_iod = false;
+	int id = iod->ch;
+	/* channel-id offsets to map FMT <-> RFS counterpart channels */
+	int fmt2rfs = (ld->chid_rfs_0 - ld->chid_fmt_0);
+	int rfs2fmt = (ld->chid_fmt_0 - ld->chid_rfs_0);
+
+	if (atomic_read(&mld->init_end_cnt))
+		return 0;
+
+	if (ld->protocol == PROTOCOL_SIT)
+		return 0;
+
+	/* FMT will check RFS and vice versa */
+	if (ld->is_fmt_ch(id)) {
+		check_iod = link_get_iod_with_channel(ld, (id + fmt2rfs));
+		allow_no_check_iod = true;
+	} else if (ld->is_rfs_ch(id)) {
+		check_iod = link_get_iod_with_channel(ld, (id + rfs2fmt));
+	}
+
+	if (check_iod ? atomic_read(&check_iod->opened) : allow_no_check_iod) {
+		if (ld->link_type == LINKDEV_SHMEM)
+			write_clk_table_to_shmem(mld);
+
+		/* NOTE(review): check-then-inc of init_end_cnt is not atomic;
+		 * presumably callers are serialized — confirm.
+		 */
+		if (cp_online(mc) && !atomic_read(&mld->init_end_cnt)) {
+			mif_err("%s: %s -> INIT_END -> %s\n", ld->name, iod->name, mc->name);
+			atomic_inc(&mld->init_end_cnt);
+			send_ipc_irq(mld, cmd2int(CMD_INIT_END));
+		}
+	} else if (check_iod) {
+		mif_err("%s is not opened yet\n", check_iod->name);
+	}
+
+	return 0;
+}
+
+/* link_device .send op: route @skb toward CP on @iod's channel. */
+static int shmem_send(struct link_device *ld, struct io_device *iod,
+		    struct sk_buff *skb)
+{
+	return xmit_to_cp(to_mem_link_device(ld), iod, iod->ch, skb);
+}
+
+/*
+ * link_prepare_normal_boot - reset link state ahead of a normal CP boot:
+ * clear INIT_END bookkeeping, mark the link OFFLINE, cancel every TX timer
+ * (legacy, SBD, pktproc UL), deactivate SBD where applicable, and purge all
+ * software TX queues.
+ */
+static void link_prepare_normal_boot(struct link_device *ld, struct io_device *iod)
+{
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	unsigned long flags;
+
+	atomic_set(&mld->init_end_cnt, 0);
+	atomic_set(&mld->init_end_busy, 0);
+	mld->last_init_end_cnt = 0;
+
+	spin_lock_irqsave(&mld->state_lock, flags);
+	mld->state = LINK_STATE_OFFLINE;
+	spin_unlock_irqrestore(&mld->state_lock, flags);
+
+	cancel_tx_timer(mld, &mld->tx_timer);
+
+	if (ld->sbd_ipc) {
+#if IS_ENABLED(CONFIG_LTE_MODEM_XMM7260)
+		sbd_deactivate(&mld->sbd_link_dev);
+#endif
+		cancel_tx_timer(mld, &mld->sbd_tx_timer);
+	}
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+	cancel_tx_timer(mld, &mld->pktproc_tx_timer);
+#endif
+
+	purge_txq(mld);
+}
+
+/*
+ * link_load_cp_image - ioctl backend: copy a CP boot/dump image chunk from
+ * userspace into shared memory.
+ *
+ * Destination selection:
+ *  - LINK_ATTR_XMIT_BTDLR_PCIE: stage into the data buffer past the legacy
+ *    RAW region (64KB-aligned) so the IPC region stays intact;
+ *  - otherwise the vmapped boot region (img.mode == 0) or the full base
+ *    region (img.mode == 1, dump).
+ * All offsets/lengths from userspace are bounds-checked (overflow-safe)
+ * before the copy. Returns 0 on success or a negative errno.
+ */
+static int link_load_cp_image(struct link_device *ld, struct io_device *iod,
+			      unsigned long arg)
+{
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	void __iomem *dst;
+	void __user *src;
+	struct cp_image img;
+	void __iomem *v_base;
+	size_t valid_space;
+	int ret = 0;
+
+	/**
+	 * Get the information about the boot image
+	 */
+	memset(&img, 0, sizeof(struct cp_image));
+
+	ret = copy_from_user(&img, (const void __user *)arg, sizeof(img));
+	if (ret) {
+		mif_err("%s: ERR! INFO copy_from_user fail\n", ld->name);
+		return -EFAULT;
+	}
+
+	/* vmap_lock guards boot_base against the vunmap in the security path */
+	mutex_lock(&mld->vmap_lock);
+
+	if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE) {
+		struct modem_data *modem = mld->link_dev.mdm_data;
+
+		/*
+		 * Copy boot img to data buffer for keeping the IPC region integrity.
+		 * boot_img_offset should be 64KB aligned.
+		 */
+		mld->boot_img_offset = round_up(modem->legacy_raw_buffer_offset, SZ_64K);
+		mld->boot_img_size = img.size;
+
+		valid_space = mld->size - mld->boot_img_offset;
+		v_base = mld->base + mld->boot_img_offset;
+
+		goto check_img;
+	}
+
+	/* lazily vmap the non-cached boot region on first use */
+	if (mld->boot_base == NULL) {
+		mld->boot_base = cp_shmem_get_nc_region(cp_shmem_get_base(ld->mdm_data->cp_num,
+							SHMEM_CP), mld->boot_size);
+		if (!mld->boot_base) {
+			mif_err("Failed to vmap boot_region\n");
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	/* Calculate size of valid space which BL will download */
+	valid_space = (img.mode) ? mld->size : mld->boot_size;
+	/* Calculate base address (0: BOOT_MODE, 1: DUMP_MODE) */
+	v_base = (img.mode) ? mld->base : mld->boot_base;
+
+check_img:
+	/**
+	 * Check the size of the boot image
+	 * fix the integer overflow of "img.m_offset + img.len" from Jose Duart
+	 */
+	if (img.size > valid_space || img.len > valid_space
+	    || img.m_offset > valid_space - img.len) {
+		mif_err("%s: ERR! Invalid args: size %x, offset %x, len %x\n",
+			ld->name, img.size, img.m_offset, img.len);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dst = (void __iomem *)(v_base + img.m_offset);
+	src = (void __user *)((unsigned long)img.binary);
+	ret = copy_from_user_memcpy_toio(dst, src, img.len);
+	if (ret) {
+		mif_err("%s: ERR! BOOT copy_from_user fail\n", ld->name);
+		goto out;
+	}
+
+out:
+	mutex_unlock(&mld->vmap_lock);
+
+	return ret;
+}
+
+/*
+ * link_load_gnss_image - ioctl backend: copy a GNSS firmware image from
+ * userspace into the vmapped GNSS firmware region.
+ *
+ * Fix: validate the userspace-supplied img.offset/img.firmware_size against
+ * the GNSS region size before writing — the read counterpart
+ * (link_read_gnss_image) already performs this check, but the write path
+ * did not, allowing an out-of-bounds write past the vmapped region. The
+ * check is written overflow-safe.
+ * Returns 0 on success or a negative errno / copy_from_user residue.
+ */
+static int link_load_gnss_image(struct link_device *ld,
+				struct io_device *iod, unsigned long arg)
+{
+	struct gnss_image img;
+	void __iomem *dst;
+	void __user *src;
+	u32 region_size;
+
+	int ret = 0;
+	struct mem_link_device *mld = to_mem_link_device(ld);
+
+	memset(&img, 0, sizeof(struct gnss_image));
+
+	mif_info_limited("Load GNSS images\n");
+
+	if (!mld->gnss_v_base) {
+		mif_err("No gnss_fw vmap region\n");
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(&img, (const void __user *)arg, sizeof(img));
+	if (ret) {
+		mif_err("copy_from_user() fail:%d\n", ret);
+		return ret;
+	}
+
+	/* overflow-safe bounds check (cf. link_read_gnss_image()) */
+	region_size = cp_shmem_get_size(0, SHMEM_GNSS_FW);
+	if (img.firmware_size > region_size ||
+	    img.offset > region_size - img.firmware_size) {
+		mif_err("offset:%d size:%d error\n",
+			img.offset, img.firmware_size);
+		return -EFAULT;
+	}
+
+	dst = (void __iomem *)(mld->gnss_v_base + img.offset);
+	src = (void __user *)((unsigned long)img.firmware_bin);
+	ret = copy_from_user_memcpy_toio(dst, src, img.firmware_size);
+	if (ret) {
+		mif_err("copy_from_user_memcpy_toio() fail:%d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * link_read_gnss_image - ioctl backend: copy a slice of the vmapped GNSS
+ * firmware region back to userspace.
+ *
+ * Fix: the original bound check computed "img.offset + img.firmware_size",
+ * which can wrap and bypass the check; rewritten in the overflow-safe form
+ * this file already uses in link_load_cp_image().
+ * Returns 0 on success or a negative errno / copy residue.
+ */
+static int link_read_gnss_image(struct link_device *ld,
+				struct io_device *iod, unsigned long arg)
+{
+	struct gnss_image img;
+	int err = 0;
+	u32 region_size;
+	struct mem_link_device *mld = to_mem_link_device(ld);
+
+	memset(&img, 0, sizeof(struct gnss_image));
+
+	if (!mld->gnss_v_base) {
+		mif_err("No gnss_fw vmap region\n");
+		return -ENOMEM;
+	}
+
+	err = copy_from_user(&img, (const void __user *)arg,
+			     sizeof(struct gnss_image));
+	if (err) {
+		mif_err("copy_from_user fail:%d\n", err);
+		return err;
+	}
+
+	/* overflow-safe equivalent of: offset + firmware_size > region_size */
+	region_size = cp_shmem_get_size(0, SHMEM_GNSS_FW);
+	if (img.firmware_size > region_size ||
+	    img.offset > region_size - img.firmware_size) {
+		mif_err("offset:%d size:%d error\n",
+			img.offset, img.firmware_size);
+		return -EFAULT;
+	}
+
+	err = copy_to_user(img.firmware_bin,
+			   mld->gnss_v_base + img.offset, img.firmware_size);
+	if (err) {
+		mif_err("copy_to_user fail:%d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * shm_get_security_param2 - derive the second SMC argument for a CP boot
+ * security request from the boot @mode.
+ * @bl_size: bootloader size supplied by userspace
+ * @param: out-param receiving the derived value
+ * Returns 0 on success, -EINVAL for an unknown mode.
+ */
+int shm_get_security_param2(u32 cp_num, unsigned long mode, u32 bl_size,
+		unsigned long *param)
+{
+	switch (mode) {
+	case CP_BOOT_MODE_NORMAL:
+	case CP_BOOT_MODE_DUMP:
+		*param = bl_size;
+		return 0;
+	case CP_BOOT_RE_INIT:
+		*param = 0;
+		return 0;
+	case CP_BOOT_MODE_MANUAL:
+		/* manual mode passes an absolute address past the BL */
+		*param = cp_shmem_get_base(cp_num, SHMEM_CP) + bl_size;
+		return 0;
+	default:
+		mif_info("Invalid sec_mode(%lu)\n", mode);
+		return -EINVAL;
+	}
+}
+
+/*
+ * shm_get_security_param3 - derive the third SMC argument for a CP boot
+ * security request from the boot @mode.
+ * @main_size: main-binary size supplied by userspace
+ * @param: out-param receiving the derived value
+ * Returns 0 on success, -EINVAL for an unknown mode.
+ */
+int shm_get_security_param3(u32 cp_num, unsigned long mode, u32 main_size,
+		unsigned long *param)
+{
+	switch (mode) {
+	case CP_BOOT_MODE_NORMAL:
+	case CP_BOOT_MODE_MANUAL:
+		*param = main_size;
+		return 0;
+	case CP_BOOT_MODE_DUMP:
+		/* dump mode passes a base address instead of a size */
+#ifdef CP_NONSECURE_BOOT
+		*param = cp_shmem_get_base(cp_num, SHMEM_CP);
+#else
+		*param = cp_shmem_get_base(cp_num, SHMEM_IPC);
+#endif
+		return 0;
+	case CP_BOOT_RE_INIT:
+		*param = 0;
+		return 0;
+	default:
+		mif_info("Invalid sec_mode(%lu)\n", mode);
+		return -EINVAL;
+	}
+}
+
+/* Max polls of the in-progress signature check before giving up */
+#define MAX_TRY_CNT 0x1000
+/* SMC mode value: poll CP signature-check progress ("continue") */
+#define MODE_CP_CHECK_CONTINUE 0x8
+
+/*
+ * shmem_security_request - ioctl backend: forward a CP boot security request
+ * (signature check / BAAW setup) to the secure monitor via SMC.
+ *
+ * Derives SMC parameters from the user-supplied mode, releases the boot
+ * vmap (no longer needed), then issues the SMC, polling while the secure
+ * world reports the signature check as still in progress. For RE_INIT with
+ * a dedicated pktproc BAAW, an additional SMC programs the BAAW window.
+ * Returns 0/SMC result, or a negative errno on parameter failure.
+ */
+static int shmem_security_request(struct link_device *ld, struct io_device *iod,
+				  unsigned long arg)
+{
+	unsigned long mode, param2, param3;
+	int err = 0;
+	struct modem_sec_req msr;
+#if IS_ENABLED(CONFIG_CP_SECURE_BOOT)
+	uint32_t try_cnt = 0;
+#endif
+	u32 cp_num = ld->mdm_data->cp_num;
+	struct mem_link_device *mld = ld->mdm_data->mld;
+#if IS_ENABLED(CONFIG_CP_PKTPROC) && IS_ENABLED(CONFIG_EXYNOS_CPIF_IOMMU)
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+#endif
+
+	err = copy_from_user(&msr, (const void __user *)arg, sizeof(msr));
+	if (err) {
+		mif_err("%s: ERR! copy_from_user fail\n", ld->name);
+		err = -EFAULT;
+		goto exit;
+	}
+
+	mode = (unsigned long)msr.mode;
+	err = shm_get_security_param2(cp_num, mode, msr.param2, &param2);
+	if (err) {
+		mif_err("%s: ERR! parameter2 is invalid\n", ld->name);
+		goto exit;
+	}
+	err = shm_get_security_param3(cp_num, mode, msr.param3, &param3);
+	if (err) {
+		mif_err("%s: ERR! parameter3 is invalid\n", ld->name);
+		goto exit;
+	}
+
+	/* the boot image has been consumed; release its vmap */
+	mutex_lock(&mld->vmap_lock);
+	if (mld->boot_base != NULL) {
+		/* boot_base is in no use at this point */
+		vunmap(mld->boot_base);
+		mld->boot_base = NULL;
+	}
+	mutex_unlock(&mld->vmap_lock);
+
+#if IS_ENABLED(CONFIG_CP_SECURE_BOOT)
+	exynos_smc(SMC_ID_CLK, SSS_CLK_ENABLE, 0, 0);
+	if ((mode == CP_BOOT_MODE_NORMAL) && cp_shmem_get_mem_map_on_cp_flag(cp_num))
+		mode |= cp_shmem_get_base(cp_num, SHMEM_CP);
+
+	mif_info("mode=0x%lx, param2=0x%lx, param3=0x%lx, cp_base_addr=0x%lx\n",
+		 mode, param2, param3, cp_shmem_get_base(cp_num, SHMEM_CP));
+	err = (int)exynos_smc(SMC_ID, mode, param2, param3);
+
+	/* signature verification may be asynchronous; poll until done */
+	while (err == CP_CHECK_SIGN_NOT_FINISH && try_cnt < MAX_TRY_CNT) {
+		try_cnt++;
+		err = (int)exynos_smc(SMC_ID, MODE_CP_CHECK_CONTINUE, 0x0, 0x0);
+	}
+
+	exynos_smc(SMC_ID_CLK, SSS_CLK_DISABLE, 0, 0);
+
+	if (try_cnt >= MAX_TRY_CNT)
+		mif_info("%s: it fails to check signature of main binary.\n", ld->name);
+
+	mif_info("%s: return_value=%d\n", ld->name, err);
+#endif
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	if ((mode == CP_BOOT_RE_INIT) && mld->pktproc.use_dedicated_baaw) {
+		mif_info("memaddr:0x%lx memsize:0x%08x\n",
+			 cp_shmem_get_base(cp_num, SHMEM_PKTPROC),
+			 cp_shmem_get_size(cp_num, SHMEM_PKTPROC)
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+			 + cp_shmem_get_size(cp_num, SHMEM_PKTPROC_UL));
+#else
+			 );
+#endif
+
+		exynos_smc(SMC_ID_CLK, SSS_CLK_ENABLE, 0, 0);
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_IOMMU)
+		err = (int)exynos_smc(SMC_ID, CP_BOOT_EXT_BAAW,
+				      ppa->cp_base, SYSMMU_BAAW_SIZE);
+#else
+		err = (int)exynos_smc(SMC_ID, CP_BOOT_EXT_BAAW,
+				      (unsigned long)cp_shmem_get_base(cp_num, SHMEM_PKTPROC),
+				      (unsigned long)cp_shmem_get_size(cp_num, SHMEM_PKTPROC)
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+				      + (unsigned long)cp_shmem_get_size(cp_num, SHMEM_PKTPROC_UL));
+#else
+				      );
+#endif
+#endif
+
+		exynos_smc(SMC_ID_CLK, SSS_CLK_DISABLE, 0, 0);
+		if (err)
+			mif_err("ERROR: SMC call failure:%d\n", err);
+	}
+#endif
+
+exit:
+	return err;
+}
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_WITH_SBD_ARCH)
+/*
+ * sbd_link_rx_func_napi - iterate all SBD RX rings: non-PS rings are drained
+ * fully, PS rings are drained within the remaining NAPI @budget (which is
+ * charged, and @work_done credited, only for PS frames).
+ * Returns the last ring's result; -EBUSY/-ENOMEM/-EFAULT abort the sweep.
+ */
+static int sbd_link_rx_func_napi(struct sbd_link_device *sl, struct link_device *ld, int budget,
+				 int *work_done)
+{
+	int i = 0;
+	int ret = 0;
+
+	for (i = 0; i < sl->num_channels ; i++) {
+		struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, RX);
+		int ps_rcvd = 0;
+
+		if (unlikely(rb_empty(rb)))
+			continue;
+		/* no budget left: skip PS rings, still service control rings */
+		if ((budget <= 0) && ld->is_ps_ch(sbd_id2ch(sl, i)))
+			continue;
+		if (!ld->is_ps_ch(sbd_id2ch(sl, i)))
+			ret = rx_ipc_frames_from_rb(rb);
+		else /* ps channels */
+			ret = sbd_ipc_rx_func_napi(ld, rb->iod, budget, &ps_rcvd);
+		if ((ret == -EBUSY) || (ret == -ENOMEM) || (ret == -EFAULT))
+			break;
+		if (ld->is_ps_ch(sbd_id2ch(sl, i))) {
+			/* count budget only for ps frames */
+			budget -= ps_rcvd;
+			*work_done += ps_rcvd;
+		}
+	}
+	return ret;
+}
+#endif//CONFIG_LINK_DEVICE_WITH_SBD_ARCH
+
+/*
+ * legacy_link_rx_func_napi - sweep every legacy IPC device, draining each
+ * non-empty RX queue within the NAPI @budget. -EBUSY/-ENOMEM abort the
+ * sweep so NAPI polls again. Returns the last device's RX result.
+ */
+static int legacy_link_rx_func_napi(struct mem_link_device *mld, int budget, int *work_done)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < IPC_MAP_MAX; i++) {
+		struct legacy_ipc_device *dev = mld->legacy_link_dev.dev[i];
+		int rcvd = 0;
+
+		if (unlikely(circ_empty(get_rxq_head(dev), get_rxq_tail(dev))))
+			continue; /* nothing pending on this device */
+		if (budget <= 0)
+			break;
+
+		ret = legacy_ipc_rx_func_napi(mld, dev, budget, &rcvd);
+		if ((ret == -EBUSY) || (ret == -ENOMEM))
+			break;
+
+		/* count budget for all frames */
+		budget -= rcvd;
+		*work_done += rcvd;
+	}
+
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+	tpmon_add_legacy_packet_count(*work_done);
+#endif
+
+	return ret;
+}
+
+static int shmem_enqueue_snapshot(struct mem_link_device *mld);
+
+/*
+ * mld_rx_int_poll
+ *
+ * This NAPI poll function does not handle reception of any network frames.
+ * It is used for servicing CP2AP commands and FMT RX frames while the RX
+ * mailbox interrupt is masked. When the mailbox interrupt is masked, CP can
+ * set the interrupt but the AP will not react. However, the interrupt status
+ * bit will still be set, so we can poll the status bit to handle new RX
+ * interrupts.
+ * If the RAW NAPI functions are no longer scheduled at the end of this poll
+ * function, we can enable the mailbox interrupt and stop polling.
+ */
+static int mld_rx_int_poll(struct napi_struct *napi, int budget)
+{
+	struct mem_link_device *mld = container_of(napi, struct mem_link_device,
+						   mld_napi);
+	struct link_device *ld = &mld->link_dev;
+	struct modem_ctl *mc = ld->mc;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_WITH_SBD_ARCH)
+	struct sbd_link_device *sl = &mld->sbd_link_dev;
+#endif
+	int total_ps_rcvd = 0;
+	int ps_rcvd = 0;
+	int ret = 0;
+	int total_budget = budget;
+	u32 qlen = 0;
+
+	mld->rx_poll_count++;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	/* service shared-IRQ pktproc RX queues within this poll's budget */
+	if (!mld->pktproc.use_exclusive_irq) {
+		int i = 0;
+		for (i = 0; i < mld->pktproc.num_queue; i++) {
+			ret = mld->pktproc.q[i]->clean_rx_ring(mld->pktproc.q[i], budget, &ps_rcvd);
+			if ((ret == -EBUSY) || (ret == -ENOMEM)) {
+				goto keep_poll;
+			}
+
+			budget -= ps_rcvd;
+			total_ps_rcvd += ps_rcvd;
+			ps_rcvd = 0;
+		}
+	}
+#endif
+
+	/* NOTE: without CONFIG_MCU_IPC the brace block below runs
+	 * unconditionally; with it, only when the masked mailbox interrupt
+	 * status bit is pending.
+	 */
+#if IS_ENABLED(CONFIG_MCU_IPC)
+	if (ld->interrupt_types == INTERRUPT_MAILBOX)
+		ret = cp_mbox_check_handler(CP_MBOX_IRQ_IDX_0, mld->irq_cp2ap_msg);
+
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		mif_err_limited("mbox check irq fails: err: %d\n", ret);
+		goto dummy_poll_complete;
+	}
+
+	if (ret)
+#endif
+	{ /* if an irq is raised, take care of commands */
+		ret = shmem_enqueue_snapshot(mld);
+		if (ret != -ENOMSG && ret != 0)
+			goto dummy_poll_complete;
+
+		qlen = mld->msb_rxq.qlen;
+
+		if (unlikely(!cp_online(mc))) { /* for boot and dump sequences */
+			queue_delayed_work(ld->rx_wq, &mld->bootdump_rx_dwork, 0);
+			goto dummy_poll_complete;
+		}
+
+		/* dispatch any CP2AP commands captured in the snapshots */
+		while (qlen-- > 0) {
+			struct mst_buff *msb;
+			u16 intr;
+
+			msb = msb_dequeue(&mld->msb_rxq);
+			if (!msb)
+				break;
+			intr = msb->snapshot.int2ap;
+			if (cmd_valid(intr))
+				mld->cmd_handler(mld, int2cmd(intr));
+			msb_free(msb);
+		}
+	}
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_WITH_SBD_ARCH)
+	if (sbd_active(&mld->sbd_link_dev)) {
+		ret = sbd_link_rx_func_napi(sl, ld, budget, &ps_rcvd);
+		if ((ret == -EBUSY) || (ret == -ENOMEM))
+			goto keep_poll;
+		else if (ret == -EFAULT) { /* unrecoverable error */
+			link_trigger_cp_crash(mld, CRASH_REASON_MIF_RX_BAD_DATA,
+					      "rp exceeds ring buffer size");
+			goto dummy_poll_complete;
+		}
+		budget -= ps_rcvd;
+		total_ps_rcvd += ps_rcvd;
+		ps_rcvd = 0;
+	} else
+#endif
+	{ /* legacy buffer */
+		ret = legacy_link_rx_func_napi(mld, budget, &ps_rcvd);
+		if ((ret == -EBUSY) || (ret == -ENOMEM))
+			goto keep_poll;
+		budget -= ps_rcvd;
+		total_ps_rcvd += ps_rcvd;
+		ps_rcvd = 0;
+	}
+
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+	if (total_ps_rcvd)
+		tpmon_start();
+#endif
+
+	/* budget not exhausted: complete NAPI and unmask the RX interrupt */
+	if (total_ps_rcvd < total_budget) {
+		napi_complete_done(napi, total_ps_rcvd);
+		ld->enable_rx_int(ld);
+		return total_ps_rcvd;
+	}
+
+keep_poll:
+	return total_budget;
+
+dummy_poll_complete:
+	napi_complete(napi);
+	ld->enable_rx_int(ld);
+
+	return 0;
+}
+
+/* Wait until any in-flight NAPI poll on the dummy netdev has finished. */
+static void sync_net_dev(struct link_device *ld)
+{
+	struct mem_link_device *mdev = to_mem_link_device(ld);
+
+	/* Blocks until mld_napi is guaranteed not to be running */
+	napi_synchronize(&mdev->mld_napi);
+	mif_info("%s\n", netdev_name(&mdev->dummy_net));
+}
+
+/*
+ * link_start_normal_boot - prepare the shared-memory link for a normal CP boot.
+ *
+ * Quiesces NAPI, resets the legacy link rings, drops stale RX frames, and
+ * verifies the shared-memory region by writing and reading back the boot
+ * magic value. Returns 0 on success or -EFAULT if the magic read-back fails.
+ */
+static int link_start_normal_boot(struct link_device *ld, struct io_device *iod)
+{
+ struct mem_link_device *mld = to_mem_link_device(ld);
+
+ /* NOTE(review): unlike link_start_partial_boot(), this call is not
+ * guarded by CONFIG_LINK_DEVICE_WITH_SBD_ARCH — confirm sbd fields
+ * exist unconditionally. */
+ if (ld->sbd_ipc && mld->attrs & LINK_ATTR_MEM_DUMP)
+ sbd_deactivate(&mld->sbd_link_dev);
+
+ sync_net_dev(ld);
+
+ /* Reset legacy ring buffers and drop any stale RX frames */
+ init_legacy_link(&mld->legacy_link_dev);
+ skb_queue_purge(&iod->sk_rx_q);
+
+ if (mld->attrs & LINK_ATTR_BOOT_ALIGNED)
+ ld->aligned = true;
+ else
+ ld->aligned = false;
+
+ /* Write the boot magic and read it back to verify shmem is reachable */
+ if (mld->dpram_magic) {
+ unsigned int magic;
+
+ iowrite32(ld->magic_boot, mld->legacy_link_dev.magic);
+ magic = ioread32(mld->legacy_link_dev.magic);
+ if (magic != ld->magic_boot) {
+ mif_err("%s: ERR! magic 0x%08X != BOOT_MAGIC 0x%08X\n",
+ ld->name, magic, ld->magic_boot);
+ return -EFAULT;
+ }
+ mif_info("%s: magic == 0x%08X\n", ld->name, magic);
+ }
+
+ return 0;
+}
+
+/*
+ * link_start_dump_boot - prepare the shared-memory link for a CP crash-dump
+ * boot. Same sequence as link_start_normal_boot() but uses the dump-specific
+ * alignment attribute and magic value. Returns 0 or -EFAULT if the magic
+ * read-back fails.
+ */
+static int link_start_dump_boot(struct link_device *ld, struct io_device *iod)
+{
+ struct mem_link_device *mld = to_mem_link_device(ld);
+
+ if (ld->sbd_ipc && mld->attrs & LINK_ATTR_MEM_DUMP)
+ sbd_deactivate(&mld->sbd_link_dev);
+
+ sync_net_dev(ld);
+
+ /* Reset legacy ring buffers and drop any stale RX frames */
+ init_legacy_link(&mld->legacy_link_dev);
+ skb_queue_purge(&iod->sk_rx_q);
+
+ if (mld->attrs & LINK_ATTR_DUMP_ALIGNED)
+ ld->aligned = true;
+ else
+ ld->aligned = false;
+
+ /* Write the dump magic and read it back to verify shmem is reachable */
+ if (mld->dpram_magic) {
+ unsigned int magic;
+
+ iowrite32(ld->magic_dump, mld->legacy_link_dev.magic);
+ magic = ioread32(mld->legacy_link_dev.magic);
+ if (magic != ld->magic_dump) {
+ mif_err("%s: ERR! magic 0x%08X != DUMP_MAGIC 0x%08X\n",
+ ld->name, magic, ld->magic_dump);
+ return -EFAULT;
+ }
+ mif_info("%s: magic == 0x%08X\n", ld->name, magic);
+ }
+
+ return 0;
+}
+
+/*
+ * link_start_partial_boot - prepare the link for a partial CP (re)boot.
+ *
+ * NOTE(review): this variant checks LINK_ATTR_BOOT_ALIGNED but writes
+ * ld->magic_dump rather than ld->magic_boot — presumably a partial boot is
+ * entered from the dump state, but confirm this mix is intentional.
+ */
+static int link_start_partial_boot(struct link_device *ld, struct io_device *iod)
+{
+ struct mem_link_device *mld = to_mem_link_device(ld);
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_WITH_SBD_ARCH)
+ if (ld->sbd_ipc && mld->attrs & LINK_ATTR_MEM_DUMP)
+ sbd_deactivate(&mld->sbd_link_dev);
+#endif
+
+ sync_net_dev(ld);
+
+ /* Reset legacy ring buffers and drop any stale RX frames */
+ init_legacy_link(&mld->legacy_link_dev);
+ skb_queue_purge(&iod->sk_rx_q);
+
+ if (mld->attrs & LINK_ATTR_BOOT_ALIGNED)
+ ld->aligned = true;
+ else
+ ld->aligned = false;
+
+ /* Write the (dump) magic and read it back to verify shmem is reachable */
+ if (mld->dpram_magic) {
+ unsigned int magic;
+
+ iowrite32(ld->magic_dump, mld->legacy_link_dev.magic);
+ magic = ioread32(mld->legacy_link_dev.magic);
+ if (magic != ld->magic_dump) {
+ mif_err("%s: ERR! magic 0x%08X != DUMP_MAGIC 0x%08X\n",
+ ld->name, magic, ld->magic_dump);
+ return -EFAULT;
+ }
+ mif_info("%s: magic == 0x%08X\n", ld->name, magic);
+ }
+
+ return 0;
+}
+
+/*
+ * shmem_close_tx - take the link offline for transmit.
+ *
+ * Marks the link OFFLINE under the state lock, cancels the pending
+ * crash-ack timer, stops the network interfaces and purges all queued
+ * TX data.
+ */
+static void shmem_close_tx(struct link_device *ld)
+{
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mld->state_lock, flags);
+	mld->state = LINK_STATE_OFFLINE;
+	spin_unlock_irqrestore(&mld->state_lock, flags);
+
+	/*
+	 * del_timer() is a no-op on an inactive timer, so the previous
+	 * timer_pending() pre-check was redundant (and racy: the timer
+	 * could fire between check and delete).
+	 */
+	del_timer(&mld->crash_ack_timer);
+
+	stop_net_ifaces(ld, 0);
+	purge_txq(mld);
+}
+
+/*
+ * IOCTL backend: copy the last recorded CP crash reason out to user space.
+ * Returns 0 on success, -EFAULT if the user buffer cannot be written.
+ */
+static int get_cp_crash_reason(struct link_device *ld, struct io_device *iod,
+			       unsigned long arg)
+{
+	void __user *ubuf = (void __user *)arg;
+
+	if (copy_to_user(ubuf, &ld->crash_reason, sizeof(struct crash_reason))) {
+		mif_err("ERR! copy_to_user fail!\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+/*============================================================================*/
+
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_SHMEM)
+/*
+ * Read the CP->AP message (interrupt/command) register.
+ *
+ * Cast explicitly to u16 for consistency with every other accessor in this
+ * file (shmem_read_ap2cp_irq, pcie_recv_cp2ap_irq, ...); this was the only
+ * one relying on implicit truncation.
+ */
+static u16 shmem_recv_cp2ap_irq(struct mem_link_device *mld)
+{
+	return (u16)get_ctrl_msg(&mld->cp2ap_msg);
+}
+
+/* Extract the CP status bit-field from the cp2ap united-status register. */
+static u16 shmem_recv_cp2ap_status(struct mem_link_device *mld)
+{
+	u32 status;
+
+	status = extract_ctrl_msg(&mld->cp2ap_united_status,
+				  mld->sbi_cp_status_mask,
+				  mld->sbi_cp_status_pos);
+
+	return (u16)status;
+}
+
+/* Write @mask into the AP->CP message register, then ring the CP mailbox. */
+static void shmem_send_ap2cp_irq(struct mem_link_device *mld, u16 mask)
+{
+ set_ctrl_msg(&mld->ap2cp_msg, mask);
+
+ /* Raise the AP->CP message interrupt so CP reads the register */
+ cp_mbox_set_interrupt(CP_MBOX_IRQ_IDX_0, mld->int_ap2cp_msg);
+}
+
+/* Read back the last AP->CP message value (no side effects). */
+static inline u16 shmem_read_ap2cp_irq(struct mem_link_device *mld)
+{
+ return (u16)get_ctrl_msg(&mld->ap2cp_msg);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+/* PCIe variant: read the CP->AP message (interrupt/command) register. */
+static u16 pcie_recv_cp2ap_irq(struct mem_link_device *mld)
+{
+ return (u16)get_ctrl_msg(&mld->cp2ap_msg);
+}
+
+/* PCIe variant: read the whole cp2ap united-status register (no bit-field
+ * extraction, unlike shmem_recv_cp2ap_status()). */
+static u16 pcie_recv_cp2ap_status(struct mem_link_device *mld)
+{
+ return (u16)get_ctrl_msg(&mld->cp2ap_united_status);
+}
+
+/*
+ * pcie_send_ap2cp_irq - deliver an AP->CP message over PCIe.
+ *
+ * Writes @mask to the AP->CP message register and rings the CP doorbell.
+ * If the PCIe link is mid on/off transition or powered down, the doorbell
+ * is deferred (reserve_doorbell_int) and, in the powered-down case, a CP
+ * wakeup is requested instead. A failed doorbell write forces a crash exit
+ * outside the spinlock.
+ */
+static void pcie_send_ap2cp_irq(struct mem_link_device *mld, u16 mask)
+{
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ unsigned long flags;
+ bool force_crash = false;
+
+ spin_lock_irqsave(&mc->pcie_tx_lock, flags);
+
+ /* pcie_onoff_lock held elsewhere means the link is transitioning */
+ if (mutex_is_locked(&mc->pcie_onoff_lock)) {
+ mif_info_limited("Reserve doorbell interrupt: PCI on/off working\n");
+ set_ctrl_msg(&mld->ap2cp_msg, mask);
+ mc->reserve_doorbell_int = true;
+ goto exit;
+ }
+
+ if (!mc->pcie_powered_on) {
+ mif_info_limited("Reserve doorbell interrupt: PCI not powered on\n");
+ set_ctrl_msg(&mld->ap2cp_msg, mask);
+ mc->reserve_doorbell_int = true;
+ /* Ask CP to wake; the reserved doorbell fires after power-on */
+ s5100_try_gpio_cp_wakeup(mc);
+ goto exit;
+ }
+
+ set_ctrl_msg(&mld->ap2cp_msg, mask);
+ mc->reserve_doorbell_int = false;
+ if (s51xx_pcie_send_doorbell_int(mc->s51xx_pdev, mld->intval_ap2cp_msg) != 0)
+ force_crash = true;
+
+exit:
+ spin_unlock_irqrestore(&mc->pcie_tx_lock, flags);
+
+ /* Crash exit must happen after dropping pcie_tx_lock */
+ if (unlikely(force_crash))
+ s5100_force_crash_exit_ext(CRASH_REASON_PCIE_DOORBELL_FALIURE_AP2CP_IRQ);
+}
+
+/* PCIe variant: read back the last AP->CP message value (no side effects). */
+static inline u16 pcie_read_ap2cp_irq(struct mem_link_device *mld)
+{
+ return (u16)get_ctrl_msg(&mld->ap2cp_msg);
+}
+#endif
+
+/* User-space exchange format for the SR-info region (IOCTL_{GET,SET}_SRINFO). */
+struct shmem_srinfo {
+	unsigned int size;	/* number of valid bytes in buf[] */
+	char buf[];		/* C99 flexible array member; the deprecated
+				 * zero-length "buf[0]" GNU idiom defeats
+				 * compiler/fortify bounds checking */
+};
+
+/* not in use */
+/*
+ * shmem_ioctl - link-level ioctl backend for SR-info and CP boot-log access.
+ *
+ * Fixes vs. previous revision:
+ *  - the CP boot-log string is now explicitly NUL-terminated: strncpy()
+ *    does not terminate when the source has no NUL within @size, so the
+ *    subsequent "%s" print could run past the copied bytes;
+ *  - @size is unsigned, so the old "size <= 0" check only ever caught 0
+ *    and is now written as "size == 0"; the log formats use %u to match.
+ */
+static int shmem_ioctl(struct link_device *ld, struct io_device *iod,
+		       unsigned int cmd, unsigned long arg)
+{
+	struct mem_link_device *mld = ld_to_mem_link_device(ld);
+
+	mif_info("%s: cmd 0x%08X\n", ld->name, cmd);
+	switch (cmd) {
+	case IOCTL_GET_SRINFO:
+	{
+		struct shmem_srinfo __user *sr_arg =
+			(struct shmem_srinfo __user *)arg;
+		unsigned int count, size = mld->srinfo_size;
+
+		if (copy_from_user(&count, &sr_arg->size, sizeof(unsigned int)))
+			return -EFAULT;
+
+		mif_info("get srinfo:%s, size = %d\n", iod->name, count);
+
+		/* Never copy more than the caller's buffer or the region */
+		size = min(size, count);
+		if (copy_to_user(&sr_arg->size, &size, sizeof(unsigned int)))
+			return -EFAULT;
+
+		if (copy_to_user(sr_arg->buf, mld->srinfo_base, size))
+			return -EFAULT;
+		break;
+	}
+
+	case IOCTL_SET_SRINFO:
+	{
+		struct shmem_srinfo __user *sr_arg =
+			(struct shmem_srinfo __user *)arg;
+		unsigned int count, size = mld->srinfo_size;
+
+		if (copy_from_user(&count, &sr_arg->size, sizeof(unsigned int)))
+			return -EFAULT;
+
+		mif_info("set srinfo:%s, size = %d\n", iod->name, count);
+
+		if (copy_from_user(mld->srinfo_base, sr_arg->buf, min(count, size)))
+			return -EFAULT;
+		break;
+	}
+
+	case IOCTL_GET_CP_BOOTLOG:
+	{
+		u8 __iomem *base = mld->base + SHMEM_BOOTLOG_BASE;
+		char str[SHMEM_BOOTLOG_BUFF];
+		/* Little-endian 32-bit length header at the region start */
+		unsigned int size = base[0] + (base[1] << 8)
+			+ (base[2] << 16) + (base[3] << 24);
+
+		if (size == 0 || size > SHMEM_BOOTLOG_BUFF - SHMEM_BOOTLOG_OFFSET) {
+			mif_info("Invalid CP boot log[%u]\n", size);
+			return -EINVAL;
+		}
+
+		strncpy(str, base + SHMEM_BOOTLOG_OFFSET, size);
+		/* strncpy() does not guarantee termination; size < BUFF here */
+		str[size] = '\0';
+		mif_info("CP boot log[%u] : %s\n", size, str);
+		break;
+	}
+
+	case IOCTL_CLR_CP_BOOTLOG:
+	{
+		u8 __iomem *base = mld->base + SHMEM_BOOTLOG_BASE;
+
+		mif_info("Clear CP boot log\n");
+		memset(base, 0, SHMEM_BOOTLOG_BUFF);
+		break;
+	}
+
+	default:
+		mif_err("%s: ERR! invalid cmd 0x%08X\n", ld->name, cmd);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * shmem_tx_state_handler - IRQ handler for CP TX flow-control state changes.
+ *
+ * Reads the CP status, remaps the SHM flow-control bit into the
+ * MASK_TX_FLOWCTRL bit position, and suspends or resumes AP-side TX
+ * accordingly. Repeated identical commands are ignored via chk_same_cmd().
+ */
+irqreturn_t shmem_tx_state_handler(int irq, void *data)
+{
+ struct mem_link_device *mld = data;
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ u16 int2ap_status;
+
+ int2ap_status = mld->recv_cp2ap_status(mld);
+
+ /* Change SHM_FLOWCTL to MASK_TX_FLOWCTRL */
+ int2ap_status = (int2ap_status & SHM_FLOWCTL_BIT) << 2;
+
+ switch (int2ap_status & (SHM_FLOWCTL_BIT << 2)) {
+ case MASK_TX_FLOWCTL_SUSPEND:
+ if (!chk_same_cmd(mld, int2ap_status))
+ tx_flowctrl_suspend(mld);
+ break;
+
+ case MASK_TX_FLOWCTL_RESUME:
+ if (!chk_same_cmd(mld, int2ap_status))
+ tx_flowctrl_resume(mld);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Log-only check; the flow-control state change above still applies */
+ if (unlikely(!rx_possible(mc))) {
+ mif_err("%s: ERR! %s.state == %s\n", ld->name, mc->name,
+ mc_state(mc));
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Take a snapshot of the current CP2AP interrupt state and queue it for the
+ * NAPI poll loop to dispatch. Returns 0 on success, -ENOMEM if no snapshot
+ * buffer, -EINVAL for an invalid interrupt or a CP state that cannot RX,
+ * and -ENOMSG when the interrupt carries no command (nothing to queue).
+ */
+static int shmem_enqueue_snapshot(struct mem_link_device *mld)
+{
+	struct link_device *ld = &mld->link_dev;
+	struct modem_ctl *mc = ld->mc;
+	struct mst_buff *snap;
+
+	snap = mem_take_snapshot(mld, RX);
+	if (!snap)
+		return -ENOMEM;
+
+	if (unlikely(!int_valid(snap->snapshot.int2ap))) {
+		mif_err("%s: ERR! invalid intr 0x%X\n",
+			ld->name, snap->snapshot.int2ap);
+		goto drop_invalid;
+	}
+
+	if (unlikely(!rx_possible(mc))) {
+		mif_err("%s: ERR! %s.state == %s\n", ld->name, mc->name,
+			mc_state(mc));
+		goto drop_invalid;
+	}
+
+	/* Interrupts without a command are valid but not worth queueing */
+	if (unlikely(!cmd_valid(snap->snapshot.int2ap))) {
+		msb_free(snap);
+		return -ENOMSG;
+	}
+
+	msb_queue_tail(&mld->msb_rxq, snap);
+	return 0;
+
+drop_invalid:
+	msb_free(snap);
+	return -EINVAL;
+}
+
+/*
+ * Top-half RX interrupt handler: mask the RX interrupt and defer all work
+ * to the NAPI poll context (mld_rx_int_poll).
+ */
+irqreturn_t shmem_irq_handler(int irq, void *data)
+{
+	struct mem_link_device *mld = data;
+	struct link_device *ld = &mld->link_dev;
+
+	mld->rx_int_count++;
+
+	/* napi_schedule_prep() fails if NAPI is already scheduled */
+	if (napi_schedule_prep(&mld->mld_napi)) {
+		ld->disable_rx_int(ld);
+		__napi_schedule(&mld->mld_napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+#if IS_ENABLED(CONFIG_MCU_IPC)
+/*
+ * shmem_cp2ap_wakelock_handler - CP-requested AP wakelock control.
+ *
+ * req == 0 releases the wakelock, req == 1 acquires it; anything else is
+ * rejected. Fix: the "already held" branch used to log
+ * "cp_wakelock already unlocked" — the lock is active there, so it now
+ * correctly logs "already locked".
+ */
+static irqreturn_t shmem_cp2ap_wakelock_handler(int irq, void *data)
+{
+	struct mem_link_device *mld = data;
+	unsigned int req;
+
+	mif_info("%s\n", __func__);
+
+	req = extract_ctrl_msg(&mld->cp2ap_united_status, mld->sbi_cp2ap_wakelock_mask,
+			mld->sbi_cp2ap_wakelock_pos);
+
+	if (req == 0) {
+		if (cpif_wake_lock_active(mld->ws)) {
+			cpif_wake_unlock(mld->ws);
+			mif_info("cp_wakelock unlocked\n");
+		} else {
+			mif_info("cp_wakelock already unlocked\n");
+		}
+	} else if (req == 1) {
+		if (cpif_wake_lock_active(mld->ws)) {
+			mif_info("cp_wakelock already locked\n");
+		} else {
+			cpif_wake_lock(mld->ws);
+			mif_info("cp_wakelock locked\n");
+		}
+	} else {
+		mif_err("unsupported request: cp_wakelock\n");
+	}
+
+	return IRQ_HANDLED;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_MCU_IPC) && IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+/*
+ * CP RAT-mode notification: a non-zero value asks the AP to disable PCIe
+ * L1SS power saving; zero re-enables it.
+ */
+static irqreturn_t shmem_cp2ap_rat_mode_handler(int irq, void *data)
+{
+	struct mem_link_device *mld = data;
+	unsigned int rat_req;
+
+	rat_req = extract_ctrl_msg(&mld->cp2ap_united_status, mld->sbi_cp_rat_mode_mask,
+			mld->sbi_cp_rat_mode_pos);
+
+	mif_info("value: %u\n", rat_req);
+
+	if (rat_req) {
+		s51xx_pcie_l1ss_ctrl(0);
+		mif_info("cp requests pcie l1ss disable\n");
+	} else {
+		s51xx_pcie_l1ss_ctrl(1);
+		mif_info("cp requests pcie l1ss enable\n");
+	}
+
+	return IRQ_HANDLED;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+#define CLATINFO_ACK_TIMEOUT (1000) /* ms */
+/*
+ * shmem_ap2cp_write_clatinfo - push CLAT translation info to the CP.
+ *
+ * Writes the IPv4 subnet, the four 32-bit words of the IPv6 address and the
+ * CLAT index into the ap2cp control registers, rings the clatinfo mailbox
+ * interrupt, and waits up to CLATINFO_ACK_TIMEOUT ms for the CP's ack.
+ * On ack, the CLAT netdev is attached (non-empty ipv4_iface) or detached
+ * (empty ipv4_iface, after a grace delay). Returns true on acked delivery.
+ */
+bool shmem_ap2cp_write_clatinfo(struct mem_link_device *mld, struct clat_info *clat)
+{
+ u8 *buff;
+ u32 addr;
+ struct link_device *ld = &mld->link_dev;
+ struct modem_ctl *mc = ld->mc;
+ unsigned long remain;
+ unsigned long timeout = msecs_to_jiffies(CLATINFO_ACK_TIMEOUT);
+ bool ret = true;
+
+ if (mld->disable_hw_clat)
+ return false;
+
+ /* Serializes register writes + ack wait against concurrent callers */
+ mutex_lock(&mld->clatinfo_lock);
+
+ buff = (u8 *)&clat->ipv4_local_subnet;
+ memcpy(&addr, &clat->ipv4_local_subnet, sizeof(addr));
+ mif_info("xlat_v4_addr: %02X %02X %02X %02X\n", buff[0], buff[1], buff[2], buff[3]);
+ set_ctrl_msg(&mld->ap2cp_clatinfo_xlat_v4_addr, addr);
+
+ /* IPv6 address is written as four consecutive 32-bit words */
+ buff = (u8 *)&clat->ipv6_local_subnet;
+ memcpy(&addr, buff, sizeof(addr));
+ mif_info("xlat_addr_0: %02X %02X %02X %02X\n", buff[0], buff[1], buff[2], buff[3]);
+ set_ctrl_msg(&mld->ap2cp_clatinfo_xlat_addr_0, addr);
+
+ buff += sizeof(addr);
+ memcpy(&addr, buff, sizeof(addr));
+ mif_info("xlat_addr_1: %02X %02X %02X %02X\n", buff[0], buff[1], buff[2], buff[3]);
+ set_ctrl_msg(&mld->ap2cp_clatinfo_xlat_addr_1, addr);
+
+ buff += sizeof(addr);
+ memcpy(&addr, buff, sizeof(addr));
+ mif_info("xlat_addr_2: %02X %02X %02X %02X\n", buff[0], buff[1], buff[2], buff[3]);
+ set_ctrl_msg(&mld->ap2cp_clatinfo_xlat_addr_2, addr);
+
+ buff += sizeof(addr);
+ memcpy(&addr, buff, sizeof(addr));
+ mif_info("xlat_addr_3: %02X %02X %02X %02X\n", buff[0], buff[1], buff[2], buff[3]);
+ set_ctrl_msg(&mld->ap2cp_clatinfo_xlat_addr_3, addr);
+
+ mif_info("clat_index: %d\n", clat->clat_index);
+ set_ctrl_msg(&mld->ap2cp_clatinfo_index, clat->clat_index);
+
+ mif_info("send ap2cp_clatinfo_irq: %d\n", mld->int_ap2cp_clatinfo_send);
+ /* Re-arm the completion before raising the interrupt to avoid a race */
+ reinit_completion(&mc->clatinfo_ack);
+ cp_mbox_set_interrupt(CP_MBOX_IRQ_IDX_0, mld->int_ap2cp_clatinfo_send);
+
+ remain = wait_for_completion_timeout(&mc->clatinfo_ack, timeout);
+ if (remain == 0) {
+ mif_err("clatinfo ack not delivered from cp\n");
+ ret = false;
+ goto out;
+ }
+
+ /* set clat_ndev with clat registers */
+ if (clat->ipv4_iface[0])
+ iodevs_for_each(ld->msd, toe_set_iod_clat_netdev, clat);
+
+out:
+ mutex_unlock(&mld->clatinfo_lock);
+
+ /* clear clat_ndev but take a delay to prevent null ndev */
+ if (!clat->ipv4_iface[0]) {
+ msleep(100);
+ iodevs_for_each(ld->msd, toe_set_iod_clat_netdev, clat);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(shmem_ap2cp_write_clatinfo);
+
+/* CP -> AP ack for a clatinfo write: wake every waiter on clatinfo_ack. */
+static irqreturn_t shmem_cp2ap_clatinfo_ack(int irq, void *data)
+{
+	struct mem_link_device *mld = data;
+	struct modem_ctl *mc = mld->link_dev.mc;
+
+	mif_info("CP replied clatinfo ack - use v4-rmmet path\n");
+
+	complete_all(&mc->clatinfo_ack);
+
+	return IRQ_HANDLED;
+}
+
+/* Debug helper: push a recognizable 0,1,2,... byte pattern as clatinfo. */
+static void clatinfo_test(struct mem_link_device *mld)
+{
+	struct clat_info clat;
+	unsigned char *raw = (unsigned char *)&clat;
+	unsigned int pos;
+
+	for (pos = 0; pos < sizeof(clat); pos++)
+		raw[pos] = pos;
+
+	/* Only the index is forced to a real value */
+	clat.clat_index = 0;
+
+	shmem_ap2cp_write_clatinfo(mld, &clat);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_ECT)
+/*
+ * parse_ect - load one DVFS frequency table from the ECT block.
+ *
+ * @dvfs_domain_name selects which mld table is filled ("MIF", "CP_CPU",
+ * "CP", "CP_EM" or "CP_MCW"). Levels are copied in reverse so index 0
+ * holds the lowest frequency. Returns -ENODEV when the ECT block or the
+ * domain is missing, 0 otherwise (including for unknown domain names).
+ */
+static int parse_ect(struct mem_link_device *mld, char *dvfs_domain_name)
+{
+ int i, counter = 0;
+ u32 mif_max_freq, mif_max_num_of_table = 0;
+ void *dvfs_block;
+ struct ect_dvfs_domain *dvfs_domain;
+
+ dvfs_block = ect_get_block(BLOCK_DVFS);
+ if (dvfs_block == NULL)
+ return -ENODEV;
+
+ dvfs_domain = ect_dvfs_get_domain(dvfs_block, (char *)dvfs_domain_name);
+ if (dvfs_domain == NULL)
+ return -ENODEV;
+
+ if (!strcmp(dvfs_domain_name, "MIF")) {
+ mld->mif_table.num_of_table = dvfs_domain->num_of_level;
+ mif_max_num_of_table = dvfs_domain->num_of_level;
+ mld->total_freq_table_count++;
+
+ /* Optionally clamp the table to the CAL-reported max frequency:
+ * levels above the max are overwritten with the max itself.
+ * NOTE(review): freq[] is written before the matching level is
+ * found, and counter/mif_max_num_of_table are only adjusted on
+ * an exact match — confirm behavior when no level equals
+ * mif_max_freq. */
+ if (mld->mif_table.use_dfs_max_freq) {
+ mif_info("use dfs max freq\n");
+ mif_max_freq = cal_dfs_get_max_freq(mld->mif_table.cal_id_mif);
+
+ for (i = 0; i < mif_max_num_of_table; i++) {
+ if (dvfs_domain->list_level[i].level == mif_max_freq) {
+ mif_max_num_of_table = mif_max_num_of_table - i;
+ counter = i;
+ break;
+ }
+
+ mld->mif_table.freq[mif_max_num_of_table - 1 - i] = mif_max_freq;
+ mif_info("MIF_LEV[%d] : %u\n",
+ mif_max_num_of_table - i, mif_max_freq);
+ }
+ }
+
+ /* Remaining levels, highest ECT entry first -> highest index */
+ for (i = mif_max_num_of_table - 1; i >= 0; i--) {
+ mld->mif_table.freq[i] =
+ dvfs_domain->list_level[counter++].level;
+ mif_info("MIF_LEV[%d] : %u\n", i + 1,
+ mld->mif_table.freq[i]);
+ }
+ } else if (!strcmp(dvfs_domain_name, "CP_CPU")) {
+ mld->cp_cpu_table.num_of_table = dvfs_domain->num_of_level;
+ mld->total_freq_table_count++;
+ for (i = dvfs_domain->num_of_level - 1; i >= 0; i--) {
+ mld->cp_cpu_table.freq[i] =
+ dvfs_domain->list_level[counter++].level;
+ mif_info("CP_CPU_LEV[%d] : %u\n", i + 1,
+ mld->cp_cpu_table.freq[i]);
+ }
+ } else if (!strcmp(dvfs_domain_name, "CP")) {
+ mld->cp_table.num_of_table = dvfs_domain->num_of_level;
+ mld->total_freq_table_count++;
+ for (i = dvfs_domain->num_of_level - 1; i >= 0; i--) {
+ mld->cp_table.freq[i] =
+ dvfs_domain->list_level[counter++].level;
+ mif_info("CP_LEV[%d] : %u\n", i + 1,
+ mld->cp_table.freq[i]);
+ }
+ } else if (!strcmp(dvfs_domain_name, "CP_EM")) {
+ mld->cp_em_table.num_of_table = dvfs_domain->num_of_level;
+ mld->total_freq_table_count++;
+ for (i = dvfs_domain->num_of_level - 1; i >= 0; i--) {
+ mld->cp_em_table.freq[i] =
+ dvfs_domain->list_level[counter++].level;
+ mif_info("CP_LEV[%d] : %u\n", i + 1,
+ mld->cp_em_table.freq[i]);
+ }
+ } else if (!strcmp(dvfs_domain_name, "CP_MCW")) {
+ mld->cp_mcw_table.num_of_table = dvfs_domain->num_of_level;
+ mld->total_freq_table_count++;
+ for (i = dvfs_domain->num_of_level - 1; i >= 0; i--) {
+ mld->cp_mcw_table.freq[i] =
+ dvfs_domain->list_level[counter++].level;
+ mif_info("CP_LEV[%d] : %u\n", i + 1,
+ mld->cp_mcw_table.freq[i]);
+ }
+ }
+
+ return 0;
+}
+#else
+/* CONFIG_ECT disabled: report empty frequency tables for every domain. */
+static int parse_ect(struct mem_link_device *mld, char *dvfs_domain_name)
+{
+ mif_err("ECT is not defined(%s)\n", __func__);
+
+ mld->mif_table.num_of_table = 0;
+ mld->cp_cpu_table.num_of_table = 0;
+ mld->cp_table.num_of_table = 0;
+ mld->cp_em_table.num_of_table = 0;
+ mld->cp_mcw_table.num_of_table = 0;
+
+ return 0;
+}
+#endif
+
+/*
+ * Allocate the single-threaded, high-priority workqueue used for
+ * boot/dump-time RX work. Returns 0 or -ENOMEM.
+ */
+static int shmem_rx_setup(struct link_device *ld)
+{
+	struct workqueue_struct *wq;
+
+	wq = alloc_workqueue("mem_rx_work", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
+	if (!wq) {
+		mif_err("%s: ERR! fail to create rx_wq\n", ld->name);
+		return -ENOMEM;
+	}
+
+	ld->rx_wq = wq;
+	return 0;
+}
+
+/* sysfs */
+/* sysfs read: TX timer period, converted from nanoseconds to milliseconds. */
+static ssize_t tx_period_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct modem_data *modem;
+
+ modem = (struct modem_data *)dev->platform_data;
+ return sysfs_emit(buf, "%ld\n",
+ modem->mld->tx_period_ns / NSEC_PER_MSEC);
+}
+
+/*
+ * sysfs write: set the TX timer period in milliseconds.
+ *
+ * Fixes vs. previous revision: the value is parsed into a local first, so
+ * readers never observe the transient unconverted value, and periods whose
+ * nanosecond representation would overflow the counter are rejected with
+ * -ERANGE instead of silently wrapping.
+ */
+static ssize_t tx_period_ms_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct modem_data *modem;
+	unsigned int period_ms;
+	int ret;
+
+	modem = (struct modem_data *)dev->platform_data;
+
+	ret = kstrtouint(buf, 0, &period_ms);
+	if (ret)
+		return -EINVAL;
+
+	/* period_ms * NSEC_PER_MSEC must fit in tx_period_ns */
+	if (period_ms > UINT_MAX / NSEC_PER_MSEC)
+		return -ERANGE;
+
+	modem->mld->tx_period_ns = period_ms * NSEC_PER_MSEC;
+
+	return count;
+}
+
+/*
+ * sysfs read: dump the shared-memory info region — version/offset registers
+ * (only those whose offsets are configured), optional per-part capability
+ * registers, and the AP<->CP control-message registers.
+ */
+static ssize_t info_region_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct modem_data *modem;
+ struct mem_link_device *mld;
+ ssize_t count = 0;
+
+ modem = (struct modem_data *)dev->platform_data;
+ mld = modem->mld;
+
+ /* A zero offset means the field is absent from this memory layout */
+ if (modem->offset_ap_version)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "ap_version:0x%08X\n",
+ ioread32(mld->ap_version));
+
+ if (modem->offset_cp_version)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "cp_version:0x%08X\n",
+ ioread32(mld->cp_version));
+
+ if (modem->offset_cmsg_offset)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "cmsg_offset:0x%08X\n",
+ ioread32(mld->cmsg_offset));
+
+ if (modem->offset_srinfo_offset)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "srinfo_offset:0x%08X\n",
+ ioread32(mld->srinfo_offset));
+
+ if (modem->offset_clk_table_offset)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "clk_table_offset:0x%08X\n",
+ ioread32(mld->clk_table_offset));
+
+ if (modem->offset_buff_desc_offset)
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "buff_desc_offset:0x%08X\n",
+ ioread32(mld->buff_desc_offset));
+
+ if (modem->offset_capability_offset) {
+ int part;
+
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "capability_offset:0x%08X\n",
+ ioread32(mld->capability_offset));
+
+ for (part = 0; part < AP_CP_CAP_PARTS; part++) {
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "ap_capability_offset[%d]:0x%08X\n", part,
+ ioread32(mld->ap_capability_offset[part]));
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "cp_capability_offset[%d]:0x%08X\n", part,
+ ioread32(mld->cp_capability_offset[part]));
+ }
+ }
+
+ /* Control-message registers are always present */
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "ap2cp_msg:0x%08X\n",
+ get_ctrl_msg(&mld->ap2cp_msg));
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "cp2ap_msg:0x%08X\n",
+ get_ctrl_msg(&mld->cp2ap_msg));
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "ap2cp_united_status:0x%08X\n",
+ get_ctrl_msg(&mld->ap2cp_united_status));
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "cp2ap_united_status:0x%08X\n",
+ get_ctrl_msg(&mld->cp2ap_united_status));
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "ap2cp_kerneltime:0x%08X\n",
+ get_ctrl_msg(&mld->ap2cp_kerneltime));
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "ap2cp_kerneltime_sec:0x%08X\n",
+ get_ctrl_msg(&mld->ap2cp_kerneltime_sec));
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "ap2cp_kerneltime_usec:0x%08X\n",
+ get_ctrl_msg(&mld->ap2cp_kerneltime_usec));
+
+ return count;
+}
+
+/* Default (unnamed) sysfs attribute group for the link device */
+static DEVICE_ATTR_RW(tx_period_ms);
+static DEVICE_ATTR_RO(info_region);
+
+static struct attribute *link_device_attrs[] = {
+ &dev_attr_tx_period_ms.attr,
+ &dev_attr_info_region.attr,
+ NULL,
+};
+
+static const struct attribute_group link_device_group = {
+ .attrs = link_device_attrs,
+};
+
+/* sysfs for napi */
+/* sysfs read: list every NAPI context registered on the dummy netdev with
+ * its scheduling state, weight and poll callback. */
+static ssize_t rx_napi_list_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct modem_data *modem;
+ struct napi_struct *n;
+ struct net_device *netdev;
+ ssize_t count = 0;
+
+ modem = (struct modem_data *)dev->platform_data;
+ netdev = &modem->mld->dummy_net;
+
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "[%s's napi_list]\n",
+ netdev_name(netdev));
+ list_for_each_entry(n, &netdev->napi_list, dev_list)
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "state: %s(%ld), weight: %d, poll: 0x%pK\n",
+ test_bit(NAPI_STATE_SCHED, &n->state) ?
+ "NAPI_STATE_SCHED" : "NAPI_STATE_COMPLETE",
+ n->state, n->weight, (void *)n->poll);
+
+ return count;
+}
+
+/* sysfs read: whether the RX interrupt is currently enabled. */
+static ssize_t rx_int_enable_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct modem_data *modem = (struct modem_data *)dev->platform_data;
+
+	return sysfs_emit(buf, "%d\n", modem->mld->rx_int_enable);
+}
+
+/* sysfs read: number of RX interrupts taken since the last reset. */
+static ssize_t rx_int_count_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct modem_data *modem = (struct modem_data *)dev->platform_data;
+
+	return sysfs_emit(buf, "%d\n", modem->mld->rx_int_count);
+}
+
+/*
+ * sysfs write: writing 0 resets the RX interrupt counter; any other value
+ * is accepted but ignored.
+ *
+ * Fix: @ret was declared unsigned int, so the "ret < 0" error check on the
+ * (int-returning) kstrtouint() could never fire.
+ */
+static ssize_t rx_int_count_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct modem_data *modem;
+	unsigned int val = 0;
+	int ret;
+
+	modem = (struct modem_data *)dev->platform_data;
+	ret = kstrtouint(buf, 0, &val);
+	if (ret < 0) {
+		mif_err("kstrtouint() failed, rc:%d\n", ret);
+	}
+
+	if (val == 0)
+		modem->mld->rx_int_count = 0;
+	return count;
+}
+
+/* sysfs read: number of NAPI poll invocations, tagged with the netdev name. */
+static ssize_t rx_poll_count_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct modem_data *modem = (struct modem_data *)dev->platform_data;
+	struct mem_link_device *mld = modem->mld;
+
+	return sysfs_emit(buf, "%s: %d\n",
+			netdev_name(&mld->dummy_net), mld->rx_poll_count);
+}
+
+/* sysfs write: any write resets the NAPI poll counter to zero. */
+static ssize_t rx_poll_count_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct modem_data *modem = (struct modem_data *)dev->platform_data;
+
+	modem->mld->rx_poll_count = 0;
+
+	return count;
+}
+
+/* sysfs read: accumulated time the RX interrupt has been kept masked. */
+static ssize_t rx_int_disabled_time_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct modem_data *modem = (struct modem_data *)dev->platform_data;
+
+	return sysfs_emit(buf, "%lld\n", modem->mld->rx_int_disabled_time);
+}
+
+/*
+ * sysfs write: writing 0 resets the RX-interrupt-disabled time accumulator;
+ * other values are accepted but ignored.
+ *
+ * Fix: @ret was declared unsigned int, so the "ret < 0" error check on the
+ * (int-returning) kstrtouint() could never fire.
+ */
+static ssize_t rx_int_disabled_time_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct modem_data *modem;
+	unsigned int val = 0;
+	int ret;
+
+	modem = (struct modem_data *)dev->platform_data;
+	ret = kstrtouint(buf, 0, &val);
+	if (ret < 0) {
+		mif_err("kstrtouint() failed, rc:%d", ret);
+	}
+
+	if (val == 0)
+		modem->mld->rx_int_disabled_time = 0;
+	return count;
+}
+
+/* "napi" sysfs attribute group: NAPI/RX-interrupt statistics and controls */
+static DEVICE_ATTR_RO(rx_napi_list);
+static DEVICE_ATTR_RO(rx_int_enable);
+static DEVICE_ATTR_RW(rx_int_count);
+static DEVICE_ATTR_RW(rx_poll_count);
+static DEVICE_ATTR_RW(rx_int_disabled_time);
+
+static struct attribute *napi_attrs[] = {
+ &dev_attr_rx_napi_list.attr,
+ &dev_attr_rx_int_enable.attr,
+ &dev_attr_rx_int_count.attr,
+ &dev_attr_rx_poll_count.attr,
+ &dev_attr_rx_int_disabled_time.attr,
+ NULL,
+};
+
+static const struct attribute_group napi_group = {
+ .attrs = napi_attrs,
+ .name = "napi",
+};
+
+#if defined(CPIF_WAKEPKT_SET_MARK)
+/* sysfs attribute for wake up events */
+static ssize_t wakeup_events_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct modem_data *modem;
+ struct mem_link_device *mld;
+ ssize_t count = 0;
+
+ modem = (struct modem_data *)dev->platform_data;
+ mld = modem->mld;
+
+ count += scnprintf(&buf[count], PAGE_SIZE - count, \
+ "Wakeup from NET packets: %d\n", \
+ atomic_read(&mld->net_wakeup_count));
+ count += scnprintf(&buf[count], PAGE_SIZE - count, \
+ "Wakeup from misc packets: %d\n", \
+ atomic_read(&mld->misc_wakeup_count));
+
+ return count;
+}
+
+/* Default (unnamed) sysfs group exposing the wakeup-event counters */
+static DEVICE_ATTR_RO(wakeup_events);
+
+static struct attribute *wakeup_attrs[] = {
+ &dev_attr_wakeup_events.attr,
+ NULL,
+};
+
+static const struct attribute_group wakeup_group = {
+ .attrs = wakeup_attrs,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+/*
+ * Debug sysfs write: writing "1" fires a clatinfo_test() pattern at the CP.
+ *
+ * Fix: the kstrtouint() result was previously ignored, so garbage input
+ * silently fell through with val == 0; it now returns -EINVAL.
+ */
+static ssize_t debug_hw_clat_test_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct modem_data *modem;
+	struct mem_link_device *mld;
+	unsigned int val = 0;
+	int ret;
+
+	modem = (struct modem_data *)dev->platform_data;
+	mld = modem->mld;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret)
+		return -EINVAL;
+
+	if (val == 1)
+		clatinfo_test(mld);
+
+	return count;
+}
+
+#define PKTPROC_CLAT_ADDR_MAX (4)
+/*
+ * Debug sysfs write: non-zero disables HW CLAT (after pushing a zeroed
+ * clatinfo for each rmnet index to clear CP-side state); zero re-enables it.
+ *
+ * Fix: @flag was declared unsigned int but passed to kstrtoint(), which
+ * takes an int * — an incompatible-pointer type mismatch.
+ */
+static ssize_t debug_disable_hw_clat_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct clat_info clat;
+	unsigned int i;
+	int flag;
+	int ret;
+	struct modem_data *modem;
+	struct mem_link_device *mld;
+
+	modem = (struct modem_data *)dev->platform_data;
+	mld = modem->mld;
+
+	ret = kstrtoint(buf, 0, &flag);
+	if (ret)
+		return -EINVAL;
+
+	if (flag) {
+		/* Clear CP-side CLAT state for every rmnet index */
+		memset(&clat, 0, sizeof(clat));
+		for (i = 0; i < PKTPROC_CLAT_ADDR_MAX; i++) {
+			clat.clat_index = i;
+			scnprintf(clat.ipv6_iface, IFNAMSIZ, "rmnet%d", i);
+			shmem_ap2cp_write_clatinfo(mld, &clat);
+			msleep(1000);
+		}
+	}
+
+	mld->disable_hw_clat = (flag > 0 ? true : false);
+
+	return count;
+}
+
+/* Debug sysfs read: report whether HW CLAT offload is disabled. */
+static ssize_t debug_disable_hw_clat_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct modem_data *modem = (struct modem_data *)dev->platform_data;
+	struct mem_link_device *mld = modem->mld;
+
+	return sysfs_emit(buf, "disable_hw_clat: %d\n", mld->disable_hw_clat);
+}
+
+/* "hw_clat" sysfs group: debug controls for the HW CLAT offload path */
+static DEVICE_ATTR_WO(debug_hw_clat_test);
+static DEVICE_ATTR_RW(debug_disable_hw_clat);
+
+static struct attribute *hw_clat_attrs[] = {
+ &dev_attr_debug_hw_clat_test.attr,
+ &dev_attr_debug_disable_hw_clat.attr,
+ NULL,
+};
+
+static const struct attribute_group hw_clat_group = {
+ .attrs = hw_clat_attrs,
+ .name = "hw_clat",
+};
+#endif
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+/* Previous/current {fore, rear, done} pointer snapshots; log only on change.
+ * NOTE(review): a single snapshot pair is shared across all queues. */
+static u32 p_pktproc[3];
+static u32 c_pktproc[3];
+
+/* Periodic debug dump of the pktproc ring pointers (called from sbd_print). */
+static void pktproc_print(struct mem_link_device *mld)
+{
+ struct pktproc_adaptor *ppa = &mld->pktproc;
+ int i;
+ struct pktproc_queue *q;
+
+ for (i = 0; i < ppa->num_queue; i++) {
+ q = ppa->q[i];
+
+ c_pktproc[0] = *q->fore_ptr;
+ c_pktproc[1] = *q->rear_ptr;
+ c_pktproc[2] = q->done_ptr;
+
+ /* Only log when something moved since the last sample */
+ if (memcmp(p_pktproc, c_pktproc, sizeof(u32)*3)) {
+ mif_err("Queue:%d fore:%d rear:%d done:%d\n",
+ i, c_pktproc[0], c_pktproc[1], c_pktproc[2]);
+ memcpy(p_pktproc, c_pktproc, sizeof(u32)*3);
+ }
+ }
+}
+#endif
+
+#define BUFF_SIZE 256
+/* Previous/current TX/RX read-write pointer snapshots; log only on change */
+static u32 p_rwpointer[4];
+static u32 c_rwpointer[4];
+
+/*
+ * sbd_print - 1 s periodic hrtimer that dumps SBD high-priority ring
+ * pointers and per-netdev packet counters whenever they changed since the
+ * previous sample. Each rp/wp word packs two 16-bit indices (low/high).
+ */
+static enum hrtimer_restart sbd_print(struct hrtimer *timer)
+{
+ struct mem_link_device *mld = container_of(timer, struct mem_link_device, sbd_print_timer);
+ struct sbd_link_device *sl = &mld->sbd_link_dev;
+ u16 id;
+ struct sbd_ring_buffer *rb[ULDL];
+ struct io_device *iod;
+ char buf[BUFF_SIZE] = { 0, };
+ int len = 0;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+ pktproc_print(mld);
+#endif
+
+ if (likely(sbd_active(sl))) {
+ id = sbd_ch2id(sl, QOS_HIPRIO);
+ rb[TX] = &sl->ipc_dev[id].rb[TX];
+ rb[RX] = &sl->ipc_dev[id].rb[RX];
+
+ c_rwpointer[0] = *(u32 *)rb[TX]->rp;
+ c_rwpointer[1] = *(u32 *)rb[TX]->wp;
+ c_rwpointer[2] = *(u32 *)rb[RX]->rp;
+ c_rwpointer[3] = *(u32 *)rb[RX]->wp;
+
+ if (memcmp(p_rwpointer, c_rwpointer, sizeof(u32)*4)) {
+ mif_err("TX %04d/%04d %04d/%04d RX %04d/%04d %04d/%04d\n",
+ c_rwpointer[0] & 0xFFFF, c_rwpointer[1] & 0xFFFF,
+ c_rwpointer[0] >> 16, c_rwpointer[1] >> 16,
+ c_rwpointer[2] & 0xFFFF, c_rwpointer[3] & 0xFFFF,
+ c_rwpointer[2] >> 16, c_rwpointer[3] >> 16);
+ memcpy(p_rwpointer, c_rwpointer, sizeof(u32)*4);
+
+ /* Append tx/rx packet totals for every active netdev */
+ spin_lock(&rb[TX]->iod->msd->active_list_lock);
+ list_for_each_entry(iod, &rb[TX]->iod->msd->activated_ndev_list,
+ node_ndev) {
+ len += scnprintf(buf + len, BUFF_SIZE - len, "%s: %lu/%lu ",
+ iod->name, iod->ndev->stats.tx_packets,
+ iod->ndev->stats.rx_packets);
+ }
+ spin_unlock(&rb[TX]->iod->msd->active_list_lock);
+
+ mif_err("%s\n", buf);
+ }
+ }
+
+ hrtimer_forward_now(timer, ms_to_ktime(1000));
+
+ return HRTIMER_RESTART;
+}
+
+/*
+ * set_protocol_attr - install per-protocol framing helpers and magic values.
+ *
+ * Selects between the SIPC5 and SIT (EXYNOS) framing helper sets based on
+ * ld->protocol. NOTE(review): the two branches are not symmetric — SIPC
+ * never sets is_oem_ch and SIT never sets is_misc_ch; presumably ld is
+ * zero-initialized so the unset callbacks stay NULL — confirm.
+ */
+static int set_protocol_attr(struct link_device *ld)
+{
+ switch (ld->protocol) {
+ case PROTOCOL_SIPC:
+ ld->chid_fmt_0 = SIPC5_CH_ID_FMT_0;
+ ld->chid_rfs_0 = SIPC5_CH_ID_RFS_0;
+ ld->magic_boot = MEM_BOOT_MAGIC;
+ ld->magic_crash = MEM_CRASH_MAGIC;
+ ld->magic_dump = MEM_DUMP_MAGIC;
+ ld->magic_ipc = MEM_IPC_MAGIC;
+
+ ld->is_start_valid = sipc5_start_valid;
+ ld->is_padding_exist = sipc5_padding_exist;
+ ld->is_multi_frame = sipc5_multi_frame;
+ ld->has_ext_len = sipc5_ext_len;
+ ld->get_ch = sipc5_get_ch;
+ ld->get_ctrl = sipc5_get_ctrl;
+ ld->calc_padding_size = sipc5_calc_padding_size;
+ ld->get_hdr_len = sipc5_get_hdr_len;
+ ld->get_frame_len = sipc5_get_frame_len;
+ ld->get_total_len = sipc5_get_total_len;
+ ld->is_fmt_ch = sipc5_fmt_ch;
+ ld->is_ps_ch = sipc_ps_ch;
+ ld->is_rfs_ch = sipc5_rfs_ch;
+ ld->is_boot_ch = sipc5_boot_ch;
+ ld->is_dump_ch = sipc5_dump_ch;
+ ld->is_bootdump_ch = sipc5_bootdump_ch;
+ ld->is_ipc_ch = sipc5_ipc_ch;
+ ld->is_csd_ch = sipc_csd_ch;
+ ld->is_log_ch = sipc_log_ch;
+ ld->is_router_ch = sipc_router_ch;
+ ld->is_misc_ch = sipc_misc_ch;
+ /* Channel classes SIPC does not define */
+ ld->is_embms_ch = NULL;
+ ld->is_uts_ch = NULL;
+ ld->is_wfs0_ch = NULL;
+ ld->is_wfs1_ch = NULL;
+ break;
+ case PROTOCOL_SIT:
+ ld->chid_fmt_0 = EXYNOS_CH_ID_FMT_0;
+ ld->chid_rfs_0 = EXYNOS_CH_ID_RFS_0;
+ ld->magic_boot = SHM_BOOT_MAGIC;
+ ld->magic_crash = SHM_DUMP_MAGIC;
+ ld->magic_dump = SHM_DUMP_MAGIC;
+ ld->magic_ipc = SHM_IPC_MAGIC;
+
+ ld->is_start_valid = exynos_start_valid;
+ ld->is_padding_exist = exynos_padding_exist;
+ ld->is_multi_frame = exynos_multi_frame;
+ ld->has_ext_len = exynos_ext_len;
+ ld->get_ch = exynos_get_ch;
+ ld->get_ctrl = exynos_get_ctrl;
+ ld->calc_padding_size = exynos_calc_padding_size;
+ ld->get_hdr_len = exynos_get_hdr_len;
+ ld->get_frame_len = exynos_get_frame_len;
+ ld->get_total_len = exynos_get_total_len;
+ ld->is_fmt_ch = exynos_fmt_ch;
+ ld->is_ps_ch = exynos_ps_ch;
+ ld->is_rfs_ch = exynos_rfs_ch;
+ ld->is_boot_ch = exynos_boot_ch;
+ ld->is_dump_ch = exynos_dump_ch;
+ ld->is_bootdump_ch = exynos_bootdump_ch;
+ ld->is_ipc_ch = exynos_ipc_ch;
+ ld->is_csd_ch = exynos_rcs_ch;
+ ld->is_log_ch = exynos_log_ch;
+ ld->is_router_ch = exynos_router_ch;
+ ld->is_embms_ch = exynos_embms_ch;
+ ld->is_uts_ch = exynos_uts_ch;
+ ld->is_wfs0_ch = exynos_wfs0_ch;
+ ld->is_wfs1_ch = exynos_wfs1_ch;
+ ld->is_oem_ch = exynos_oem_ch;
+ break;
+ default:
+ mif_err("protocol error %d\n", ld->protocol);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Populate the generic link_device attributes and method pointers for a
+ * shared-memory/PCIe link, based on platform data and link attributes.
+ * Returns 0 on success or a negative errno.
+ */
+static int set_ld_attr(struct platform_device *pdev,
+		u32 link_type, struct modem_data *modem,
+		struct mem_link_device *mld, struct link_device *ld)
+{
+	int err = 0;
+
+	ld->name = modem->link_name;
+
+	if (mld->attrs & LINK_ATTR_SBD_IPC) {
+		mif_info("%s<->%s: LINK_ATTR_SBD_IPC\n", ld->name, modem->name);
+		ld->sbd_ipc = true;
+	}
+	if (mld->attrs & LINK_ATTR_IPC_ALIGNED) {
+		mif_info("%s<->%s: LINK_ATTR_IPC_ALIGNED\n",
+			 ld->name, modem->name);
+		ld->aligned = true;
+	}
+
+	ld->ipc_version = modem->ipc_version;
+	ld->interrupt_types = modem->interrupt_types;
+	ld->mdm_data = modem;
+	ld->dev = &pdev->dev;
+
+	/* Generic link-device methods */
+	ld->ioctl = shmem_ioctl;
+	ld->init_comm = shmem_init_comm;
+	ld->terminate_comm = NULL;
+	ld->send = shmem_send;
+	ld->link_prepare_normal_boot = link_prepare_normal_boot;
+	ld->link_trigger_cp_crash = link_trigger_cp_crash;
+
+	/* Boot/download methods apply only when this link carries CP boot */
+	if (mld->attrs & LINK_ATTR_MEM_BOOT) {
+		ld->link_start_normal_boot = link_start_normal_boot;
+		ld->link_start_partial_boot = link_start_partial_boot;
+		if (link_type == LINKDEV_SHMEM)
+			ld->security_req = shmem_security_request;
+
+		if (mld->attrs & LINK_ATTR_XMIT_BTDLR) {
+			ld->load_cp_image = link_load_cp_image;
+			mld->spi_bus_num = -1;
+			mif_dt_read_u32_noerr(pdev->dev.of_node,
+					"cpboot_spi_bus_num", mld->spi_bus_num);
+			if (mld->attrs & LINK_ATTR_XMIT_BTDLR_SPI) {
+				/* Bootloader image goes out over SPI instead */
+				ld->load_cp_image = cpboot_spi_load_cp_image;
+				if (mld->spi_bus_num < 0) {
+					mif_err("cpboot_spi_bus_num error\n");
+					err = -ENODEV;
+					goto error;
+				}
+			}
+
+			if (mld->attrs & LINK_ATTR_XMIT_BTDLR_GNSS) {
+				ld->load_gnss_image = link_load_gnss_image;
+				ld->read_gnss_image = link_read_gnss_image;
+			}
+		}
+	}
+
+	if (mld->attrs & LINK_ATTR_MEM_DUMP)
+		ld->link_start_dump_boot = link_start_dump_boot;
+
+	ld->close_tx = shmem_close_tx;
+	ld->get_cp_crash_reason = get_cp_crash_reason;
+
+	/* Protocol-dependent accessors */
+	ld->protocol = modem->protocol;
+	ld->capability_check = modem->capability_check;
+	err = set_protocol_attr(ld);
+	if (err)
+		goto error;
+
+	ld->enable_rx_int = shmem_enable_rx_int;
+	ld->disable_rx_int = shmem_disable_rx_int;
+	ld->start_timers = shmem_start_timers;
+	ld->stop_timers = shmem_stop_timers;
+	ld->handover_block_info = update_handover_block_info;
+
+	return 0;
+
+error:
+	mif_err("xxx\n");
+	return err;
+}
+
+/*
+ * init_shmem_maps - map the shared-memory regions used by the link device
+ * @link_type: LINKDEV_SHMEM or LINKDEV_PCIE
+ * @modem: modem platform data
+ * @mld: memory link device under construction
+ * @ld: generic link device embedded in @mld
+ * @cp_num: CP index used to look up shmem regions
+ *
+ * Maps the IPC region (and, for SHMEM links, VSS/ACPM regions), creates the
+ * legacy link device and, for SBD links, the SBD link device and its timers.
+ * Returns 0 on success or a negative errno.
+ */
+static int init_shmem_maps(u32 link_type, struct modem_data *modem,
+		struct mem_link_device *mld, struct link_device *ld, u32 cp_num)
+{
+	int err = 0;
+	struct device_node *np_acpm = NULL;
+	u32 acpm_addr;
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_SHMEM)
+	if (link_type == LINKDEV_SHMEM) {
+		/* BOOT region is mapped on demand; only its size is known here */
+		mld->boot_size = cp_shmem_get_size(cp_num, SHMEM_CP) +
+			cp_shmem_get_size(cp_num, SHMEM_VSS);
+		mld->boot_base = NULL;
+		mif_info("boot_base=NULL, boot_size=%lu\n",
+			 (unsigned long)mld->boot_size);
+	}
+#endif
+
+	/*
+	 * Initialize SHMEM maps for IPC (physical map -> logical map)
+	 */
+	mld->size = cp_shmem_get_size(cp_num, SHMEM_IPC);
+	if (modem->legacy_raw_rx_buffer_cached)
+		mld->base = cp_shmem_get_nc_region(cp_shmem_get_base(cp_num, SHMEM_IPC),
+			modem->legacy_raw_buffer_offset + modem->legacy_raw_txq_size);
+	else
+		mld->base = cp_shmem_get_region(cp_num, SHMEM_IPC);
+	if (!mld->base) {
+		mif_err("Failed to vmap ipc_region\n");
+		err = -ENOMEM;
+		goto error;
+	}
+	mif_info("ipc_base=%pK, ipc_size=%lu\n",
+		 mld->base, (unsigned long)mld->size);
+
+#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
+	mld->hiprio_base = cp_shmem_get_nc_region(cp_shmem_get_base(cp_num, SHMEM_IPC)
+		+ modem->legacy_raw_qos_buffer_offset, modem->legacy_raw_qos_txq_size
+		+ modem->legacy_raw_qos_rxq_size);
+	/* Fix: the original never verified this mapping before later use */
+	if (!mld->hiprio_base) {
+		mif_err("Failed to vmap hiprio region\n");
+		err = -ENOMEM;
+		goto error;
+	}
+#endif
+
+	switch (link_type) {
+	case LINKDEV_SHMEM:
+		/*
+		 * Initialize SHMEM maps for VSS (physical map -> logical map)
+		 */
+		mld->vss_base = cp_shmem_get_region(cp_num, SHMEM_VSS);
+		if (!mld->vss_base) {
+			mif_err("Failed to vmap vss_region\n");
+			err = -ENOMEM;
+			goto error;
+		}
+		mif_info("vss_base=%pK\n", mld->vss_base);
+
+		/*
+		 * Initialize memory maps for ACPM (physical map -> logical map)
+		 */
+		np_acpm = of_find_node_by_name(NULL, "acpm_ipc");
+		if (!np_acpm)
+			break;
+
+		of_property_read_u32(np_acpm, "dump-size", &mld->acpm_size);
+		of_property_read_u32(np_acpm, "dump-base", &acpm_addr);
+		/* Fix: drop the reference taken by of_find_node_by_name() */
+		of_node_put(np_acpm);
+		mld->acpm_base = cp_shmem_get_nc_region(acpm_addr, mld->acpm_size);
+		if (!mld->acpm_base) {
+			mif_err("Failed to vmap acpm_region\n");
+			err = -ENOMEM;
+			goto error;
+		}
+		mif_info("acpm_base=%pK acpm_size:0x%X\n", mld->acpm_base,
+			 mld->acpm_size);
+		break;
+	default:
+		break;
+	}
+
+	ld->link_type = link_type;
+	create_legacy_link_device(mld);
+
+	if (mld->attrs & LINK_ATTR_XMIT_BTDLR_GNSS) {
+		mld->gnss_v_base = cp_shmem_get_nc_region(
+			cp_shmem_get_base(0, SHMEM_GNSS_FW),
+			cp_shmem_get_size(0, SHMEM_GNSS_FW));
+		/* GNSS mapping failure is non-fatal: only log it */
+		if (!mld->gnss_v_base)
+			mif_err("cp_shmem_get_nc_region() for gnss_fw failed\n");
+	}
+
+	if (ld->sbd_ipc) {
+		/* SBD transmit kick and debug-print timers */
+		hrtimer_init(&mld->sbd_tx_timer,
+			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		mld->sbd_tx_timer.function = sbd_tx_timer_func;
+
+		hrtimer_init(&mld->sbd_print_timer,
+			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		mld->sbd_print_timer.function = sbd_print;
+
+		err = create_sbd_link_device(ld,
+				&mld->sbd_link_dev, mld->base, mld->size);
+		if (err < 0)
+			goto error;
+	}
+
+	return 0;
+
+error:
+	mif_err("xxx\n");
+	return err;
+}
+
+/*
+ * init_info_region - wire up the AP<->CP shared "info" region
+ *
+ * Computes __iomem pointers into mld->base for version fields, offset words
+ * and capability words, writes the offsets the CP expects to find there, and
+ * binds the control-message descriptors relative to the cmsg base.
+ * Returns 0 (no failure paths).
+ */
+static int init_info_region(struct modem_data *modem,
+		struct mem_link_device *mld, struct link_device *ld)
+{
+	u8 __iomem *cmsg_base;
+	int part;
+
+	/* Optional fields exist only when the DT/pdata offset is non-zero */
+	if (modem->offset_ap_version)
+		mld->ap_version = (u32 __iomem *)(mld->base + modem->offset_ap_version);
+	if (modem->offset_cp_version)
+		mld->cp_version = (u32 __iomem *)(mld->base + modem->offset_cp_version);
+	if (modem->offset_cmsg_offset) {
+		mld->cmsg_offset = (u32 __iomem *)(mld->base + modem->offset_cmsg_offset);
+		/* Control messages live at cmsg_offset; publish it for the CP */
+		cmsg_base = mld->base + modem->cmsg_offset;
+		iowrite32(modem->cmsg_offset, mld->cmsg_offset);
+	} else {
+		cmsg_base = mld->base;
+	}
+
+	if (modem->offset_srinfo_offset) {
+		mld->srinfo_offset = (u32 __iomem *)(mld->base + modem->offset_srinfo_offset);
+		iowrite32(modem->srinfo_offset, mld->srinfo_offset);
+	}
+	if (modem->offset_clk_table_offset) {
+		mld->clk_table_offset = (u32 __iomem *)(mld->base + modem->offset_clk_table_offset);
+		iowrite32(modem->clk_table_offset, mld->clk_table_offset);
+	}
+	if (modem->offset_buff_desc_offset) {
+		mld->buff_desc_offset = (u32 __iomem *)(mld->base + modem->offset_buff_desc_offset);
+		iowrite32(modem->buff_desc_offset, mld->buff_desc_offset);
+	}
+
+	mld->srinfo_base = (u32 __iomem *)(mld->base + modem->srinfo_offset);
+	mld->srinfo_size = modem->srinfo_size;
+	mld->clk_table = (u32 __iomem *)(mld->base + modem->clk_table_offset);
+
+	if (ld->capability_check) {
+		u8 __iomem *offset;
+
+		/* AP/CP capability */
+		offset = mld->base + modem->offset_capability_offset;
+		mld->capability_offset = (u32 __iomem *)(offset);
+		iowrite32(modem->capability_offset, mld->capability_offset);
+
+		/* Each part is an AP word followed by a CP word */
+		offset = mld->base + modem->capability_offset;
+		for (part = 0; part < AP_CP_CAP_PARTS; part++) {
+			mld->ap_capability_offset[part] =
+				(u32 __iomem *)(offset + (AP_CP_CAP_PART_LEN * 2 * part));
+			mld->cp_capability_offset[part] =
+				(u32 __iomem *)(offset + (AP_CP_CAP_PART_LEN * 2 * part) +
+						AP_CP_CAP_PART_LEN);
+
+			/* Initial values */
+			iowrite32(0, mld->ap_capability_offset[part]);
+			iowrite32(0, mld->cp_capability_offset[part]);
+		}
+	}
+
+	/* Bind all control-message descriptors relative to cmsg_base */
+	construct_ctrl_msg(&mld->cp2ap_msg, modem->cp2ap_msg, cmsg_base);
+	construct_ctrl_msg(&mld->ap2cp_msg, modem->ap2cp_msg, cmsg_base);
+	construct_ctrl_msg(&mld->cp2ap_united_status, modem->cp2ap_united_status, cmsg_base);
+	construct_ctrl_msg(&mld->ap2cp_united_status, modem->ap2cp_united_status, cmsg_base);
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	construct_ctrl_msg(&mld->ap2cp_clatinfo_xlat_v4_addr,
+			modem->ap2cp_clatinfo_xlat_v4_addr, cmsg_base);
+	construct_ctrl_msg(&mld->ap2cp_clatinfo_xlat_addr_0,
+			modem->ap2cp_clatinfo_xlat_addr_0, cmsg_base);
+	construct_ctrl_msg(&mld->ap2cp_clatinfo_xlat_addr_1,
+			modem->ap2cp_clatinfo_xlat_addr_1, cmsg_base);
+	construct_ctrl_msg(&mld->ap2cp_clatinfo_xlat_addr_2,
+			modem->ap2cp_clatinfo_xlat_addr_2, cmsg_base);
+	construct_ctrl_msg(&mld->ap2cp_clatinfo_xlat_addr_3,
+			modem->ap2cp_clatinfo_xlat_addr_3, cmsg_base);
+	construct_ctrl_msg(&mld->ap2cp_clatinfo_index,
+			modem->ap2cp_clatinfo_index, cmsg_base);
+#endif
+	construct_ctrl_msg(&mld->ap2cp_kerneltime, modem->ap2cp_kerneltime, cmsg_base);
+	construct_ctrl_msg(&mld->ap2cp_kerneltime_sec, modem->ap2cp_kerneltime_sec, cmsg_base);
+	construct_ctrl_msg(&mld->ap2cp_kerneltime_usec, modem->ap2cp_kerneltime_usec, cmsg_base);
+	construct_ctrl_msg(&mld->ap2cp_handover_block_info,
+			modem->ap2cp_handover_block_info, cmsg_base);
+
+	for (part = 0; part < AP_CP_CAP_PARTS; part++)
+		mld->ap_capability[part] = modem->ap_capability[part];
+
+	return 0;
+}
+
+#if IS_ENABLED(CONFIG_MCU_IPC)
+/*
+ * register_irq_handler - hook up CP mailbox interrupts
+ *
+ * Registers handlers for IPC RX, CP wakelock, TX flow control and,
+ * configuration permitting, RAT-mode and CLAT-info-ACK interrupts.
+ * Requires INTERRUPT_MAILBOX links; anything else returns -EPERM.
+ *
+ * NOTE(review): @modem is unused here; the IRQ numbers were already copied
+ * from modem->mbx into @mld by the caller before this runs.
+ * NOTE(review): mld->ws registered below is not unregistered when a later
+ * cp_mbox_register_handler() fails — presumably cleaned up with the device;
+ * verify against the driver's remove path.
+ */
+static int register_irq_handler(struct modem_data *modem,
+		struct mem_link_device *mld, struct link_device *ld)
+{
+	unsigned int irq_num;
+	int err;
+
+	if (ld->interrupt_types != INTERRUPT_MAILBOX) {
+		err = -EPERM;
+		goto error;
+	}
+
+	/* Main IPC RX interrupt */
+	irq_num = mld->irq_cp2ap_msg;
+	err = cp_mbox_register_handler(CP_MBOX_IRQ_IDX_0, irq_num,
+			shmem_irq_handler, mld);
+	if (err)
+		goto irq_error;
+
+	/**
+	 * Retrieve SHMEM MBOX# and IRQ# for wakelock
+	 */
+	mld->ws = cpif_wake_lock_register(ld->dev, ld->name);
+	if (mld->ws == NULL) {
+		mif_err("%s: wakeup_source_register fail\n", ld->name);
+		err = -EINVAL;
+		goto error;
+	}
+
+	irq_num = mld->irq_cp2ap_wakelock;
+	err = cp_mbox_register_handler(CP_MBOX_IRQ_IDX_0, irq_num,
+			shmem_cp2ap_wakelock_handler, mld);
+	if (err)
+		goto irq_error;
+
+	/**
+	 * Retrieve SHMEM MBOX# and IRQ# for RAT_MODE
+	 */
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+	irq_num = mld->irq_cp2ap_rat_mode;
+	err = cp_mbox_register_handler(CP_MBOX_IRQ_IDX_0, irq_num,
+			shmem_cp2ap_rat_mode_handler, mld);
+	if (err)
+		goto irq_error;
+#endif
+
+	/* TX flow-control state notifications from the CP */
+	irq_num = mld->irq_cp2ap_status;
+	err = cp_mbox_register_handler(CP_MBOX_IRQ_IDX_0, irq_num,
+			shmem_tx_state_handler, mld);
+	if (err)
+		goto irq_error;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	irq_num = mld->irq_cp2ap_clatinfo_ack;
+	err = cp_mbox_register_handler(CP_MBOX_IRQ_IDX_0, irq_num,
+			shmem_cp2ap_clatinfo_ack, mld);
+	if (err)
+		goto irq_error;
+#endif
+
+	return 0;
+
+irq_error:
+	mif_err("%s: ERR! cp_mbox_register_handler(MBOX_IRQ_IDX_0, %u) fail (%d)\n",
+		ld->name, irq_num, err);
+
+error:
+	mif_err("xxx\n");
+
+	return err;
+}
+#endif
+
+/* Read the devfreq DT knobs and pull the MIF/CP frequency tables out of
+ * ECT. Missing frequency tables are logged but never fatal; only a bad DT
+ * configuration makes this function fail.
+ */
+static int parse_ect_tables(struct platform_device *pdev,
+		struct mem_link_device *mld)
+{
+	static const char * const table_names[] = {
+		"MIF", "CP_CPU", "CP", "CP_EM", "CP_MCW",
+	};
+	size_t num_tables = sizeof(table_names) / sizeof(table_names[0]);
+	size_t i;
+	int ret;
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+			"devfreq_use_dfs_max_freq", &mld->mif_table.use_dfs_max_freq);
+	if (ret) {
+		mif_err("devfreq_use_dfs_max_freq error:%d\n", ret);
+		return ret;
+	}
+
+	if (mld->mif_table.use_dfs_max_freq) {
+		ret = of_property_read_u32(pdev->dev.of_node,
+				"devfreq_cal_id_mif", &mld->mif_table.cal_id_mif);
+		if (ret) {
+			mif_err("devfreq_cal_id_mif error:%d\n", ret);
+			return ret;
+		}
+	}
+
+	/* Parsing devfreq, cpufreq table from ECT; failures are best-effort */
+	for (i = 0; i < num_tables; i++) {
+		mif_info("Parsing %s frequency table...\n", table_names[i]);
+		if (parse_ect(mld, table_names[i]) < 0)
+			mif_err("Can't get %s frequency table!!!!!\n",
+				table_names[i]);
+	}
+
+	return 0;
+}
+
+/*
+ * create_link_device - allocate and fully initialize a memory-type link device
+ * @pdev: platform device carrying the modem platform data
+ * @link_type: LINKDEV_SHMEM or LINKDEV_PCIE
+ *
+ * Returns the embedded struct link_device on success, NULL on any failure.
+ *
+ * NOTE(review): the error path only kfree()s mld; NAPI state, mapped shmem
+ * regions and sysfs groups set up before a late failure are not torn down —
+ * verify that probe failure is treated as fatal by the caller.
+ */
+struct link_device *create_link_device(struct platform_device *pdev, u32 link_type)
+{
+	struct modem_data *modem;
+	struct mem_link_device *mld;
+	struct link_device *ld;
+	int err;
+	u32 cp_num;
+
+	mif_info("+++\n");
+
+	/**
+	 * Get the modem (platform) data
+	 */
+	modem = (struct modem_data *)pdev->dev.platform_data;
+	if (!modem) {
+		mif_err("ERR! modem == NULL\n");
+		return NULL;
+	}
+
+	if (!modem->mbx) {
+		mif_err("%s: ERR! mbx == NULL\n", modem->link_name);
+		return NULL;
+	}
+
+	if (modem->ipc_version < SIPC_VER_50) {
+		mif_err("%s<->%s: ERR! IPC version %d < SIPC_VER_50\n",
+			modem->link_name, modem->name, modem->ipc_version);
+		return NULL;
+	}
+
+	mif_info("MODEM:%s LINK:%s\n", modem->name, modem->link_name);
+
+	/*
+	 * Alloc an instance of mem_link_device structure
+	 */
+	mld = kzalloc(sizeof(struct mem_link_device), GFP_KERNEL);
+	if (!mld) {
+		mif_err("%s<->%s: ERR! mld kzalloc fail\n",
+			modem->link_name, modem->name);
+		return NULL;
+	}
+
+	/*
+	 * Retrieve modem-specific attributes value
+	 */
+	mld->attrs = modem->link_attrs;
+	mif_info("link_attrs:0x%08lx\n", mld->attrs);
+
+	/*====================================================================
+	 * Initialize "memory snapshot buffer (MSB)" framework
+	 *====================================================================
+	 */
+	if (msb_init() < 0) {
+		mif_err("%s<->%s: ERR! msb_init() fail\n",
+			modem->link_name, modem->name);
+		goto error;
+	}
+
+	/*====================================================================
+	 * Set attributes as a "link_device"
+	 *====================================================================
+	 */
+	ld = &mld->link_dev;
+	err = set_ld_attr(pdev, link_type, modem, mld, ld);
+	if (err)
+		goto error;
+
+	/* A dummy netdev hosts the shared NAPI poll context for RX */
+	init_dummy_netdev(&mld->dummy_net);
+	netif_napi_add_weight(&mld->dummy_net, &mld->mld_napi, mld_rx_int_poll, NAPI_POLL_WEIGHT);
+	napi_enable(&mld->mld_napi);
+
+	INIT_LIST_HEAD(&ld->list);
+
+	spin_lock_init(&ld->netif_lock);
+	atomic_set(&ld->netif_stopped, 0);
+	ld->tx_flowctrl_mask = 0;
+
+	if (shmem_rx_setup(ld) < 0)
+		goto error;
+
+	if (mld->attrs & LINK_ATTR_DPRAM_MAGIC) {
+		mif_info("%s<->%s: LINK_ATTR_DPRAM_MAGIC\n",
+			ld->name, modem->name);
+		mld->dpram_magic = true;
+	}
+
+	mld->cmd_handler = shmem_cmd_handler;
+
+	spin_lock_init(&mld->state_lock);
+	mutex_init(&mld->vmap_lock);
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	mutex_init(&mld->clatinfo_lock);
+#endif
+
+	mld->state = LINK_STATE_OFFLINE;
+
+	/*
+	 * Initialize variables for TX & RX
+	 */
+	msb_queue_head_init(&mld->msb_rxq);
+	msb_queue_head_init(&mld->msb_log);
+
+	hrtimer_init(&mld->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	mld->tx_timer.function = tx_timer_func;
+
+	INIT_WORK(&mld->page_reclaim_work, bootdump_oom_handler_work);
+
+	/*
+	 * Initialize variables for CP booting and crash dump
+	 */
+	INIT_DELAYED_WORK(&mld->bootdump_rx_dwork, bootdump_rx_work);
+
+	/*
+	 * Link local functions to the corresponding function pointers that are
+	 * mandatory for all memory-type link devices
+	 */
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+	if (link_type == LINKDEV_PCIE) {
+		mld->recv_cp2ap_irq = pcie_recv_cp2ap_irq;
+		mld->send_ap2cp_irq = pcie_send_ap2cp_irq;
+		mld->recv_cp2ap_status = pcie_recv_cp2ap_status;
+	}
+#endif
+#if IS_ENABLED(CONFIG_LINK_DEVICE_SHMEM)
+	if (link_type == LINKDEV_SHMEM) {
+		mld->recv_cp2ap_irq = shmem_recv_cp2ap_irq;
+		mld->send_ap2cp_irq = shmem_send_ap2cp_irq;
+		mld->recv_cp2ap_status = shmem_recv_cp2ap_status;
+	}
+#endif
+
+	/*
+	 * Link local functions to the corresponding function pointers that are
+	 * optional for some memory-type link devices
+	 */
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+	if (link_type == LINKDEV_PCIE)
+		mld->read_ap2cp_irq = pcie_read_ap2cp_irq;
+#endif
+#if IS_ENABLED(CONFIG_LINK_DEVICE_SHMEM)
+	if (link_type == LINKDEV_SHMEM)
+		mld->read_ap2cp_irq = shmem_read_ap2cp_irq;
+#endif
+
+	/*
+	 * Initialize SHMEM maps for BOOT (physical map -> logical map)
+	 */
+	cp_num = ld->mdm_data->cp_num;
+	err = init_shmem_maps(link_type, modem, mld, ld, cp_num);
+	if (err)
+		goto error;
+
+	/*
+	 * Info region
+	 */
+	err = init_info_region(modem, mld, ld);
+	if (err)
+		goto error;
+
+	/*
+	 * Retrieve SHMEM MBOX#, IRQ#, etc.
+	 */
+	mld->int_ap2cp_msg = modem->mbx->int_ap2cp_msg;
+	mld->irq_cp2ap_msg = modem->mbx->irq_cp2ap_msg;
+
+	mld->sbi_cp_status_mask = modem->sbi_cp_status_mask;
+	mld->sbi_cp_status_pos = modem->sbi_cp_status_pos;
+	mld->irq_cp2ap_status = modem->mbx->irq_cp2ap_status;
+
+	mld->sbi_cp2ap_wakelock_mask = modem->sbi_cp2ap_wakelock_mask;
+	mld->sbi_cp2ap_wakelock_pos = modem->sbi_cp2ap_wakelock_pos;
+	mld->irq_cp2ap_wakelock = modem->mbx->irq_cp2ap_wakelock;
+
+	mld->sbi_cp_rat_mode_mask = modem->sbi_cp2ap_rat_mode_mask;
+	mld->sbi_cp_rat_mode_pos = modem->sbi_cp2ap_rat_mode_pos;
+	mld->irq_cp2ap_rat_mode = modem->mbx->irq_cp2ap_rat_mode;
+
+	mld->pktproc_use_36bit_addr = modem->pktproc_use_36bit_addr;
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	mld->int_ap2cp_clatinfo_send = modem->mbx->int_ap2cp_clatinfo_send;
+	mld->irq_cp2ap_clatinfo_ack = modem->mbx->irq_cp2ap_clatinfo_ack;
+#endif
+
+	/**
+	 * For TX Flow-control command from CP
+	 */
+	mld->tx_flowctrl_cmd = 0;
+
+	/* Link mem_link_device to modem_data */
+	modem->mld = mld;
+
+	mld->tx_period_ns = TX_PERIOD_MS * NSEC_PER_MSEC;
+
+	mld->pass_skb_to_net = pass_skb_to_net;
+	mld->pass_skb_to_demux = pass_skb_to_demux;
+
+	/*
+	 * Register interrupt handlers
+	 */
+#if IS_ENABLED(CONFIG_MCU_IPC)
+	err = register_irq_handler(modem, mld, ld);
+	if (err) {
+		mif_err("register_irq_handler() error %d\n", err);
+		goto error;
+	}
+#endif
+
+	/* ECT frequency tables apply only to on-SoC shared memory links */
+	if (ld->link_type == LINKDEV_SHMEM) {
+		err = parse_ect_tables(pdev, mld);
+		if (err)
+			goto error;
+	}
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	err = pktproc_create(pdev, mld, cp_shmem_get_base(cp_num, SHMEM_PKTPROC),
+			cp_shmem_get_size(cp_num, SHMEM_PKTPROC));
+	if (err < 0) {
+		mif_err("pktproc_create() error %d\n", err);
+		goto error;
+	}
+#endif
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+	err = pktproc_create_ul(pdev, mld, cp_shmem_get_base(cp_num, SHMEM_PKTPROC),
+			cp_shmem_get_size(cp_num, SHMEM_PKTPROC_UL));
+	if (err < 0) {
+		mif_err("pktproc_create_ul() error %d\n", err);
+		goto error;
+	}
+	hrtimer_init(&mld->pktproc_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	mld->pktproc_tx_timer.function = pktproc_tx_timer_func;
+#endif
+
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+	err = tpmon_create(pdev, ld);
+	if (err < 0) {
+		mif_err("tpmon_create() error %d\n", err);
+		goto error;
+	}
+#endif
+
+	/* sysfs; failures here are logged but do not abort the probe */
+	if (sysfs_create_group(&pdev->dev.kobj, &link_device_group))
+		mif_err("failed to create sysfs node related link_device\n");
+
+	if (sysfs_create_group(&pdev->dev.kobj, &napi_group))
+		mif_err("failed to create sysfs node related napi\n");
+
+#if defined(CPIF_WAKEPKT_SET_MARK)
+	if (sysfs_create_group(&pdev->dev.kobj, &wakeup_group))
+		mif_err("failed to create sysfs node for wakeup events\n");
+#endif
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	if (sysfs_create_group(&pdev->dev.kobj, &hw_clat_group))
+		mif_err("failed to create sysfs node related hw clat\n");
+#endif
+
+	mif_info("---\n");
+	return ld;
+
+error:
+	kfree(mld);
+	mif_err("xxx\n");
+	return NULL;
+}
diff --git a/link_device.h b/link_device.h
new file mode 100644
index 0000000..1f8ebe6
--- /dev/null
+++ b/link_device.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINK_DEVICE_H__
+#define __LINK_DEVICE_H__
+
+#include <linux/types.h>
+
+#include "link_device_memory.h"
+#include "modem_toe_device.h"
+
+/* Return true when the shared-memory IPC channel is usable: the CP is
+ * online, the magic/access words (when used) are valid, and no forced
+ * crash is in progress. Each failure is logged with the caller's address.
+ */
+static inline bool ipc_active(struct mem_link_device *mld)
+{
+	struct link_device *ld = &mld->link_dev;
+	struct modem_ctl *mc = ld->mc;
+	int crash;
+
+	if (unlikely(!cp_online(mc))) {
+		mif_err("%s<->%s: %s.state %s != ONLINE <%ps>\n",
+			ld->name, mc->name, mc->name, mc_state(mc), CALLER);
+		return false;
+	}
+
+	if (mld->dpram_magic) {
+		unsigned int magic = ioread32(mld->legacy_link_dev.magic);
+		unsigned int access = ioread32(mld->legacy_link_dev.mem_access);
+
+		if (magic != ld->magic_ipc || access != 1) {
+			mif_err("%s<->%s: ERR! magic:0x%X access:%d <%ps>\n",
+				ld->name, mc->name, magic, access, CALLER);
+			return false;
+		}
+	}
+
+	crash = atomic_read(&mld->forced_cp_crash);
+	if (crash) {
+		mif_err("%s<->%s: ERR! forced_cp_crash:%d <%ps>\n",
+			ld->name, mc->name, crash,
+			CALLER);
+		return false;
+	}
+
+	return true;
+}
+
+bool check_mem_link_tx_pending(struct mem_link_device *mld);
+irqreturn_t shmem_tx_state_handler(int irq, void *data);
+irqreturn_t shmem_irq_handler(int irq, void *data);
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+bool shmem_ap2cp_write_clatinfo(struct mem_link_device *mld, struct clat_info *clat);
+#endif
+
+#endif /* end of __LINK_DEVICE_H__ */
+
diff --git a/link_device_memory.h b/link_device_memory.h
new file mode 100644
index 0000000..4bd6be0
--- /dev/null
+++ b/link_device_memory.h
@@ -0,0 +1,855 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#ifndef __MODEM_LINK_DEVICE_MEMORY_H__
+#define __MODEM_LINK_DEVICE_MEMORY_H__
+
+#include <linux/cpumask.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/sched/rt.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/notifier.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+
+#include "mcu_ipc.h"
+#include "modem_prj.h"
+#include "include/circ_queue.h"
+#include "include/sbd.h"
+#include "include/sipc5.h"
+#include "include/legacy.h"
+#include "link_rx_pktproc.h"
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+#include "link_tx_pktproc.h"
+#endif
+#include "boot_device_spi.h"
+#include "cpif_tp_monitor.h"
+
+/*============================================================================*/
+/* Physical medium backing a memory-type link. Low byte = DPRAM variants,
+ * high byte = SHMEM variants (see the masks below).
+ */
+enum mem_iface_type {
+	MEM_EXT_DPRAM = 0x0001, /* External DPRAM */
+	MEM_AP_IDPRAM = 0x0002, /* DPRAM in AP */
+	MEM_CP_IDPRAM = 0x0004, /* DPRAM in CP */
+	MEM_PLD_DPRAM = 0x0008, /* PLD or FPGA */
+	MEM_SYS_SHMEM = 0x0100, /* Shared-memory (SHMEM) on a system bus */
+	MEM_C2C_SHMEM = 0x0200, /* SHMEM with C2C (Chip-to-chip) interface */
+	MEM_LLI_SHMEM = 0x0400, /* SHMEM with MIPI-LLI interface */
+};
+
+#define MEM_DPRAM_TYPE_MASK 0x00FF
+#define MEM_SHMEM_TYPE_MASK 0xFF00
+
+/*============================================================================*/
+/* Bit layout of the 16-bit AP<->CP interrupt word */
+#define MASK_INT_VALID 0x0080
+#define MASK_TX_FLOWCTL_SUSPEND 0x0010
+#define MASK_TX_FLOWCTL_RESUME 0x0000
+
+#define MASK_CMD_VALID 0x0040
+#define MASK_CMD_FIELD 0x003F
+
+#define MASK_REQ_ACK_FMT 0x0020
+#define MASK_REQ_ACK_RAW 0x0010
+#define MASK_RES_ACK_FMT 0x0008
+#define MASK_RES_ACK_RAW 0x0004
+#define MASK_SEND_FMT 0x0002
+#define MASK_SEND_RAW 0x0001
+#define MASK_SEND_DATA 0x0001
+
+/* Command codes carried in MASK_CMD_FIELD */
+#define CMD_INIT_START 0x0001
+#define CMD_INIT_END 0x0002
+#define CMD_REQ_ACTIVE 0x0003
+#define CMD_RES_ACTIVE 0x0004
+#define CMD_REQ_TIME_SYNC 0x0005
+#define CMD_KERNEL_PANIC 0x0006
+#define CMD_CRASH_RESET 0x0007
+#define CMD_PHONE_START 0x0008
+#define CMD_CRASH_EXIT 0x0009
+#define CMD_CP_DEEP_SLEEP 0x000A
+#define CMD_NV_REBUILDING 0x000B
+#define CMD_EMER_DOWN 0x000C
+#define CMD_PIF_INIT_DONE 0x000D
+#define CMD_SILENT_NV_REBUILD 0x000E
+#define CMD_NORMAL_POWER_OFF 0x000F
+
+/*============================================================================*/
+/* TX path tuning knobs */
+#define MAX_SKB_TXQ_DEPTH 1024
+#define TX_PERIOD_MS 1 /* 1 ms */
+#define MAX_TX_BUSY_COUNT 1024
+#define BUSY_COUNT_MASK 0xF
+
+#define RES_ACK_WAIT_TIMEOUT 10 /* 10 ms */
+
+/* Bit positions in ld->tx_flowctrl_mask */
+enum tx_flowctrl_mask_bit {
+	TXQ_STOP_MASK = 1,
+	TX_SUSPEND_MASK,
+};
+
+#define SHM_FLOWCTL_BIT BIT(2)
+
+/*============================================================================*/
+#define FORCE_CRASH_ACK_TIMEOUT (5 * HZ)
+
+/*============================================================================*/
+/* srinfo / boot-log shared-memory layout */
+#define SHMEM_SRINFO_DATA_STR 64
+
+#define SHMEM_BOOTLOG_BASE 0xC00
+#define SHMEM_BOOTLOG_BUFF 0x1FF
+#define SHMEM_BOOTLOG_OFFSET 0x4
+
+/*============================================================================*/
+/* One captured view of the memory interface, queued for logging/debug. */
+struct __packed mem_snapshot {
+	/* Timestamp */
+	struct timespec64 ts;
+
+	/* Direction (TX or RX) */
+	enum direction dir;
+
+	/* The status of memory interface at the time */
+	unsigned int magic;
+	unsigned int access;
+
+	/* Queue indices per IPC map and per direction */
+	unsigned int head[IPC_MAP_MAX][MAX_DIR];
+	unsigned int tail[IPC_MAP_MAX][MAX_DIR];
+
+	/* Raw interrupt words (CP->AP / AP->CP) */
+	u16 int2ap;
+	u16 int2cp;
+};
+
+/* Linked-list node wrapping a mem_snapshot. */
+struct mst_buff {
+	/* These two members must be first. */
+	struct mst_buff *next;
+	struct mst_buff *prev;
+
+	struct mem_snapshot snapshot;
+};
+
+/* Head of an mst_buff list; layout mirrors the node so list ops work. */
+struct mst_buff_head {
+	/* These two members must be first. */
+	struct mst_buff *next;
+	struct mst_buff *prev;
+
+	u32 qlen;
+	spinlock_t lock;
+};
+
+/*============================================================================*/
+/* IPC transport flavor: legacy circular queues vs. shared buffer descriptors */
+enum mem_ipc_mode {
+	MEM_LEGACY_IPC,
+	MEM_SBD_IPC,
+};
+
+#define FREQ_MAX_LV (40)
+
+/* One frequency table parsed from ECT (MIF/CP_CPU/CP/...) */
+struct freq_table {
+	int num_of_table;
+	u32 use_dfs_max_freq;
+	u32 cal_id_mif;
+	u32 freq[FREQ_MAX_LV];
+};
+
+/* A control message slot: either a mailbox SR number or a shmem address. */
+struct ctrl_msg {
+	u32 type;
+	union {
+		u32 sr_num;
+		u32 __iomem *addr;
+	};
+};
+
+/* State of one memory-type (SHMEM/PCIE) link device. Embeds the generic
+ * struct link_device as its first member (see to_mem_link_device()).
+ */
+struct mem_link_device {
+	/**
+	 * COMMON and MANDATORY to all link devices
+	 */
+	struct link_device link_dev;
+
+	/**
+	 * Attributes
+	 */
+	unsigned long attrs; /* Set of link_attr_bit flags */
+
+	/**
+	 * Flags
+	 */
+	bool dpram_magic; /* DPRAM-style magic code */
+
+	/**
+	 * {physical address, size, virtual address} for BOOT region
+	 */
+	phys_addr_t boot_start;
+	size_t boot_size;
+	u8 __iomem *boot_base;
+	u32 boot_img_offset; /* From IPC base */
+	u32 boot_img_size;
+
+	/**
+	 * {physical address, size, virtual address} for IPC region
+	 */
+	phys_addr_t start;
+	size_t size;
+	u8 __iomem *base; /* virtual address of ipc mem start */
+#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
+	u8 __iomem *hiprio_base; /* virtual address of priority queue start */
+#endif
+
+	/**
+	 * vss region for dump
+	 */
+	u8 __iomem *vss_base;
+
+	/**
+	 * acpm region for dump
+	 */
+	u8 __iomem *acpm_base;
+	int acpm_size;
+
+	/* GNSS firmware region (only with LINK_ATTR_XMIT_BTDLR_GNSS) */
+	void __iomem *gnss_v_base;
+
+	/* Boot link device */
+	struct legacy_link_device legacy_link_dev;
+
+	/* sbd link device */
+	struct sbd_link_device sbd_link_dev;
+
+	/**
+	 * GPIO#, MBOX#, IRQ# for IPC
+	 */
+	unsigned int int_ap2cp_msg; /* INTR# for IPC TX */
+	unsigned int irq_cp2ap_msg; /* IRQ# for IPC RX */
+
+	unsigned int sbi_cp2ap_wakelock_mask;
+	unsigned int sbi_cp2ap_wakelock_pos;
+	unsigned int irq_cp2ap_wakelock; /* IRQ# for wakelock */
+
+	unsigned int sbi_cp_status_mask;
+	unsigned int sbi_cp_status_pos;
+	unsigned int irq_cp2ap_status; /* IRQ# for TX FLOWCTL */
+
+	/* ECT frequency tables (see parse_ect_tables()) */
+	unsigned int total_freq_table_count;
+
+	struct freq_table mif_table;
+	struct freq_table cp_cpu_table;
+	struct freq_table cp_table;
+	struct freq_table cp_em_table;
+	struct freq_table cp_mcw_table;
+
+	unsigned int sbi_cp_rat_mode_mask; /* MBOX# for pcie */
+	unsigned int sbi_cp_rat_mode_pos; /* MBOX# for pcie */
+	unsigned int irq_cp2ap_rat_mode; /* IRQ# for pcie */
+
+	/* Last TX flow-control command received (see chk_same_cmd()) */
+	unsigned int tx_flowctrl_cmd;
+
+	/* Wakeup source held while the CP requests the AP stay awake */
+	struct wakeup_source *ws;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	unsigned int int_ap2cp_clatinfo_send;
+	unsigned int irq_cp2ap_clatinfo_ack;
+
+	struct mutex clatinfo_lock;
+#endif
+
+	/**
+	 * Member variables for TX & RX
+	 */
+	struct mst_buff_head msb_rxq;
+	struct mst_buff_head msb_log;
+
+	struct hrtimer tx_timer;
+	struct hrtimer sbd_tx_timer;
+	struct hrtimer sbd_print_timer;
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+	struct hrtimer pktproc_tx_timer;
+#endif
+	struct work_struct page_reclaim_work;
+
+	/**
+	 * Member variables for CP booting and crash dump
+	 */
+	struct delayed_work bootdump_rx_dwork;
+	atomic_t init_end_cnt;
+	atomic_t init_end_busy;
+	int last_init_end_cnt;
+
+	/**
+	 * Mandatory methods for the common memory-type interface framework
+	 */
+	void (*send_ap2cp_irq)(struct mem_link_device *mld, u16 mask);
+
+	/**
+	 * Optional methods for some kind of memory-type interface media
+	 */
+	u16 (*recv_cp2ap_irq)(struct mem_link_device *mld);
+	u16 (*read_ap2cp_irq)(struct mem_link_device *mld);
+	u16 (*recv_cp2ap_status)(struct mem_link_device *mld);
+	void (*finalize_cp_start)(struct mem_link_device *mld);
+	void (*unmap_region)(void *rgn);
+	void (*debug_info)(void);
+	void (*cmd_handler)(struct mem_link_device *mld, u16 cmd);
+
+	/* TX kick period (TX_PERIOD_MS in ns) and copy-path statistics */
+	unsigned int tx_period_ns;
+	unsigned int force_use_memcpy;
+	unsigned int memcpy_packet_count;
+	unsigned int zeromemcpy_packet_count;
+
+	/* Non-zero while a deliberate CP crash is in flight (see ipc_active()) */
+	atomic_t forced_cp_crash;
+	struct timer_list crash_ack_timer;
+
+	spinlock_t state_lock;
+	/* protects boot_base nc region */
+	struct mutex vmap_lock;
+	enum link_state state;
+
+	/* Dummy netdev hosting the shared NAPI context and its counters */
+	struct net_device dummy_net;
+	struct napi_struct mld_napi;
+	unsigned int rx_int_enable;
+	unsigned int rx_int_count;
+	unsigned int rx_poll_count;
+	unsigned long long rx_int_disabled_time;
+
+	/* Location for arguments in shared memory */
+	u32 __iomem *ap_version;
+	u32 __iomem *cp_version;
+	u32 __iomem *cmsg_offset; /* address where cmsg offset is written */
+	u32 __iomem *srinfo_offset;
+	u32 __iomem *clk_table_offset;
+	u32 __iomem *buff_desc_offset;
+	u32 __iomem *capability_offset;
+
+	u32 __iomem *ap_capability_offset[AP_CP_CAP_PARTS];
+	u32 __iomem *cp_capability_offset[AP_CP_CAP_PARTS];
+
+	/* Location for control messages in shared memory */
+	struct ctrl_msg ap2cp_msg;
+	struct ctrl_msg cp2ap_msg;
+	struct ctrl_msg ap2cp_united_status;
+	struct ctrl_msg cp2ap_united_status;
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	struct ctrl_msg ap2cp_clatinfo_xlat_v4_addr;
+	struct ctrl_msg ap2cp_clatinfo_xlat_addr_0;
+	struct ctrl_msg ap2cp_clatinfo_xlat_addr_1;
+	struct ctrl_msg ap2cp_clatinfo_xlat_addr_2;
+	struct ctrl_msg ap2cp_clatinfo_xlat_addr_3;
+	struct ctrl_msg ap2cp_clatinfo_index;
+#endif
+	struct ctrl_msg ap2cp_kerneltime; /* for DRAM_V1 and MAILBOX_SR */
+	struct ctrl_msg ap2cp_kerneltime_sec; /* for DRAM_V2 */
+	struct ctrl_msg ap2cp_kerneltime_usec; /* for DRAM_V2 */
+	struct ctrl_msg ap2cp_handover_block_info;
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+	/* Doorbell */
+	unsigned int intval_ap2cp_msg;
+	unsigned int intval_ap2cp_pcie_link_ack;
+
+	/* MSI */
+	u8 __iomem *msi_reg_base;
+	bool msi_irq_enabled;
+	int msi_irq_base;
+	bool msi_irq_base_wake;
+	u32 msi_irq_base_cpu;
+	u32 msi_irq_q_cpu[PKTPROC_MAX_QUEUE];
+#endif
+
+	u32 __iomem *srinfo_base;
+	u32 srinfo_size;
+	u32 __iomem *clk_table;
+
+	u32 ap_capability[AP_CP_CAP_PARTS];
+	u32 cp_capability[AP_CP_CAP_PARTS];
+
+	/* RX delivery hooks (net stack vs. demux path) */
+	int (*pass_skb_to_net)(struct mem_link_device *mld, struct sk_buff *skb);
+	int (*pass_skb_to_demux)(struct mem_link_device *mld, struct sk_buff *skb);
+
+	struct pktproc_adaptor pktproc;
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+	struct pktproc_adaptor_ul pktproc_ul;
+#endif
+
+	int pktproc_use_36bit_addr;
+
+	/* SPI bus for CP bootloader download; -1 when unset in DT */
+	int spi_bus_num;
+
+	struct cpif_tpmon *tpmon;
+
+	struct toe_ctrl_t *tc;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	bool disable_hw_clat;
+#endif
+
+#if defined(CPIF_WAKEPKT_SET_MARK)
+	/* Wakeup-packet accounting exposed via sysfs */
+	atomic_t net_wakeup_count;
+	atomic_t misc_wakeup_count;
+#endif
+};
+
+/* Upcasts from embedded members back to the owning mem_link_device */
+#define to_mem_link_device(ld) \
+	container_of(ld, struct mem_link_device, link_dev)
+#define ld_to_mem_link_device(ld) \
+	container_of(ld, struct mem_link_device, link_dev)
+#define sbd_to_mem_link_device(sl) \
+	container_of(sl, struct mem_link_device, sbd_link_dev)
+
+/* Magic words written into shared memory to mark the link state */
+#define MEM_IPC_MAGIC 0xAA
+#define MEM_CRASH_MAGIC 0xDEADDEAD
+#define MEM_BOOT_MAGIC 0x424F4F54
+#define MEM_DUMP_MAGIC 0x44554D50
+
+#define MAX_TABLE_COUNT 8
+
+/* Per-table header inside the shared clock-table blob */
+struct clock_table_info {
+	char table_name[4];
+	u32 table_count;
+};
+
+/* Header of the shared clock-table blob exchanged with the CP */
+struct clock_table {
+	char parser_version[4];
+	u32 total_table_count;
+	struct clock_table_info table_info[MAX_TABLE_COUNT];
+};
+
+/*============================================================================*/
+/* True if @type denotes a shared-memory based link interface. */
+static inline bool mem_type_shmem(enum mem_iface_type type)
+{
+	return !!(type & MEM_SHMEM_TYPE_MASK);
+}
+
+/*============================================================================*/
+/* An interrupt value is only meaningful when its VALID bit is set. */
+static inline bool int_valid(u16 x)
+{
+	return !!(x & MASK_INT_VALID);
+}
+
+/* Tag @mask with the VALID bit so it can be sent as an interrupt value. */
+static inline u16 mask2int(u16 mask)
+{
+	return (u16)(mask | MASK_INT_VALID);
+}
+
+/*
+ * True if the interrupt value carries a command.
+ * @remark This must be invoked after validation with int_valid().
+ */
+static inline bool cmd_valid(u16 x)
+{
+	return !!(x & MASK_CMD_VALID);
+}
+
+/*
+ * Returns true when @x equals the last flow-control command seen;
+ * otherwise records @x as the new command and returns false.
+ */
+static inline bool chk_same_cmd(struct mem_link_device *mld, u16 x)
+{
+	bool same = (mld->tx_flowctrl_cmd == x);
+
+	if (!same)
+		mld->tx_flowctrl_cmd = x;
+
+	return same;
+}
+
+/* Extract the command field from an interrupt value. */
+static inline u16 int2cmd(u16 x)
+{
+	return (u16)(x & MASK_CMD_FIELD);
+}
+
+/* Build an interrupt value carrying @cmd with the CMD and VALID bits set. */
+static inline u16 cmd2int(u16 cmd)
+{
+	u16 val = cmd | MASK_CMD_VALID;
+
+	return mask2int(val);
+}
+
+/*============================================================================*/
+/* Select the TX or RX circular queue of @dev according to @dir. */
+static inline struct circ_queue *cq(struct legacy_ipc_device *dev,
+				    enum direction dir)
+{
+	if (dir == TX)
+		return &dev->txq;
+
+	return &dev->rxq;
+}
+
+static inline unsigned int get_txq_head(struct legacy_ipc_device *dev)
+{
+ return get_head(&dev->txq);
+}
+
+static inline void set_txq_head(struct legacy_ipc_device *dev, unsigned int in)
+{
+ set_head(&dev->txq, in);
+}
+
+static inline unsigned int get_txq_tail(struct legacy_ipc_device *dev)
+{
+ return get_tail(&dev->txq);
+}
+
+static inline void set_txq_tail(struct legacy_ipc_device *dev, unsigned int out)
+{
+ set_tail(&dev->txq, out);
+}
+
+static inline char *get_txq_buff(struct legacy_ipc_device *dev)
+{
+ return get_buff(&dev->txq);
+}
+
+static inline unsigned int get_txq_buff_size(struct legacy_ipc_device *dev)
+{
+ return get_size(&dev->txq);
+}
+
+static inline unsigned int get_rxq_head(struct legacy_ipc_device *dev)
+{
+ return get_head(&dev->rxq);
+}
+
+static inline void set_rxq_head(struct legacy_ipc_device *dev, unsigned int in)
+{
+ set_head(&dev->rxq, in);
+}
+
+static inline unsigned int get_rxq_tail(struct legacy_ipc_device *dev)
+{
+ return get_tail(&dev->rxq);
+}
+
+static inline void set_rxq_tail(struct legacy_ipc_device *dev, unsigned int out)
+{
+ set_tail(&dev->rxq, out);
+}
+
+static inline char *get_rxq_buff(struct legacy_ipc_device *dev)
+{
+ return get_buff(&dev->rxq);
+}
+
+static inline unsigned int get_rxq_buff_size(struct legacy_ipc_device *dev)
+{
+ return get_size(&dev->rxq);
+}
+
+static inline u16 msg_mask(struct legacy_ipc_device *dev)
+{
+ return dev->msg_mask;
+}
+
+static inline u16 req_ack_mask(struct legacy_ipc_device *dev)
+{
+ return dev->req_ack_mask;
+}
+
+static inline u16 res_ack_mask(struct legacy_ipc_device *dev)
+{
+ return dev->res_ack_mask;
+}
+
+static inline bool req_ack_valid(struct legacy_ipc_device *dev, u16 val)
+{
+ if (!cmd_valid(val) && (val & req_ack_mask(dev)))
+ return true;
+ else
+ return false;
+}
+
+static inline bool res_ack_valid(struct legacy_ipc_device *dev, u16 val)
+{
+ if (!cmd_valid(val) && (val & res_ack_mask(dev)))
+ return true;
+ else
+ return false;
+}
+
+/* Check whether the RX queue of @dev is empty (pointers read under lock). */
+static inline bool rxq_empty(struct legacy_ipc_device *dev)
+{
+	unsigned long flags;
+	u32 in, out;
+
+	spin_lock_irqsave(&dev->rxq.lock, flags);
+	in = get_rxq_head(dev);
+	out = get_rxq_tail(dev);
+	spin_unlock_irqrestore(&dev->rxq.lock, flags);
+
+	return circ_empty(in, out);
+}
+
+/* Check whether the TX queue of @dev is empty (pointers read under lock). */
+static inline bool txq_empty(struct legacy_ipc_device *dev)
+{
+	unsigned long flags;
+	u32 in, out;
+
+	spin_lock_irqsave(&dev->txq.lock, flags);
+	in = get_txq_head(dev);
+	out = get_txq_tail(dev);
+	spin_unlock_irqrestore(&dev->txq.lock, flags);
+
+	return circ_empty(in, out);
+}
+
+/**
+ * construct_ctrl_msg() - fill a control-message descriptor from DT values
+ * @cmsg: descriptor to fill in
+ * @arr_from_dt: two-element array from device tree; [0] is the ctrl msg
+ *	type, [1] is the shared-register number (MAILBOX_SR) or the byte
+ *	offset of the message word (DRAM_V1/DRAM_V2)
+ * @base: base of the shared-memory region used to resolve DRAM offsets
+ *
+ * Returns 0 on success, -EINVAL if @cmsg or @arr_from_dt is NULL or the
+ * type read from DT is not supported.
+ */
+static inline int construct_ctrl_msg(struct ctrl_msg *cmsg, u32 *arr_from_dt,
+				     u8 __iomem *base)
+{
+	/* arr_from_dt was dereferenced unconditionally before; check both */
+	if (!cmsg || !arr_from_dt)
+		return -EINVAL;
+
+	cmsg->type = arr_from_dt[0];
+	switch (cmsg->type) {
+	case MAILBOX_SR:
+		cmsg->sr_num = arr_from_dt[1];
+		break;
+	case DRAM_V1:
+	case DRAM_V2:
+		cmsg->addr = (u32 __iomem *)(base + arr_from_dt[1]);
+		break;
+	case CMSG_TYPE_NONE:
+		break;
+	default:
+		mif_err("ERR! wrong type for ctrl msg\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Clear the control-message value. Only DRAM-backed messages hold state
+ * that needs a reset; mailbox shared registers and GPIO types do not.
+ */
+static inline void init_ctrl_msg(struct ctrl_msg *cmsg)
+{
+	switch (cmsg->type) {
+	case DRAM_V1:
+	case DRAM_V2:
+		*cmsg->addr = 0;
+		break;
+	case MAILBOX_SR:
+	case GPIO:
+	default:
+		/* nothing to do */
+		break;
+	}
+}
+
+/* Read the current control-message value.
+ * MAILBOX_SR reads the mailbox shared register (0 when CONFIG_MCU_IPC is
+ * disabled); DRAM_V1/V2 read the shared-memory word; GPIO and unknown
+ * types yield 0.
+ */
+static inline u32 get_ctrl_msg(struct ctrl_msg *cmsg)
+{
+	u32 val = 0;
+
+	switch (cmsg->type) {
+	case MAILBOX_SR:
+#if IS_ENABLED(CONFIG_MCU_IPC)
+		val = cp_mbox_get_sr(cmsg->sr_num);
+#endif
+		break;
+	case DRAM_V1:
+	case DRAM_V2:
+		val = ioread32(cmsg->addr);
+		break;
+	case GPIO:
+		break;
+	default:
+		break;
+	}
+
+	return val;
+}
+
+/* Write @msg as the new control-message value. */
+static inline void set_ctrl_msg(struct ctrl_msg *cmsg, u32 msg)
+{
+	switch (cmsg->type) {
+	case MAILBOX_SR:
+#if IS_ENABLED(CONFIG_MCU_IPC)
+		cp_mbox_set_sr(cmsg->sr_num, msg);
+#endif
+		break;
+	case DRAM_V1:
+	case DRAM_V2:
+		iowrite32(msg, cmsg->addr);
+		break;
+	case GPIO:
+	default:
+		/* not supported for this message type */
+		break;
+	}
+}
+
+/* Extract the bit-field (@mask after shifting right by @pos) from the
+ * control-message value. Returns 0 for GPIO/unknown types or when
+ * CONFIG_MCU_IPC is disabled for MAILBOX_SR.
+ */
+static inline u32 extract_ctrl_msg(struct ctrl_msg *cmsg, u32 mask, u32 pos)
+{
+	u32 val = 0;
+
+	switch (cmsg->type) {
+	case MAILBOX_SR:
+#if IS_ENABLED(CONFIG_MCU_IPC)
+		val = cp_mbox_extract_sr(cmsg->sr_num, mask, pos);
+#endif
+		break;
+	case DRAM_V1:
+	case DRAM_V2:
+		val = (ioread32(cmsg->addr) >> pos) & mask;
+		break;
+	case GPIO:
+		break;
+	default:
+		break;
+	}
+
+	return val;
+}
+
+/* Update only the bit-field (@mask << @pos) of the control message to @msg,
+ * leaving the other bits intact.
+ * NOTE(review): the DRAM read-modify-write below is not atomic — confirm
+ * callers serialize concurrent updates to the same word.
+ */
+static inline void update_ctrl_msg(struct ctrl_msg *cmsg, u32 msg, u32 mask, u32 pos)
+{
+	u32 val = 0;
+
+	switch (cmsg->type) {
+	case MAILBOX_SR:
+#if IS_ENABLED(CONFIG_MCU_IPC)
+		cp_mbox_update_sr(cmsg->sr_num, msg, mask, pos);
+#endif
+		break;
+	case DRAM_V1:
+	case DRAM_V2:
+		val = ioread32(cmsg->addr);
+		val &= ~(mask << pos);
+		val |= (msg & mask) << pos;
+		iowrite32(val, cmsg->addr);
+		break;
+	case GPIO:
+		break;
+	default:
+		break;
+	}
+}
+
+/*============================================================================*/
+int msb_init(void);
+
+struct mst_buff *msb_alloc(void);
+void msb_free(struct mst_buff *msb);
+
+void msb_queue_head_init(struct mst_buff_head *list);
+void msb_queue_tail(struct mst_buff_head *list, struct mst_buff *msb);
+void msb_queue_head(struct mst_buff_head *list, struct mst_buff *msb);
+
+struct mst_buff *msb_dequeue(struct mst_buff_head *list);
+
+void msb_queue_purge(struct mst_buff_head *list);
+
+struct mst_buff *mem_take_snapshot(struct mem_link_device *mld,
+ enum direction dir);
+
+/*============================================================================*/
+/* Raise the AP->CP IPC interrupt with value @val via the link's hook. */
+static inline void send_ipc_irq(struct mem_link_device *mld, u16 val)
+{
+	if (unlikely(!mld->send_ap2cp_irq))
+		return;
+
+	mld->send_ap2cp_irq(mld, val);
+}
+
+/* Same as send_ipc_irq() but dumps the mailbox shared registers before and
+ * after raising the interrupt (MAILBOX_SR links with CONFIG_MCU_IPC only),
+ * for debugging.
+ */
+static inline void send_ipc_irq_debug(struct mem_link_device *mld, u16 val)
+{
+#if IS_ENABLED(CONFIG_MCU_IPC)
+	if (mld->ap2cp_msg.type == MAILBOX_SR)
+		cp_mbox_dump_sr();
+#endif
+	send_ipc_irq(mld, val);
+#if IS_ENABLED(CONFIG_MCU_IPC)
+	if (mld->ap2cp_msg.type == MAILBOX_SR)
+		cp_mbox_dump_sr();
+#endif
+}
+
+void mem_irq_handler(struct mem_link_device *mld, struct mst_buff *msb);
+
+/*============================================================================*/
+void __iomem *mem_vmap(phys_addr_t pa, size_t size, struct page *pages[]);
+void mem_vunmap(void *va);
+
+int mem_register_boot_rgn(struct mem_link_device *mld, phys_addr_t start,
+ size_t size);
+void mem_unregister_boot_rgn(struct mem_link_device *mld);
+int mem_setup_boot_map(struct mem_link_device *mld);
+
+int mem_register_ipc_rgn(struct mem_link_device *mld, phys_addr_t start,
+ size_t size);
+void mem_unregister_ipc_rgn(struct mem_link_device *mld);
+void mem_setup_ipc_map(struct mem_link_device *mld);
+
+struct mem_link_device *mem_create_link_device(enum mem_iface_type type,
+ struct modem_data *modem);
+
+/*============================================================================*/
+int mem_reset_ipc_link(struct mem_link_device *mld);
+void mem_cmd_handler(struct mem_link_device *mld, u16 cmd);
+
+/*============================================================================*/
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+void pktproc_ul_q_stop(struct pktproc_queue_ul *q);
+int pktproc_ul_q_check_busy(struct pktproc_queue_ul *q);
+#endif
+
+void sbd_txq_stop(struct sbd_ring_buffer *rb);
+int sbd_txq_check_busy(struct sbd_ring_buffer *rb);
+
+void txq_stop(struct mem_link_device *mld, struct legacy_ipc_device *dev);
+int txq_check_busy(struct mem_link_device *mld, struct legacy_ipc_device *dev);
+
+void tx_flowctrl_suspend(struct mem_link_device *mld);
+void tx_flowctrl_resume(struct mem_link_device *mld);
+
+void send_req_ack(struct mem_link_device *mld, struct legacy_ipc_device *dev);
+void recv_res_ack(struct mem_link_device *mld, struct legacy_ipc_device *dev,
+ struct mem_snapshot *mst);
+
+void recv_req_ack(struct mem_link_device *mld, struct legacy_ipc_device *dev,
+ struct mem_snapshot *mst);
+void send_res_ack(struct mem_link_device *mld, struct legacy_ipc_device *dev);
+
+/*============================================================================*/
+void mem_handle_cp_crash(struct mem_link_device *mld, enum modem_state state);
+void mem_forced_cp_crash(struct mem_link_device *mld);
+
+/*============================================================================*/
+void print_req_ack(struct mem_link_device *mld, struct mem_snapshot *mst,
+ struct legacy_ipc_device *dev, enum direction dir);
+void print_res_ack(struct mem_link_device *mld, struct mem_snapshot *mst,
+ struct legacy_ipc_device *dev, enum direction dir);
+
+void print_mem_snapshot(struct mem_link_device *mld, struct mem_snapshot *mst);
+void print_dev_snapshot(struct mem_link_device *mld, struct mem_snapshot *mst,
+ struct legacy_ipc_device *dev);
+
+/* Allocate an skb of @len bytes plus NET_SKB_PAD headroom.
+ * Returns NULL on allocation failure (after logging).
+ * NOTE(review): in_interrupt() does not cover every atomic context (e.g.
+ * spinlock held with IRQs on) — confirm no caller sleeps via GFP_KERNEL
+ * while atomic.
+ */
+static inline struct sk_buff *mem_alloc_skb(unsigned int len)
+{
+	gfp_t priority;
+	struct sk_buff *skb;
+
+	/* atomic allocation when called from IRQ/softirq context */
+	priority = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+
+	skb = alloc_skb(len + NET_SKB_PAD, priority);
+	if (!skb) {
+		mif_err("ERR! alloc_skb(len:%d + pad:%d, gfp:0x%x) fail\n",
+			len, NET_SKB_PAD, priority);
+#if IS_ENABLED(CONFIG_SEC_DEBUG_MIF_OOM)
+		show_mem(SHOW_MEM_FILTER_NODES);
+#endif
+		return NULL;
+	}
+
+	skb_reserve(skb, NET_SKB_PAD);
+	return skb;
+}
+
+/*============================================================================*/
+
+#define NET_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+
+#if IS_ENABLED(CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE)
+extern int is_rndis_use(void);
+#endif
+
+#endif /* __MODEM_LINK_DEVICE_MEMORY_H__ */
diff --git a/link_device_memory_debug.c b/link_device_memory_debug.c
new file mode 100644
index 0000000..5d30055
--- /dev/null
+++ b/link_device_memory_debug.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 Samsung Electronics.
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "link_device_memory.h"
+
+/* Log a REQ_ACK event with queue occupancy for @dev in direction @dir.
+ * Compiled to a no-op unless DEBUG_MODEM_IF_FLOW_CTRL is defined.
+ */
+void print_req_ack(struct mem_link_device *mld, struct mem_snapshot *mst,
+		   struct legacy_ipc_device *dev, enum direction dir)
+{
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+	struct link_device *ld = &mld->link_dev;
+	struct modem_ctl *mc = ld->mc;
+	enum legacy_ipc_map id = dev->id;
+	unsigned int qsize = get_size(cq(dev, dir));
+	unsigned int in = mst->head[id][dir];
+	unsigned int out = mst->tail[id][dir];
+	unsigned int usage = circ_get_usage(qsize, in, out);
+	unsigned int space = circ_get_space(qsize, in, out);
+
+	mif_info("REQ_ACK: %s%s%s: %s_%s.%d {in:%u out:%u usage:%u space:%u}\n",
+		ld->name, arrow(dir), mc->name, dev->name, q_dir(dir),
+		dev->req_ack_cnt[dir], in, out, usage, space);
+#endif
+}
+
+/* Log a RES_ACK event for @dev. The occupancy shown is of the queue in the
+ * opposite direction, since a RES_ACK answers the peer's REQ_ACK.
+ * Compiled to a no-op unless DEBUG_MODEM_IF_FLOW_CTRL is defined.
+ */
+void print_res_ack(struct mem_link_device *mld, struct mem_snapshot *mst,
+		   struct legacy_ipc_device *dev, enum direction dir)
+{
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+	struct link_device *ld = &mld->link_dev;
+	struct modem_ctl *mc = ld->mc;
+	enum legacy_ipc_map id = dev->id;
+	enum direction opp_dir = opposite(dir);	/* opposite direction */
+	unsigned int qsize = get_size(cq(dev, opp_dir));
+	unsigned int in = mst->head[id][opp_dir];
+	unsigned int out = mst->tail[id][opp_dir];
+	unsigned int usage = circ_get_usage(qsize, in, out);
+	unsigned int space = circ_get_space(qsize, in, out);
+
+	mif_info("RES_ACK: %s%s%s: %s_%s.%d {in:%u out:%u usage:%u space:%u}\n",
+		ld->name, arrow(dir), mc->name, dev->name, q_dir(opp_dir),
+		dev->req_ack_cnt[opp_dir], in, out, usage, space);
+#endif
+}
+
+/* Log a one-line summary of a memory snapshot: magic/access words, FMT and
+ * RAW queue head/tail pointers and the pending RX/TX interrupt values.
+ */
+void print_mem_snapshot(struct mem_link_device *mld, struct mem_snapshot *mst)
+{
+	struct link_device *ld = &mld->link_dev;
+
+	mif_info("%s: [%s] ACC{%X %d} FMT{TI:%u TO:%u RI:%u RO:%u} RAW{TI:%u TO:%u RI:%u RO:%u} INTR{RX:0x%X TX:0x%X}\n",
+		ld->name, ipc_dir(mst->dir), mst->magic, mst->access,
+		mst->head[IPC_MAP_FMT][TX], mst->tail[IPC_MAP_FMT][TX],
+		mst->head[IPC_MAP_FMT][RX], mst->tail[IPC_MAP_FMT][RX],
+		mst->head[IPC_MAP_NORM_RAW][TX], mst->tail[IPC_MAP_NORM_RAW][TX],
+		mst->head[IPC_MAP_NORM_RAW][RX], mst->tail[IPC_MAP_NORM_RAW][RX],
+		mst->int2ap, mst->int2cp);
+}
+
+/* Log the snapshotted TXQ/RXQ pointers of a single legacy IPC device and
+ * the pending interrupt of the snapshot's direction. Silently ignores an
+ * out-of-range device id.
+ */
+void print_dev_snapshot(struct mem_link_device *mld, struct mem_snapshot *mst,
+			struct legacy_ipc_device *dev)
+{
+	struct link_device *ld = &mld->link_dev;
+	enum legacy_ipc_map id = dev->id;
+
+	if (id >= IPC_MAP_MAX)
+		return;
+
+	mif_info("%s: [%s] %s | TXQ{in:%u out:%u} RXQ{in:%u out:%u} | INTR{0x%02X}\n",
+		ld->name, ipc_dir(mst->dir), dev->name,
+		mst->head[id][TX], mst->tail[id][TX],
+		mst->head[id][RX], mst->tail[id][RX],
+		(mst->dir == RX) ? mst->int2ap : mst->int2cp);
+}
diff --git a/link_device_memory_flow_control.c b/link_device_memory_flow_control.c
new file mode 100644
index 0000000..7b8bc93
--- /dev/null
+++ b/link_device_memory_flow_control.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 Samsung Electronics.
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "link_device_memory.h"
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+/* Mark PKTPROC UL queue @q busy and stop network TX interfaces (under
+ * netif_lock). Idempotent: does nothing if the queue is already busy.
+ */
+void pktproc_ul_q_stop(struct pktproc_queue_ul *q)
+{
+	struct link_device *ld = &q->mld->link_dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ld->netif_lock, flags);
+	if (!atomic_read(&q->busy)) {
+		mif_info("Requested stop on PKTPROC UL QUEUE %d\n", q->q_idx);
+		atomic_set(&q->busy, 1);
+		stop_net_ifaces(ld, TXQ_STOP_MASK);
+	}
+	spin_unlock_irqrestore(&ld->netif_lock, flags);
+}
+
+static void pktproc_ul_q_start(struct pktproc_queue_ul *q)
+{
+ struct link_device *ld = &q->mld->link_dev;
+ unsigned long flags;
+
+ mif_info("Requested start on PKTPROC UL QUEUE %d\n", q->q_idx);
+
+ spin_lock_irqsave(&ld->netif_lock, flags);
+ atomic_set(&q->busy, 0);
+ resume_net_ifaces(ld, TXQ_STOP_MASK);
+ spin_unlock_irqrestore(&ld->netif_lock, flags);
+}
+
+/* If @q was stopped, restart it once it has drained.
+ * Returns 0 when TX may proceed, -EBUSY while the queue is still backed up.
+ * The busy counter is incremented on every call while stopped, tracking how
+ * long the stall has lasted.
+ */
+int pktproc_ul_q_check_busy(struct pktproc_queue_ul *q)
+{
+	struct link_device *ld = &q->mld->link_dev;
+	int busy_count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ld->netif_lock, flags);
+	busy_count = atomic_read(&q->busy);
+	if (unlikely(busy_count))
+		atomic_inc(&q->busy);
+	spin_unlock_irqrestore(&ld->netif_lock, flags);
+
+	if (!busy_count)
+		return 0;
+
+	spin_lock_irqsave(&q->lock, flags);
+	if (pktproc_ul_q_empty(q->q_info)) {
+		spin_unlock_irqrestore(&q->lock, flags);
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+		if (cp_online(ld->mc))
+			mif_err("PKTPROC UL Queue %d: EMPTY (busy_cnt %d)\n", q->q_idx, busy_count);
+#endif
+		pktproc_ul_q_start(q);
+		return 0;
+	}
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return -EBUSY;
+}
+#endif /* CONFIG_CP_PKTPROC_UL */
+
+void sbd_txq_stop(struct sbd_ring_buffer *rb)
+{
+ struct link_device *ld = rb->ld;
+ unsigned long flags;
+
+ if (!ld->is_ps_ch(rb->ch))
+ return;
+
+ spin_lock_irqsave(&ld->netif_lock, flags);
+ if (!atomic_read(&rb->busy)) {
+ mif_info("Requested stop on rb ch: %d name: %s\n", rb->ch, rb->iod->name);
+ atomic_set(&rb->busy, 1);
+ stop_net_ifaces(ld, TXQ_STOP_MASK);
+ }
+ spin_unlock_irqrestore(&ld->netif_lock, flags);
+}
+
+static void sbd_txq_start(struct sbd_ring_buffer *rb)
+{
+ struct link_device *ld = rb->ld;
+ unsigned long flags;
+
+ if (!ld->is_ps_ch(rb->ch))
+ return;
+
+ mif_info("Requested start on rb ch: %d name: %s\n", rb->ch, rb->iod->name);
+
+ spin_lock_irqsave(&ld->netif_lock, flags);
+ atomic_set(&rb->busy, 0);
+ resume_net_ifaces(ld, TXQ_STOP_MASK);
+ spin_unlock_irqrestore(&ld->netif_lock, flags);
+}
+
+/* If ring buffer @rb was stopped, restart it once it has drained.
+ * Returns 0 when TX may proceed, -EBUSY while the ring is still backed up.
+ * The busy counter counts calls made while stopped.
+ */
+int sbd_txq_check_busy(struct sbd_ring_buffer *rb)
+{
+	struct link_device *ld = rb->ld;
+	int busy_count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ld->netif_lock, flags);
+	busy_count = atomic_read(&rb->busy);
+	if (unlikely(busy_count))
+		atomic_inc(&rb->busy);
+	spin_unlock_irqrestore(&ld->netif_lock, flags);
+
+	if (!busy_count)
+		return 0;
+
+	if (rb_empty(rb)) {
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+		if (cp_online(ld->mc))
+			mif_err("%s TXQ: EMPTY (busy_cnt %d)\n", rb->iod->name, busy_count);
+#endif
+		sbd_txq_start(rb);
+		return 0;
+	}
+
+	return -EBUSY;
+}
+
+/* Mark the legacy TXQ of @dev busy and stop network TX interfaces (FMT is
+ * exempt: control messages must keep flowing). If this call is the one that
+ * actually stopped the interfaces, also tell CP the buffer is stuck via a
+ * REQ_ACK (sent outside the lock).
+ */
+void txq_stop(struct mem_link_device *mld, struct legacy_ipc_device *dev)
+{
+	struct link_device *ld = &mld->link_dev;
+	unsigned long flags;
+	bool ret = false;
+
+	if (dev->id == IPC_MAP_FMT)
+		return;
+
+	spin_lock_irqsave(&ld->netif_lock, flags);
+	if (!atomic_read(&dev->txq.busy)) {
+		mif_info("Requested stop on dev: %s\n", dev->name);
+		atomic_set(&dev->txq.busy, 1);
+		ret = stop_net_ifaces(ld, TXQ_STOP_MASK);
+	}
+	spin_unlock_irqrestore(&ld->netif_lock, flags);
+
+	if (ret) {
+		/* notify cp that legacy buffer is stuck. required for legacy only */
+		send_req_ack(mld, dev);
+	}
+}
+
+static void txq_start(struct mem_link_device *mld, struct legacy_ipc_device *dev)
+{
+ struct link_device *ld = &mld->link_dev;
+ unsigned long flags;
+
+ if (dev->id == IPC_MAP_FMT)
+ return;
+
+ mif_info("Requested start on dev: %s\n", dev->name);
+
+ spin_lock_irqsave(&ld->netif_lock, flags);
+ atomic_set(&dev->txq.busy, 0);
+ resume_net_ifaces(ld, TXQ_STOP_MASK);
+ spin_unlock_irqrestore(&ld->netif_lock, flags);
+}
+
+/* If the legacy TXQ of @dev was stopped, restart it once empty.
+ * Returns 0 when TX may proceed, -EBUSY while still backed up, or -ETIME
+ * after the busy counter crosses BUSY_COUNT_MASK (in which case CP is
+ * re-notified with a REQ_ACK).
+ */
+int txq_check_busy(struct mem_link_device *mld, struct legacy_ipc_device *dev)
+{
+	struct link_device *ld = &mld->link_dev;
+	struct modem_ctl *mc = ld->mc;
+	int busy_count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ld->netif_lock, flags);
+	busy_count = atomic_read(&dev->txq.busy);
+	if (unlikely(busy_count))
+		atomic_inc(&dev->txq.busy);
+	spin_unlock_irqrestore(&ld->netif_lock, flags);
+
+	if (!busy_count)
+		return 0;
+
+	if (txq_empty(dev)) {
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+		if (cp_online(mc)) {
+			mif_err("%s->%s: %s_TXQ: No RES_ACK, but EMPTY (busy_cnt %d)\n",
+				ld->name, mc->name, dev->name, busy_count);
+		}
+#endif
+		txq_start(mld, dev);
+		return 0;
+	}
+
+	if (cp_online(mc) && count_flood(busy_count, BUSY_COUNT_MASK)) {
+		/* notify cp that legacy buffer is stuck. required for legacy only */
+		send_req_ack(mld, dev);
+		return -ETIME;
+	}
+
+	return -EBUSY;
+}
+
+void tx_flowctrl_suspend(struct mem_link_device *mld)
+{
+ struct link_device *ld = &mld->link_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ld->netif_lock, flags);
+ stop_net_ifaces(ld, TX_SUSPEND_MASK);
+ spin_unlock_irqrestore(&ld->netif_lock, flags);
+}
+
+void tx_flowctrl_resume(struct mem_link_device *mld)
+{
+ struct link_device *ld = &mld->link_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ld->netif_lock, flags);
+ resume_net_ifaces(ld, TX_SUSPEND_MASK);
+ spin_unlock_irqrestore(&ld->netif_lock, flags);
+}
+
+/* Ask CP (via a REQ_ACK interrupt) to acknowledge once it has drained the
+ * TXQ of @dev, and count the outstanding request. With flow-control
+ * debugging enabled, also log a TX-side snapshot of the queues.
+ */
+void send_req_ack(struct mem_link_device *mld, struct legacy_ipc_device *dev)
+{
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+	struct mst_buff *msb;
+#endif
+
+	send_ipc_irq(mld, mask2int(req_ack_mask(dev)));
+	dev->req_ack_cnt[TX] += 1;
+
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+	msb = mem_take_snapshot(mld, TX);
+	if (!msb)
+		return;
+	print_req_ack(mld, &msb->snapshot, dev, TX);
+	msb_free(msb);
+#endif
+}
+
+/* CP acknowledged our REQ_ACK: drop the outstanding-request count and
+ * restart the TX queue that was stopped waiting for it.
+ */
+void recv_res_ack(struct mem_link_device *mld, struct legacy_ipc_device *dev,
+		  struct mem_snapshot *mst)
+{
+	dev->req_ack_cnt[TX] -= 1;
+
+	txq_start(mld, dev);
+
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+	print_res_ack(mld, mst, dev, RX);
+#endif
+}
+
+void recv_req_ack(struct mem_link_device *mld, struct legacy_ipc_device *dev,
+ struct mem_snapshot *mst)
+{
+ dev->req_ack_cnt[RX] += 1;
+
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+ print_req_ack(mld, mst, dev, RX);
+#endif
+}
+
+/* Answer a REQ_ACK from CP with a RES_ACK interrupt and drop the count of
+ * requests pending on the RX side. With flow-control debugging enabled,
+ * also log a TX-side snapshot of the queues.
+ */
+void send_res_ack(struct mem_link_device *mld, struct legacy_ipc_device *dev)
+{
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+	struct mst_buff *msb;
+#endif
+
+	send_ipc_irq(mld, mask2int(res_ack_mask(dev)));
+	dev->req_ack_cnt[RX] -= 1;
+
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+	msb = mem_take_snapshot(mld, TX);
+	if (!msb)
+		return;
+	print_res_ack(mld, &msb->snapshot, dev, TX);
+	msb_free(msb);
+#endif
+}
diff --git a/link_device_memory_legacy.c b/link_device_memory_legacy.c
new file mode 100644
index 0000000..ad9501f
--- /dev/null
+++ b/link_device_memory_legacy.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "include/legacy.h"
+#include "modem_utils.h"
+#include "link_device.h"
+
+/* sysfs */
+/* sysfs "legacy/region": dump the static layout (head/tail and buffer
+ * offsets, queue sizes) of the legacy FMT, RAW and (if enabled) QoS
+ * shared-memory regions.
+ */
+static ssize_t region_show(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct modem_data *modem;
+	ssize_t count = 0;
+
+	modem = (struct modem_data *)dev->platform_data;
+
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "FMT offset head:0x%08X buff:0x%08X\n",
+			   modem->legacy_fmt_head_tail_offset,
+			   modem->legacy_fmt_buffer_offset);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "FMT size txq:0x%08X rxq:0x%08X\n",
+			   modem->legacy_fmt_txq_size,
+			   modem->legacy_fmt_rxq_size);
+
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "RAW offset head:0x%08X buff:0x%08X\n",
+			   modem->legacy_raw_head_tail_offset,
+			   modem->legacy_raw_buffer_offset);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "RAW size txq:0x%08X rxq:0x%08X\n",
+			   modem->legacy_raw_txq_size,
+			   modem->legacy_raw_rxq_size);
+
+#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "QoS offset head:0x%08X buff:0x%08X\n",
+			   modem->legacy_raw_qos_head_tail_offset,
+			   modem->legacy_raw_qos_buffer_offset);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "QoS size txq:0x%08X rxq:0x%08X\n",
+			   modem->legacy_raw_qos_txq_size,
+			   modem->legacy_raw_qos_rxq_size);
+#endif
+
+	return count;
+}
+
+/* sysfs "legacy/status": dump the live magic/access words plus per-device
+ * head/tail pointers, busy flags and REQ_ACK counters of every legacy IPC
+ * device.
+ */
+static ssize_t status_show(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct modem_data *modem;
+	struct legacy_link_device *bl;
+	struct legacy_ipc_device *ipc_dev;
+	ssize_t count = 0;
+	int i;
+
+	modem = (struct modem_data *)dev->platform_data;
+	bl = &modem->mld->legacy_link_dev;
+
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "magic:0x%08X mem_access:0x%08X\n",
+			   ioread32(bl->magic), ioread32(bl->mem_access));
+
+	for (i = 0; i < IPC_MAP_MAX; i++) {
+		ipc_dev = bl->dev[i];
+
+		count += scnprintf(&buf[count], PAGE_SIZE - count, "\n");
+		count += scnprintf(&buf[count], PAGE_SIZE - count, "ID:%d name:%s\n",
+				   i, ipc_dev->name);
+		count += scnprintf(&buf[count], PAGE_SIZE - count, "TX busy:%d head:%d tail:%d\n",
+				   atomic_read(&ipc_dev->txq.busy), get_txq_head(ipc_dev),
+				   get_txq_tail(ipc_dev));
+		count += scnprintf(&buf[count], PAGE_SIZE - count, "RX busy:%d head:%d tail:%d\n",
+				   atomic_read(&ipc_dev->rxq.busy), get_rxq_head(ipc_dev),
+				   get_rxq_tail(ipc_dev));
+		count += scnprintf(&buf[count], PAGE_SIZE - count,
+				   "req_ack_cnt[TX]:%d req_ack_cnt[RX]:%d\n",
+				   ipc_dev->req_ack_cnt[TX], ipc_dev->req_ack_cnt[RX]);
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR_RO(region);
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *legacy_attrs[] = {
+ &dev_attr_region.attr,
+ &dev_attr_status.attr,
+ NULL,
+};
+
+static const struct attribute_group legacy_group = {
+ .attrs = legacy_attrs,
+ .name = "legacy",
+};
+
+/* Allocate one legacy IPC device and initialize its circular queues,
+ * interrupt masks, skb queues and locks. The four 32-bit head/tail words
+ * live consecutively at @ht (TX head, TX tail, RX head, RX tail).
+ * Returns 0 on success or -ENOMEM.
+ */
+static int legacy_alloc_ipc_dev(struct legacy_ipc_device **slot,
+				enum legacy_ipc_map id, const char *name,
+				u8 __iomem *ht,
+				void __iomem *tx_buff, unsigned int tx_size,
+				void __iomem *rx_buff, unsigned int rx_size,
+				u16 msg_mask, u16 req_ack_mask, u16 res_ack_mask)
+{
+	struct legacy_ipc_device *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->id = id;
+	strcpy(dev->name, name);
+
+	spin_lock_init(&dev->txq.lock);
+	atomic_set(&dev->txq.busy, 0);
+	dev->txq.head = (void __iomem *)(ht);
+	dev->txq.tail = (void __iomem *)(ht + 4);
+	dev->txq.buff = tx_buff;
+	dev->txq.size = tx_size;
+
+	spin_lock_init(&dev->rxq.lock);
+	atomic_set(&dev->rxq.busy, 0);
+	dev->rxq.head = (void __iomem *)(ht + 8);
+	dev->rxq.tail = (void __iomem *)(ht + 12);
+	dev->rxq.buff = rx_buff;
+	dev->rxq.size = rx_size;
+
+	dev->msg_mask = msg_mask;
+	dev->req_ack_mask = req_ack_mask;
+	dev->res_ack_mask = res_ack_mask;
+
+	dev->skb_txq = kzalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
+	dev->skb_rxq = kzalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
+	if (!dev->skb_txq || !dev->skb_rxq) {
+		/* kfree(NULL) is a no-op, so partial failure is safe here */
+		kfree(dev->skb_txq);
+		kfree(dev->skb_rxq);
+		kfree(dev);
+		return -ENOMEM;
+	}
+	skb_queue_head_init(dev->skb_txq);
+	skb_queue_head_init(dev->skb_rxq);
+
+	dev->req_ack_cnt[TX] = 0;
+	dev->req_ack_cnt[RX] = 0;
+
+	spin_lock_init(&dev->tx_lock);
+
+	*slot = dev;
+	return 0;
+}
+
+/* Create the legacy (FMT / NORM_RAW and, with QoS, HPRIO_RAW) IPC devices
+ * over the shared-memory region and register the "legacy" sysfs group.
+ * Previously the kzalloc() results were used unchecked; each allocation is
+ * now verified. Returns 0 on success or a negative errno. Devices already
+ * allocated are not freed on failure; the caller's probe is expected to
+ * abort in that case.
+ */
+int create_legacy_link_device(struct mem_link_device *mld)
+{
+	struct legacy_link_device *bl = &mld->legacy_link_dev;
+	struct modem_data *modem = mld->link_dev.mdm_data;
+	struct link_device *ld = &mld->link_dev;
+	void __iomem *rx_buff;
+	int ret;
+
+	bl->ld = &mld->link_dev;
+
+	/* magic code and access enable fields */
+	bl->magic = (u32 __iomem *)(mld->base);
+	bl->mem_access = (u32 __iomem *)(mld->base + 4);
+
+	/* IPC_MAP_FMT */
+	ret = legacy_alloc_ipc_dev(&bl->dev[IPC_MAP_FMT], IPC_MAP_FMT, "FMT",
+			(u8 __iomem *)(mld->base + modem->legacy_fmt_head_tail_offset),
+			(void __iomem *)(mld->base + modem->legacy_fmt_buffer_offset),
+			modem->legacy_fmt_txq_size,
+			(void __iomem *)(mld->base + modem->legacy_fmt_buffer_offset +
+					 modem->legacy_fmt_txq_size),
+			modem->legacy_fmt_rxq_size,
+			MASK_SEND_FMT, MASK_REQ_ACK_FMT, MASK_RES_ACK_FMT);
+	if (ret)
+		return ret;
+
+#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
+	/* IPC_MAP_HPRIO_RAW */
+	ret = legacy_alloc_ipc_dev(&bl->dev[IPC_MAP_HPRIO_RAW], IPC_MAP_HPRIO_RAW,
+			"HPRIO_RAW",
+			(u8 __iomem *)(mld->base + modem->legacy_raw_qos_head_tail_offset),
+			(void __iomem *)(mld->hiprio_base),
+			modem->legacy_raw_qos_txq_size,
+			(void __iomem *)(mld->hiprio_base + modem->legacy_raw_qos_txq_size),
+			modem->legacy_raw_qos_rxq_size,
+			MASK_SEND_RAW, MASK_REQ_ACK_RAW, MASK_RES_ACK_RAW);
+	if (ret)
+		return ret;
+#endif
+
+	/* IPC_MAP_NORM_RAW: the RX buffer may live in cached kernel memory */
+	if (modem->legacy_raw_rx_buffer_cached)
+		rx_buff = phys_to_virt(cp_shmem_get_base(bl->ld->mdm_data->cp_num, SHMEM_IPC) +
+				modem->legacy_raw_buffer_offset + modem->legacy_raw_txq_size);
+	else
+		rx_buff = (void __iomem *)(mld->base + modem->legacy_raw_buffer_offset +
+				modem->legacy_raw_txq_size);
+
+	ret = legacy_alloc_ipc_dev(&bl->dev[IPC_MAP_NORM_RAW], IPC_MAP_NORM_RAW,
+			"NORM_RAW",
+			(u8 __iomem *)(mld->base + modem->legacy_raw_head_tail_offset),
+			(void __iomem *)(mld->base + modem->legacy_raw_buffer_offset),
+			modem->legacy_raw_txq_size,
+			rx_buff, modem->legacy_raw_rxq_size,
+			MASK_SEND_RAW, MASK_REQ_ACK_RAW, MASK_RES_ACK_RAW);
+	if (ret)
+		return ret;
+
+	/* sysfs */
+	ret = sysfs_create_group(&ld->dev->kobj, &legacy_group);
+	if (ret != 0) {
+		mif_err("sysfs_create_group() error %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Reset the legacy link for a (re)start of IPC: clears the magic/access
+ * words, zeroes every queue's head/tail, purges pending skbs and
+ * flow-control state, then writes the IPC magic and access-enable flag and
+ * reads them back.
+ * Returns 0 on success, -EACCES if the read-back does not match.
+ */
+int init_legacy_link(struct legacy_link_device *bl)
+{
+	unsigned int magic;
+	unsigned int mem_access;
+	struct modem_data *modem = bl->ld->mdm_data;
+
+	int i = 0;
+
+	iowrite32(0, bl->magic);
+	iowrite32(0, bl->mem_access);
+
+	for (i = 0; i < IPC_MAP_MAX; i++) {
+		struct legacy_ipc_device *dev = bl->dev[i];
+		/* initialize circ_queues */
+		iowrite32(0, dev->txq.head);
+		iowrite32(0, dev->txq.tail);
+		iowrite32(0, dev->rxq.head);
+		iowrite32(0, dev->rxq.tail);
+
+		/* initialize skb queues */
+		skb_queue_purge(dev->skb_txq);
+		atomic_set(&dev->txq.busy, 0);
+		dev->req_ack_cnt[TX] = 0;
+		skb_queue_purge(dev->skb_rxq);
+		atomic_set(&dev->rxq.busy, 0);
+		dev->req_ack_cnt[RX] = 0;
+
+		/* hand the cached RAW RX buffer over to the device before CP
+		 * starts writing into it
+		 */
+		if (modem->legacy_raw_rx_buffer_cached && i == IPC_MAP_NORM_RAW)
+			dma_sync_single_for_device(bl->ld->dev, virt_to_phys(dev->rxq.buff),
+						   dev->rxq.size, DMA_FROM_DEVICE);
+	}
+
+	iowrite32(bl->ld->magic_ipc, bl->magic);
+	iowrite32(1, bl->mem_access);
+
+	magic = ioread32(bl->magic);
+	mem_access = ioread32(bl->mem_access);
+	if (magic != bl->ld->magic_ipc || mem_access != 1)
+		return -EACCES;
+
+	return 0;
+}
+
+/* Copy @skb into the TX circular queue selected by @legacy_buffer_index and
+ * advance the head pointer. If there is no space, retries up to 20 times
+ * with a 50 ms delay per attempt (mdelay when in interrupt context, msleep
+ * otherwise).
+ * Consumes @skb on success and returns the number of bytes queued;
+ * returns -EIO on a corrupt queue or -ENOSPC after the retries are spent
+ * (the skb is NOT freed on error).
+ */
+int xmit_to_legacy_link(struct mem_link_device *mld, u8 ch,
+			struct sk_buff *skb, enum legacy_ipc_map legacy_buffer_index)
+{
+	struct legacy_ipc_device *dev = mld->legacy_link_dev.dev[legacy_buffer_index];
+	struct link_device *ld = &mld->link_dev;
+	char *src = skb->data;
+	char *dst = get_txq_buff(dev);
+	unsigned int qsize = 0;
+	unsigned int in = 0;
+	unsigned int out = 0;
+	unsigned int count = skb->len;
+	int space = 0;
+	int tried = 0;
+
+	while (1) {
+		qsize = get_txq_buff_size(dev);
+		in = get_txq_head(dev);
+		out = get_txq_tail(dev);
+
+		/* is queue valid? */
+		if (!circ_valid(qsize, in, out)) {
+			mif_err("%s: ERR! Invalid %s_TXQ{qsize:%d in:%d out:%d}\n",
+				ld->name, dev->name, qsize, in, out);
+			return -EIO;
+		}
+		/* space available? */
+		space = circ_get_space(qsize, in, out);
+		if (unlikely(space < count)) {
+			mif_err("%s: tried %d NOSPC %s_TX{qsize:%d in:%d out:%d free:%d len:%d}\n",
+				ld->name, tried, dev->name, qsize, in, out, space, count);
+			tried++;
+			if (tried >= 20)
+				return -ENOSPC;
+			/* NOTE(review): mdelay(50) busy-waits in IRQ context —
+			 * up to 1 s total; confirm this path is acceptable
+			 */
+			if (in_interrupt())
+				mdelay(50);
+			else
+				msleep(50);
+			continue;
+		}
+
+		barrier();
+
+		circ_write(dst, src, qsize, in, count);
+
+		barrier();
+
+		set_txq_head(dev, circ_new_ptr(qsize, in, count));
+
+		/* Commit the item before incrementing the head */
+		smp_mb();
+
+		break;
+
+	}
+
+#ifdef DEBUG_MODEM_IF_LINK_TX
+	mif_pkt(ch, "LNK-TX", skb);
+#endif
+
+	dev_kfree_skb_any(skb);
+
+	return count;
+}
+
+
+/* Extract one frame from the RX circular queue of @dev, where @in is the
+ * head pointer already read from shared memory.
+ * Returns a freshly allocated, time-stamped skb holding the frame, or NULL
+ * with *@ret set (-EINVAL on corrupt data — which also triggers a CP crash
+ * if a handler is registered — or -ENOMEM).
+ */
+struct sk_buff *recv_from_legacy_link(struct mem_link_device *mld,
+				      struct legacy_ipc_device *dev, unsigned int in, int *ret)
+{
+	struct link_device *ld = &mld->link_dev;
+	struct sk_buff *skb;
+	char *src = get_rxq_buff(dev);
+	unsigned int qsize = get_rxq_buff_size(dev);
+	unsigned int out = get_rxq_tail(dev);
+	unsigned int rest = circ_get_usage(qsize, in, out);
+	unsigned int len;
+	char hdr[EXYNOS_HEADER_SIZE];
+	char pr_buff[BAD_MSG_BUFFER_SIZE];
+
+	/* Make sure out pointer is within bounds. This pointer is read
+	 * from CP shared memory and must be validated before
+	 * circ_read below.
+	 * NOTE(review): should this be `out >= qsize`? out == qsize passes
+	 * this check — confirm circ_read wraps such an offset safely.
+	 */
+	if (out > qsize) {
+		mif_err("OOB err! out:%u, qsize:%u\n", out, qsize);
+		if (ld->link_trigger_cp_crash) {
+			ld->link_trigger_cp_crash(mld,
+					CRASH_REASON_MIF_RX_BAD_DATA, "OOB error");
+		}
+		*ret = -EINVAL;
+		goto no_mem;
+	}
+
+	/* Copy the header in a frame to the header buffer */
+	switch (ld->protocol) {
+	case PROTOCOL_SIPC:
+		circ_read(hdr, src, qsize, out, SIPC5_MIN_HEADER_SIZE);
+		break;
+	case PROTOCOL_SIT:
+		circ_read(hdr, src, qsize, out, EXYNOS_HEADER_SIZE);
+		break;
+	default:
+		/* NOTE(review): hdr stays uninitialized on this path yet is
+		 * still inspected below — confirm ld->protocol is always one
+		 * of the two values above
+		 */
+		mif_err("procotol error %d\n", ld->protocol);
+		break;
+	}
+
+	/* Check the config field in the header */
+	if (unlikely(!ld->is_start_valid(hdr))) {
+		mif_err("%s: ERR! %s BAD CFG 0x%02X (in:%d out:%d rest:%d)\n",
+			ld->name, dev->name, hdr[SIPC5_CONFIG_OFFSET],
+			in, out, rest);
+		goto bad_msg;
+	}
+
+	/* Verify the length of the frame (data + padding) */
+	len = ld->get_total_len(hdr);
+	if (unlikely(len > rest)) {
+		mif_err("%s: ERR! %s BAD LEN %d > rest %d\n",
+			ld->name, dev->name, len, rest);
+		goto bad_msg;
+	}
+
+	/* Allocate an skb */
+	skb = mem_alloc_skb(len);
+	if (!skb) {
+		mif_err("%s: ERR! %s mem_alloc_skb(%d) fail\n",
+			ld->name, dev->name, len);
+		*ret = -ENOMEM;
+		goto no_mem;
+	}
+
+	/* Read the frame from the RXQ */
+	circ_read(skb_put(skb, len), src, qsize, out, len);
+
+	/* Update tail (out) pointer to the frame to be read in the future */
+	set_rxq_tail(dev, circ_new_ptr(qsize, out, len));
+
+	/* Finish reading data before incrementing tail */
+	smp_mb();
+
+	/* Record the time-stamp */
+	ktime_get_ts64(&skbpriv(skb)->ts);
+
+	return skb;
+
+bad_msg:
+	mif_err("%s%s%s: ERR! BAD MSG: %02x %02x %02x %02x\n",
+		ld->name, arrow(RX), ld->mc->name,
+		hdr[0], hdr[1], hdr[2], hdr[3]);
+
+	circ_read(pr_buff, src, qsize, out, BAD_MSG_BUFFER_SIZE);
+	pr_buffer("BAD MSG", (char *)pr_buff, (size_t)BAD_MSG_BUFFER_SIZE,
+		  (size_t)BAD_MSG_BUFFER_SIZE);
+
+	set_rxq_tail(dev, in);	/* Reset tail (out) pointer */
+	if (ld->link_trigger_cp_crash) {
+		ld->link_trigger_cp_crash(mld, CRASH_REASON_MIF_RX_BAD_DATA,
+				"ERR! BAD MSG from CP");
+	}
+	*ret = -EINVAL;
+
+no_mem:
+	return NULL;
+}
+
+/* Returns true if any legacy TX queue still holds data that CP has not
+ * consumed (head != tail), false when all queues are drained.
+ */
+bool check_legacy_tx_pending(struct mem_link_device *mld)
+{
+	int i;
+	unsigned int head, tail;
+	struct legacy_ipc_device *dev;
+
+	for (i = IPC_MAP_FMT ; i < IPC_MAP_MAX ; i++) {
+		dev = mld->legacy_link_dev.dev[i];
+		head = get_txq_head(dev);
+		tail = get_txq_tail(dev);
+
+		if (head != tail) {
+			/* trailing newline was missing from this log line */
+			mif_info("idx: %d, head: %u, tail: %u\n", i, head, tail);
+			return true;
+		}
+	}
+
+	return false;
+}
+
diff --git a/link_device_memory_sbd.c b/link_device_memory_sbd.c
new file mode 100644
index 0000000..98e8f58
--- /dev/null
+++ b/link_device_memory_sbd.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/* @file link_device_memory_sbd.c
+ * @brief common functions for all types of memory interface media
+ * @date 2014/02/05
+ * @author Hankook Jang (hankook.jang@samsung.com)
+ */
+
+/*
+ * Copyright (C) 2011 Samsung Electronics.
+ *
+ */
+
+#include <linux/shm_ipc.h>
+#include <soc/google/exynos-modem-ctrl.h>
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "link_device_memory.h"
+#include "include/sbd.h"
+
+/* Dump the negotiated SBD shared-memory layout (global descriptor plus
+ * every per-channel ring-buffer descriptor pair) to the kernel log.
+ * Debug aid used after init_sbd_link() completes.
+ */
+static void print_sbd_config(struct sbd_link_device *sl)
+{
+	int i, dir;
+	struct sbd_rb_channel *rb_ch;
+	struct sbd_rb_desc *rbd;
+
+	mif_info("SBD_IPC {shmem_base:0x%pK shmem_size:%d}\n",
+		sl->shmem, sl->shmem_size);
+
+	mif_info("SBD_IPC {version:%d num_channels:%d rpwp_array_offset:%d}\n",
+		sl->g_desc->version, sl->g_desc->num_channels,
+		sl->g_desc->rpwp_array_offset);
+
+	/* One descriptor per channel per direction (UL and DL) */
+	for (i = 0; i < sl->num_channels; i++) {
+		for (dir = 0; dir < ULDL; dir++) {
+			rb_ch = &sl->g_desc->rb_ch[i][dir];
+			rbd = &sl->g_desc->rb_desc[i][dir];
+
+			mif_info("RB_DESC[%-2d][%s](offset:%d) = {id:%-2d ch:%-3d dir:%s} {buff_pos_array_offset:%-5d rb_len:%-3d} {buff_size:%-4d payload_offset:%d}\n",
+				i, udl_str(dir), rb_ch->rb_desc_offset,
+				rbd->id, rbd->ch, udl_str(rbd->direction),
+				rb_ch->buff_pos_array_offset, rbd->length,
+				rbd->buff_size, rbd->payload_offset);
+		}
+	}
+}
+
+/* sysfs */
+/* sysfs read handler for "<dev>/sbd/region".
+ * Prints the static SBD shared-memory layout (descriptor/buffer offsets
+ * and sizes) for every channel in both directions.
+ */
+static ssize_t region_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct modem_data *modem;
+	struct sbd_link_device *sl;
+	struct sbd_rb_channel *rb_ch;
+	struct sbd_rb_desc *rbd;
+	struct sbd_link_attr *link_attr;
+	struct sbd_ring_buffer *rb;
+	int i, dir;
+	ssize_t count = 0;
+
+	modem = (struct modem_data *)dev->platform_data;
+	sl = &modem->mld->sbd_link_dev;
+
+	count += scnprintf(&buf[count], PAGE_SIZE - count,
+		"version:%d num_channels:%d rpwp_array_offset:%d\n",
+		sl->g_desc->version, sl->g_desc->num_channels, sl->g_desc->rpwp_array_offset);
+
+	for (i = 0; i < sl->num_channels; i++) {
+		for (dir = 0; dir < ULDL; dir++) {
+			count += scnprintf(&buf[count], PAGE_SIZE - count, "\n");
+
+			rb = sbd_id2rb(sl, i, dir);
+			rb_ch = &sl->g_desc->rb_ch[i][dir];
+			rbd = &sl->g_desc->rb_desc[i][dir];
+			link_attr = &sl->link_attr[i];
+
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				"ID:%d CH:%d direction:%s\n", rbd->id, rbd->ch, udl_str(dir));
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				"DESC region offset:%d buff_pos:%d buff_size:%d len:%d\n",
+				rb_ch->rb_desc_offset, rb_ch->buff_pos_array_offset,
+				rbd->buff_size, rbd->length);
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				"BUFF region offset:%d size:%u\n",
+				calc_offset(rb->buff_rgn, sl->shmem), (rb->len * rb->buff_size));
+		}
+	}
+
+	return count;
+}
+
+/* sysfs read handler for "<dev>/sbd/status".
+ * Prints the run-time ring-buffer pointers (rp/wp) and occupancy for
+ * every SBD channel; empty channels are reported as such.
+ *
+ * Fix: the TX line previously printed rb_rx->buff_size (copy-paste
+ * slip); it now reports the TX ring's own buffer size.
+ */
+static ssize_t status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct modem_data *modem;
+	struct sbd_link_device *sl;
+	struct sbd_ring_buffer *rb_tx, *rb_rx;
+	int i;
+	ssize_t count = 0;
+
+	modem = (struct modem_data *)dev->platform_data;
+	sl = &modem->mld->sbd_link_dev;
+
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "num_channels:%d\n", sl->num_channels);
+	for (i = 0; i < sl->num_channels; i++) {
+		rb_tx = sbd_id2rb(sl, i, TX);
+		rb_rx = sbd_id2rb(sl, i, RX);
+
+		count += scnprintf(&buf[count], PAGE_SIZE - count, "\n");
+		if (!rb_tx->len && !rb_rx->len) {
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				"ID:%d TX(empty) RX(empty)\n", i);
+			continue;
+		}
+
+		count += scnprintf(&buf[count], PAGE_SIZE - count, "ID:%d name:%s\n",
+			i, rb_tx->iod->name);
+		count += scnprintf(&buf[count], PAGE_SIZE - count,
+			"TX ch:%d len:%d buff_size:%d rp:%d wp:%d space:%d usage:%d\n",
+			rb_tx->ch, rb_tx->len, rb_tx->buff_size,
+			*rb_tx->rp, *rb_tx->wp, rb_space(rb_tx) + 1, rb_usage(rb_tx));
+		count += scnprintf(&buf[count], PAGE_SIZE - count,
+			"RX ch:%d len:%d buff_size:%d rp:%d wp:%d, space:%d usage:%d\n",
+			rb_rx->ch, rb_rx->len, rb_rx->buff_size,
+			*rb_rx->rp, *rb_rx->wp,
+			rb_space(rb_rx) + 1, rb_usage(rb_rx));
+	}
+
+	return count;
+}
+
+/* Read-only sysfs attributes exposed under the "sbd" group,
+ * registered by create_sbd_link_device().
+ */
+static DEVICE_ATTR_RO(region);
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *sbd_attrs[] = {
+	&dev_attr_region.attr,
+	&dev_attr_status.attr,
+	NULL,
+};
+
+static const struct attribute_group sbd_group = {
+	.attrs = sbd_attrs,
+	.name = "sbd",
+};
+
+/* Fill one per-link attribute entry from the platform io-device
+ * configuration (ring lengths and buffer sizes for both directions).
+ */
+static void setup_link_attr(struct sbd_link_attr *link_attr, u16 id, u16 ch,
+		struct modem_io_t *io_dev)
+{
+	link_attr->id = id;
+	link_attr->ch = ch;
+
+	/* A link header is used unless the io device explicitly opts out */
+	link_attr->lnk_hdr = !(io_dev->attrs & IO_ATTR_NO_LINK_HEADER);
+
+	link_attr->rb_len[UL] = io_dev->ul_num_buffers;
+	link_attr->buff_size[UL] = io_dev->ul_buffer_size;
+	link_attr->rb_len[DL] = io_dev->dl_num_buffers;
+	link_attr->buff_size[DL] = io_dev->dl_buffer_size;
+}
+
+/*
+ * Build the CH-to-LinkID / LinkID-to-CH conversion tables and the
+ * per-link attribute table from the platform iodev list.
+ *
+ * @return the number of actual link channels
+ */
+static unsigned int init_ctrl_tables(struct sbd_link_device *sl)
+{
+	struct modem_io_t **iodevs = sl->ld->mdm_data->iodevs;
+	unsigned int i, ch;
+	int multi_raw_count = 0;
+	unsigned int id = 0;
+
+	/*
+	 * Fill ch2id array with MAX_SBD_LINK_IDS value to prevent sbd_ch2id()
+	 * from returning 0 for unused channels.
+	 */
+	for (i = 0; i < MAX_SBD_SIPC_CHANNELS; i++)
+		sl->ch2id[i] = MAX_SBD_LINK_IDS;
+
+	for (i = 0; i < sl->ld->mdm_data->num_iodevs; i++) {
+		ch = iodevs[i]->ch;
+
+		/* Skip non-IPC or PS ch but allow IPC_MULTI_RAW */
+		if ((!sipc5_ipc_ch(ch) || sipc_ps_ch(ch)) &&
+		    (iodevs[i]->format != IPC_MULTI_RAW))
+			continue;
+
+		/* Skip making rb if mismatch region info */
+		if ((iodevs[i]->attrs & IO_ATTR_OPTION_REGION) &&
+		    strcmp(iodevs[i]->option_region, CONFIG_OPTION_REGION))
+			continue;
+
+		/* Change channel to QoS priority */
+		if (iodevs[i]->format == IPC_MULTI_RAW) {
+			ch = QOS_HIPRIO + multi_raw_count;
+			multi_raw_count++;
+			if (ch >= QOS_MAX_PRIO) {
+				mif_err("IPC_MULTI_RAW over max count ch: %d\n", ch);
+				continue;
+			}
+		}
+
+		/* Save CH# to LinkID-to-CH conversion table. */
+		sl->id2ch[id] = ch;
+
+		/* Save LinkID to CH-to-LinkID conversion table. */
+		sl->ch2id[ch] = id;
+
+		/* Set up the attribute table entry of a LinkID. */
+		setup_link_attr(&sl->link_attr[id], id, ch, iodevs[i]);
+
+		/* NOTE(review): id is not bounded against MAX_SBD_LINK_IDS here;
+		 * presumably num_iodevs guarantees it stays in range -- confirm.
+		 */
+		id++;
+	}
+
+	/* Finally, id has the number of actual link channels. */
+	return id;
+}
+
+/* (Re)initialize the SBD descriptor region in shared memory.
+ * Zeroes the descriptor region, publishes the global descriptor, then
+ * fills in every ring buffer's kernel-side state and its in-shmem
+ * RB_DESC/RB_CH entries.  Assumes create_sbd_mem_map() has already
+ * carved the memory map.  Returns 0 on success or a negative errno.
+ */
+int init_sbd_link(struct sbd_link_device *sl)
+{
+	int i, dir, idx;
+	int ret = 0;
+	struct sbd_ring_buffer *rb;
+	struct sbd_ipc_device *ipc_dev;
+	struct sbd_link_attr *link_attr;
+
+	if (unlikely(!sl))
+		return -ENOMEM;
+
+	memset(sl->shmem + DESC_RGN_OFFSET, 0x0, DESC_RGN_SIZE);
+
+	sl->g_desc->version = sl->version;
+	sl->g_desc->num_channels = sl->num_channels;
+	sl->g_desc->rpwp_array_offset = calc_offset(sl->rp[UL], sl->shmem);
+
+	for (i = 0; i < sl->num_channels; i++) {
+		ipc_dev = sbd_id2dev(sl, i);
+		if (unlikely(!ipc_dev))
+			return -ENODEV;
+
+		link_attr = &sl->link_attr[i];
+
+		ipc_dev->id = link_attr->id;
+		ipc_dev->ch = link_attr->ch;
+
+		for (dir = 0; dir < ULDL; dir++) {
+			/*
+			 * Set up the ring buffer of each direction (UL/DL)
+			 * in ipc_dev[i]
+			 */
+			rb = &ipc_dev->rb[dir];
+
+			rb->sl = sl;
+			rb->lnk_hdr = link_attr->lnk_hdr;
+			rb->more = false;
+			rb->total = 0;
+			rb->rcvd = 0;
+
+			/*
+			 * Initialize an SBD RB instance in the kernel space.
+			 */
+			rb->id = link_attr->id;
+			/* ch 0 is mapped to the default PS channel */
+#if IS_ENABLED(CONFIG_CH_EXTENSION)
+			rb->ch = link_attr->ch ?: SIPC_CH_EX_ID_PDP_0;
+#else
+			rb->ch = link_attr->ch ?: SIPC_CH_ID_PDP_0;
+#endif
+			rb->dir = dir;
+			rb->len = link_attr->rb_len[dir];
+			rb->buff_size = link_attr->buff_size[dir];
+			rb->payload_offset = 0;
+
+			/* Publish buffer offsets; lengths start empty */
+			for (idx = 0; idx < rb->len; idx++) {
+				rb->buff_pos_array[idx] = calc_offset(rb->buff[idx], sl->shmem);
+				rb->buff_len_array[idx] = 0;
+			}
+
+			rb->iod = link_get_iod_with_channel(sl->ld, rb->ch);
+			rb->ld = sl->ld;
+			atomic_set(&rb->busy, 0);
+
+			/*
+			 * Setup RB_DESC in the g_desc
+			 */
+			rb->rb_desc->ch = rb->ch;
+			rb->rb_desc->direction = rb->dir;
+			rb->rb_desc->signaling = 1;
+			rb->rb_desc->sig_mask = MASK_INT_VALID | MASK_SEND_DATA;
+			rb->rb_desc->length = rb->len;
+			rb->rb_desc->id = rb->id;
+			rb->rb_desc->buff_size = rb->buff_size;
+			rb->rb_desc->payload_offset = rb->payload_offset;
+
+			/*
+			 * Setup RB_CH in the g_desc
+			 */
+			rb->rb_ch->rb_desc_offset = calc_offset(rb->rb_desc, sl->shmem);
+			rb->rb_ch->buff_pos_array_offset =
+				calc_offset(rb->buff_pos_array, sl->shmem);
+		}
+	}
+
+	print_sbd_config(sl);
+
+	return ret;
+}
+
+/* Carve the SBD shared-memory region into its sub-regions and wire up
+ * the kernel-side ring-buffer bookkeeping.
+ *
+ * Layout (from DESC_RGN_OFFSET): global descriptor, then the rp/wp
+ * array (UL rp, UL wp, DL rp, DL wp; one u16 per channel each), then
+ * per-ring position/length arrays.  Data buffers start at
+ * BUFF_RGN_OFFSET.  Returns 0 on success or -ENODEV/-ENOMEM.
+ */
+int create_sbd_mem_map(struct sbd_link_device *sl)
+{
+	int i, dir;
+	struct sbd_ring_buffer *rb;
+	struct sbd_ipc_device *ipc_dev;
+	struct sbd_link_attr *link_attr;
+
+	u8 *mem_global_desc;
+	u16 *mem_rb_rpwp;
+	u8 *mem_rb_cell_info;
+	u8 *mem_rb_buff;
+
+	u8 *desc_addr = sl->shmem + DESC_RGN_OFFSET;
+	u8 *buff_addr = sl->shmem + BUFF_RGN_OFFSET;
+
+	unsigned int idx;
+	unsigned int rb_len;
+	unsigned int rb_buff_size;
+
+	mem_global_desc = desc_addr;
+	desc_addr += sizeof(struct sbd_global_desc);
+
+	mem_rb_rpwp = (u16 *)desc_addr;
+	desc_addr += sizeof(u16) * ULDL * RDWR * sl->num_channels;
+
+	mem_rb_cell_info = desc_addr;
+	mem_rb_buff = buff_addr;
+
+	sl->g_desc = (struct sbd_global_desc *)mem_global_desc;
+
+	/* rp/wp live in four consecutive per-channel u16 arrays */
+	sl->rp[UL] = mem_rb_rpwp + (sl->num_channels * 0);
+	sl->wp[UL] = mem_rb_rpwp + (sl->num_channels * 1);
+	sl->rp[DL] = mem_rb_rpwp + (sl->num_channels * 2);
+	sl->wp[DL] = mem_rb_rpwp + (sl->num_channels * 3);
+
+	for (i = 0; i < sl->num_channels; i++) {
+		ipc_dev = sbd_id2dev(sl, i);
+		if (unlikely(!ipc_dev))
+			return -ENODEV;
+
+		link_attr = &sl->link_attr[i];
+
+		for (dir = 0; dir < ULDL; dir++) {
+			rb = &ipc_dev->rb[dir];
+
+			rb_len = link_attr->rb_len[dir];
+			rb_buff_size = link_attr->buff_size[dir];
+
+			/* Per-slot buffer offset and length arrays in shmem */
+			rb->buff_pos_array = (u32 *)desc_addr;
+			desc_addr += rb_len * sizeof(u32);
+
+			rb->buff_len_array = (u32 *)desc_addr;
+			desc_addr += rb_len * sizeof(u32);
+
+			rb->buff_rgn = buff_addr;
+			buff_addr += (rb_len * rb_buff_size);
+
+			/* Kernel-side table of per-slot buffer addresses;
+			 * reused across re-initializations once allocated.
+			 */
+			if (!rb->buff)
+				rb->buff = kmalloc((rb_len * sizeof(u8 *)), GFP_ATOMIC);
+			if (!rb->buff)
+				return -ENOMEM;
+
+			for (idx = 0; idx < rb_len; idx++)
+				rb->buff[idx] = rb->buff_rgn + (idx * rb_buff_size);
+
+			mif_info("RB[%d:%d][%s] buff_rgn {addr:0x%pK offset:%d size:%u}\n",
+				i, sbd_id2ch(sl, i), udl_str(dir), rb->buff_rgn,
+				calc_offset(rb->buff_rgn, sl->shmem), (rb_len * rb_buff_size));
+
+			rb->rp = &sl->rp[dir][i];
+			rb->wp = &sl->wp[dir][i];
+
+			rb->rb_ch = &sl->g_desc->rb_ch[i][dir];
+			rb->rb_desc = &sl->g_desc->rb_desc[i][dir];
+
+			spin_lock_init(&rb->lock);
+			skb_queue_head_init(&rb->skb_q);
+		}
+	}
+
+	/* Sanity checks: warn (but do not fail) if the carved regions
+	 * spilled over their reserved space.
+	 */
+	if (desc_addr > (sl->shmem + DESC_RGN_OFFSET + DESC_RGN_SIZE))
+		mif_err("SBD Desc overflow offset: 0x%pK\n", desc_addr);
+
+	if (buff_addr > (sl->shmem + sl->shmem_size))
+		mif_err("SBD Buffer overflow offset: 0x%pK\n", buff_addr);
+
+	return 0;
+}
+
+/* One-time construction of the SBD link device over the given shared
+ * memory window: builds the channel tables, carves the memory map and
+ * registers the "sbd" sysfs group.  Returns 0 or a negative errno.
+ */
+int create_sbd_link_device(struct link_device *ld, struct sbd_link_device *sl,
+		u8 *shmem_base, unsigned int shmem_size)
+{
+	int ret;
+
+	if (!ld || !sl || !shmem_base)
+		return -EINVAL;
+
+	if (!ld->mdm_data)
+		return -EINVAL;
+
+	sl->ld = ld;
+
+	sl->version = 1;
+
+	sl->shmem = shmem_base;
+	sl->shmem_size = shmem_size;
+	/* Zero-memcopy area starts at the end of the window */
+	sl->zmb_offset = shmem_size;
+
+	sl->num_channels = init_ctrl_tables(sl);
+
+	ret = create_sbd_mem_map(sl);
+	if (ret < 0) {
+		mif_err("Can't create SBD memory map\n");
+		return ret;
+	}
+
+	mif_info("SHMEM {base:0x%pK size:%d}\n",
+		sl->shmem, sl->shmem_size);
+
+	mif_info("G_DESC_OFFSET = %d(0x%pK)\n",
+		calc_offset(sl->g_desc, sl->shmem),
+		sl->g_desc);
+
+	mif_info("RB_CH_OFFSET = %d (0x%pK)\n",
+		calc_offset(sl->g_desc->rb_ch, sl->shmem),
+		sl->g_desc->rb_ch);
+
+	mif_info("RBD_PAIR_OFFSET = %d (0x%pK)\n",
+		calc_offset(sl->g_desc->rb_desc, sl->shmem),
+		sl->g_desc->rb_desc);
+
+	/* sysfs */
+	ret = sysfs_create_group(&sl->ld->dev->kobj, &sbd_group);
+	if (ret != 0) {
+		mif_err("sysfs_create_group() sbd_group error %d\n", ret);
+		return ret;
+	}
+
+	mif_info("Complete!!\n");
+
+	return 0;
+}
+
+/* Return the number of free slots in a TX ring, -EIO when the ring
+ * pointers are corrupt, or -ENOSPC when the ring is full.
+ */
+static inline int check_rb_space(struct sbd_ring_buffer *rb, unsigned int qlen,
+		unsigned int in, unsigned int out)
+{
+	unsigned int space;
+
+	if (!circ_valid(qlen, in, out)) {
+		mif_err("ERR! TXQ[%d:%d] DIRTY (qlen:%d in:%d out:%d)\n",
+			rb->id, rb->ch, qlen, in, out);
+		return -EIO;
+	}
+
+	space = circ_get_space(qlen, in, out);
+	if (likely(space >= 1))
+		return space;
+
+	mif_err_limited("TXQ[%d:%d] NOSPC (qlen:%d in:%d out:%d)\n",
+		rb->id, rb->ch, qlen, in, out);
+	return -ENOSPC;
+}
+
+/* Copy one skb into the next free slot of an SBD TX ring and advance
+ * the write pointer.  Returns the number of bytes queued, or a
+ * negative errno (-EIO/-ENOSPC) when the ring is corrupt or full.
+ * Barrier placement is load-bearing: the payload and length word must
+ * be visible to the CP before the write pointer moves.
+ */
+int sbd_pio_tx(struct sbd_ring_buffer *rb, struct sk_buff *skb)
+{
+	int ret;
+	unsigned int qlen = rb->len;
+	unsigned int in = *rb->wp;
+	unsigned int out = *rb->rp;
+	unsigned int count = skb->len;
+	unsigned int space = (rb->buff_size - rb->payload_offset);
+	u8 *dst;
+
+	ret = check_rb_space(rb, qlen, in, out);
+	if (unlikely(ret < 0))
+		return ret;
+
+	/* The packet must fit into a single slot's payload area */
+	if (unlikely(count > space)) {
+		mif_err("ERR! {id:%d ch:%d} count %d > space %d\n",
+			rb->id, rb->ch, count, space);
+		return -ENOSPC;
+	}
+
+	barrier();
+
+	dst = rb->buff[in] + rb->payload_offset;
+
+	barrier();
+
+	skb_copy_from_linear_data(skb, dst, count);
+
+	if (sipc_ps_ch(rb->ch)) {
+		struct io_device *iod = skbpriv(skb)->iod;
+		unsigned int ch = iod->ch;
+
+		/* PS slots encode {ch:16 | len:16} in the length word */
+		rb->buff_len_array[in] = (skb->len & 0xFFFF);
+		rb->buff_len_array[in] |= (ch << 16);
+	} else {
+		rb->buff_len_array[in] = skb->len;
+	}
+
+	barrier();
+
+	*rb->wp = circ_new_ptr(qlen, in, 1);
+
+	/* Commit the item before incrementing the head */
+	smp_mb();
+
+	return count;
+}
+
+/* Return true if any UL (AP-to-CP) SBD ring still holds data the CP
+ * has not consumed, i.e. its write and read pointers differ.
+ */
+bool check_sbd_tx_pending(struct mem_link_device *mld)
+{
+	struct sbd_link_device *sl = &mld->sbd_link_dev;
+	unsigned int wp, rp;
+	int id;
+
+	for (id = 0; id < sl->num_channels; id++) {
+		wp = sl->wp[UL][id];
+		rp = sl->rp[UL][id];
+
+		if (wp == rp)
+			continue;
+
+		mif_info("ch: %d, wp: %u, rp: %u", sbd_id2ch(sl, id), wp, rp);
+		return true;
+	}
+
+	return false;
+}
+
+/* Copy the payload of RX ring slot @out into a freshly allocated skb.
+ * The low 16 bits of the slot's length word give the payload length.
+ * Returns the skb, or NULL when the length is bogus or allocation
+ * fails.
+ */
+static inline struct sk_buff *recv_data(struct sbd_ring_buffer *rb, u16 out)
+{
+	struct sk_buff *skb;
+	u8 *src;
+	unsigned int len = rb->buff_len_array[out] & 0xFFFF;
+	unsigned int space = (rb->buff_size - rb->payload_offset);
+
+	/* A CP-reported length beyond the slot size means corruption */
+	if (unlikely(len > space)) {
+		mif_err("ERR! {id:%d ch:%d} size %d > space %d\n",
+			rb->id, rb->ch, len, space);
+		return NULL;
+	}
+
+	skb = dev_alloc_skb(len);
+	if (unlikely(!skb)) {
+		mif_err("ERR! {id:%d ch:%d} alloc_skb(%d) fail\n",
+			rb->id, rb->ch, len);
+		return NULL;
+	}
+
+	src = rb->buff[out] + rb->payload_offset;
+	skb_put(skb, len);
+	skb_copy_to_linear_data(skb, src, len);
+
+	return skb;
+}
+
+/* Record routing metadata (io device, link device, channel, napi) in
+ * skb->cb for the freshly received packet.
+ */
+static inline void set_skb_priv(struct sbd_ring_buffer *rb, struct sk_buff *skb)
+{
+	struct link_device *ld = rb->ld;
+	struct mem_link_device *mld = ld_to_mem_link_device(ld);
+	unsigned int out = *rb->rp;
+
+	/* Record the IO device, the link device, etc. into &skb->cb */
+	if (sipc_ps_ch(rb->ch)) {
+		/* PS slots carry the real channel in the upper length-word
+		 * bits (see sbd_pio_tx).  NOTE(review): TX stores 16 bits of
+		 * ch but only 8 are read back here (0xff) -- presumably PS
+		 * channel numbers fit in one byte; confirm.
+		 */
+		unsigned int ch = (rb->buff_len_array[out] >> 16) & 0xff;
+
+		skbpriv(skb)->iod = link_get_iod_with_channel(rb->ld, ch);
+		skbpriv(skb)->ld = rb->ld;
+		skbpriv(skb)->sipc_ch = ch;
+		skbpriv(skb)->napi = &mld->mld_napi;
+	} else {
+		skbpriv(skb)->iod = rb->iod;
+		skbpriv(skb)->ld = rb->ld;
+		skbpriv(skb)->sipc_ch = rb->ch;
+		skbpriv(skb)->napi = NULL;
+	}
+}
+
+/* Pull one packet out of an SBD RX ring: copy the payload into a new
+ * skb, attach link/iod metadata, then advance the read pointer.
+ * Returns 0 on success, -EFAULT on a corrupt read pointer, or -ENOMEM
+ * when the skb cannot be built.
+ */
+int sbd_pio_rx(struct sbd_ring_buffer *rb, struct sk_buff **skb)
+{
+	unsigned int qlen = rb->len;
+	unsigned int out = *rb->rp;
+
+	/* rp lives in shared memory; distrust values the CP could corrupt */
+	if (out >= qlen) {
+		mif_err("out value exceeds ring buffer size\n");
+		return -EFAULT;
+	}
+
+	*skb = recv_data(rb, out);
+	if (unlikely(!(*skb)))
+		return -ENOMEM;
+
+	set_lnk_hdr(rb, *skb);
+
+	set_skb_priv(rb, *skb);
+
+	check_more(rb, *skb);
+
+	/* Release the slot only after the payload has been copied out */
+	*rb->rp = circ_new_ptr(qlen, out, 1);
+
+	return 0;
+}
diff --git a/link_device_memory_snapshot.c b/link_device_memory_snapshot.c
new file mode 100644
index 0000000..7340ace
--- /dev/null
+++ b/link_device_memory_snapshot.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 Samsung Electronics.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "link_device_memory.h"
+
+static struct kmem_cache *msb_kmem_cache;
+
+/* Create the slab cache backing all memory-snapshot buffers.
+ * Must run before any msb_alloc().  Returns 0 or -ENOMEM.
+ */
+int msb_init(void)
+{
+	msb_kmem_cache = kmem_cache_create("msb_kmem_cache",
+					sizeof(struct mst_buff),
+					0,
+					(SLAB_HWCACHE_ALIGN | SLAB_PANIC),
+					NULL);
+	if (!msb_kmem_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* Allocate a zeroed snapshot buffer; safe in atomic context */
+struct mst_buff *msb_alloc(void)
+{
+	return kmem_cache_zalloc(msb_kmem_cache, GFP_ATOMIC);
+}
+
+/* Return a snapshot buffer to its slab cache */
+void msb_free(struct mst_buff *msb)
+{
+	kmem_cache_free(msb_kmem_cache, msb);
+}
+
+/* Make the list empty: head points to itself (sk_buff-list style) */
+static inline void __msb_queue_head_init(struct mst_buff_head *list)
+{
+	list->prev = list->next = (struct mst_buff *)list;
+	list->qlen = 0;
+}
+
+/* Initialize an empty snapshot list and its protecting spinlock */
+void msb_queue_head_init(struct mst_buff_head *list)
+{
+	spin_lock_init(&list->lock);
+	__msb_queue_head_init(list);
+}
+
+/* Link @msb between @prev and @next; caller holds the list lock */
+static inline void __msb_insert(struct mst_buff *msb,
+		struct mst_buff *prev, struct mst_buff *next,
+		struct mst_buff_head *list)
+{
+	msb->next = next;
+	msb->prev = prev;
+	next->prev = prev->next = msb;
+	list->qlen++;
+}
+
+/* Insert @msb immediately before @next (lock held by caller) */
+static inline void __msb_queue_before(struct mst_buff_head *list,
+		struct mst_buff *next,
+		struct mst_buff *msb)
+{
+	__msb_insert(msb, next->prev, next, list);
+}
+
+/* Insert @msb immediately after @prev (lock held by caller) */
+static inline void __msb_queue_after(struct mst_buff_head *list,
+		struct mst_buff *prev,
+		struct mst_buff *msb)
+{
+	__msb_insert(msb, prev, prev->next, list);
+}
+
+/* Append @msb at the tail of the list (lock held by caller) */
+static inline void __msb_queue_tail(struct mst_buff_head *list,
+		struct mst_buff *msb)
+{
+	__msb_queue_before(list, (struct mst_buff *)list, msb);
+}
+
+/* Prepend @msb at the head of the list (lock held by caller) */
+static inline void __msb_queue_head(struct mst_buff_head *list,
+		struct mst_buff *msb)
+{
+	__msb_queue_after(list, (struct mst_buff *)list, msb);
+}
+
+/* Append @msb at the tail of the list under the list lock */
+void msb_queue_tail(struct mst_buff_head *list, struct mst_buff *msb)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&list->lock, flags);
+
+	__msb_queue_tail(list, msb);
+
+	spin_unlock_irqrestore(&list->lock, flags);
+}
+
+/* Prepend @msb at the head of the list under the list lock */
+void msb_queue_head(struct mst_buff_head *list, struct mst_buff *msb)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&list->lock, flags);
+
+	__msb_queue_head(list, msb);
+
+	spin_unlock_irqrestore(&list->lock, flags);
+}
+
+/* Return the first buffer on the list without unlinking it, or NULL
+ * when the list is empty (head points back to itself).
+ */
+static inline struct mst_buff *__msb_peek(const struct mst_buff_head *list_)
+{
+	struct mst_buff *head = ((const struct mst_buff *)list_)->next;
+
+	return (head == (struct mst_buff *)list_) ? NULL : head;
+}
+
+/* Detach @msb from the list and poison its link pointers
+ * (lock held by caller).
+ */
+static inline void __msb_unlink(struct mst_buff *msb,
+		struct mst_buff_head *list)
+{
+	struct mst_buff *next, *prev;
+
+	list->qlen--;
+
+	next = msb->next;
+	prev = msb->prev;
+
+	msb->next = msb->prev = NULL;
+
+	next->prev = prev;
+	prev->next = next;
+}
+
+/* Unlink and return the first buffer, or NULL when the list is empty
+ * (lock held by caller).
+ */
+static inline struct mst_buff *__msb_dequeue(struct mst_buff_head *list)
+{
+	struct mst_buff *msb = __msb_peek(list);
+
+	if (msb)
+		__msb_unlink(msb, list);
+
+	return msb;
+}
+
+/* Remove and return the first snapshot buffer on the list (NULL when
+ * empty), protected by the list lock.
+ */
+struct mst_buff *msb_dequeue(struct mst_buff_head *list)
+{
+	struct mst_buff *msb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&list->lock, flags);
+	msb = __msb_dequeue(list);
+	spin_unlock_irqrestore(&list->lock, flags);
+
+	return msb;
+}
+
+/* Drain the list, freeing every queued snapshot buffer */
+void msb_queue_purge(struct mst_buff_head *list)
+{
+	struct mst_buff *msb = msb_dequeue(list);
+
+	while (msb) {
+		msb_free(msb);
+		msb = msb_dequeue(list);
+	}
+}
+
+/* Capture the SBD-mode link status (magic, access flag, pending IRQ
+ * values) into @mst; per-queue head/tail are not sampled in SBD mode.
+ */
+static void __take_sbd_status(struct mem_link_device *mld, enum direction dir,
+		struct mem_snapshot *mst)
+{
+	mst->dir = dir;
+
+	mst->magic = ioread32(mld->legacy_link_dev.magic);
+	mst->access = ioread32(mld->legacy_link_dev.mem_access);
+
+	if (mld->recv_cp2ap_irq)
+		mst->int2ap = mld->recv_cp2ap_irq(mld);
+	else
+		mst->int2ap = 0;
+
+	if (mld->read_ap2cp_irq)
+		mst->int2cp = mld->read_ap2cp_irq(mld);
+	else
+		mst->int2cp = 0;
+}
+
+/* Capture the legacy-mode link status into @mst: magic/access words,
+ * every queue's TX/RX head and tail pointers, and pending IRQ values.
+ */
+static void __take_mem_status(struct mem_link_device *mld, enum direction dir,
+		struct mem_snapshot *mst)
+{
+	int i;
+
+	mst->dir = dir;
+
+	mst->magic = ioread32(mld->legacy_link_dev.magic);
+	mst->access = ioread32(mld->legacy_link_dev.mem_access);
+
+	for (i = 0; i < IPC_MAP_MAX; i++) {
+		struct legacy_ipc_device *dev = mld->legacy_link_dev.dev[i];
+
+		mst->head[i][TX] = get_txq_head(dev);
+		mst->tail[i][TX] = get_txq_tail(dev);
+		mst->head[i][RX] = get_rxq_head(dev);
+		mst->tail[i][RX] = get_rxq_tail(dev);
+	}
+
+	if (mld->recv_cp2ap_irq)
+		mst->int2ap = mld->recv_cp2ap_irq(mld);
+	else
+		mst->int2ap = 0;
+
+	if (mld->read_ap2cp_irq)
+		mst->int2cp = mld->read_ap2cp_irq(mld);
+	else
+		mst->int2cp = 0;
+}
+
+/* Allocate a snapshot buffer and fill it with the current link state,
+ * choosing SBD or legacy sampling depending on the active mode.
+ * Returns NULL when allocation fails; caller owns (and frees) the
+ * returned buffer.
+ */
+struct mst_buff *mem_take_snapshot(struct mem_link_device *mld,
+		enum direction dir)
+{
+	struct mst_buff *msb = msb_alloc();
+
+	if (!msb)
+		return NULL;
+
+	if (sbd_active(&mld->sbd_link_dev))
+		__take_sbd_status(mld, dir, &msb->snapshot);
+	else
+		__take_mem_status(mld, dir, &msb->snapshot);
+
+	return msb;
+}
diff --git a/link_device_pcie_iommu.c b/link_device_pcie_iommu.c
new file mode 100644
index 0000000..a78e60d
--- /dev/null
+++ b/link_device_pcie_iommu.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#include "link_device_pcie_iommu.h"
+
+#define PCIE_CH2HSI(ch) ((ch) + 1)
+
+extern void pcie_iommu_tlb_invalidate_all(int hsi_block_num);
+extern int pcie_iommu_map(unsigned long iova, phys_addr_t paddr, size_t size,
+ int prot, int hsi_block_num);
+extern size_t pcie_iommu_unmap(unsigned long iova, size_t size, int hsi_block_num);
+
+/* Map every CP shared-memory region 1:1 (IOVA == PA) into the PCIe
+ * IOMMU so the CP can access them.  Runs at most once; subsequent
+ * calls return immediately.
+ *
+ * Improvement: cp_shmem_get_base() was previously called up to three
+ * times per region; its result is now fetched once per iteration.
+ */
+void cpif_pcie_iommu_enable_regions(struct mem_link_device *mld)
+{
+	static bool enabled_region;
+
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	struct link_device *ld = &mld->link_dev;
+	struct modem_ctl *mc = ld->mc;
+
+	u32 cp_num = ld->mdm_data->cp_num;
+	u32 shmem_idx;
+	u32 size;
+	int ret;
+
+	if (enabled_region)
+		return;
+
+	for (shmem_idx = 0 ; shmem_idx < MAX_CP_SHMEM ; shmem_idx++) {
+		unsigned long base;
+
+		/* The MSI region is only needed for PCIe bootloader xmit */
+		if (shmem_idx == SHMEM_MSI && !(mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE))
+			continue;
+
+		/* Only the descriptor part of PKTPROC is mapped statically */
+		if (shmem_idx == SHMEM_PKTPROC)
+			size = ppa->buff_rgn_offset;
+		else
+			size = cp_shmem_get_size(cp_num, shmem_idx);
+
+		base = cp_shmem_get_base(cp_num, shmem_idx);
+		if (base) {
+			ret = pcie_iommu_map(base, base, size, 0,
+					     PCIE_CH2HSI(mc->pcie_ch_num));
+			mif_info("pcie iommu idx:%d addr:0x%08lx size:0x%08x ret:%d\n",
+				 shmem_idx, base, size, ret);
+		}
+	}
+
+	enabled_region = true;
+}
+
+/* Allocate (or clear, when already allocated) the per-queue array that
+ * remembers the page-frag VA handed out for each descriptor slot.
+ * Returns 0 or -ENOMEM.
+ */
+int cpif_pcie_iommu_init(struct pktproc_queue *q)
+{
+	struct cpif_pcie_iommu_ctrl *ioc = &q->ioc;
+	size_t size = sizeof(void *) * q->num_desc;
+
+	mif_info("iommu init num_desc:%u\n", q->num_desc);
+
+	if (!ioc->pf_buf) {
+		ioc->pf_buf = kvzalloc(size, GFP_KERNEL);
+		return ioc->pf_buf ? 0 : -ENOMEM;
+	}
+
+	memset(ioc->pf_buf, 0, size);
+	return 0;
+}
+
+/* Tear down the queue's IOMMU state: walk every outstanding descriptor
+ * from done_ptr to curr_fore, unmapping pages until the not-yet-mapped
+ * tail (slots at/after *fore_ptr) is reached, freeing each page frag,
+ * then flush the IOMMU TLB, drain the frag cache and zero the control
+ * block (pf_buf itself is kept for reuse).
+ */
+void cpif_pcie_iommu_reset(struct pktproc_queue *q)
+{
+	struct modem_ctl *mc = dev_get_drvdata(q->ppa->dev);
+	struct pktproc_desc_sktbuf *desc = q->desc_sktbuf;
+	struct cpif_pcie_iommu_ctrl *ioc = &q->ioc;
+	unsigned int usage, idx;
+	bool do_unmap = true;
+
+	if (!ioc->pf_buf)
+		return;
+
+	usage = circ_get_usage(q->num_desc, ioc->curr_fore, q->done_ptr);
+	idx = q->done_ptr;
+
+	mif_info("iommu reset done:%u curr_fore:%u usage:%u, fore:%u\n",
+		q->done_ptr, ioc->curr_fore, usage, *q->fore_ptr);
+
+	while (usage--) {
+		if (do_unmap) {
+			unsigned long src_pa;
+
+			/* Recover the AP-side PA from the CP-side address */
+			src_pa = desc[idx].cp_data_paddr - q->cp_buff_pbase +
+				q->q_buff_pbase - q->ppa->skb_padding_size;
+			cpif_pcie_iommu_try_ummap_va(q, src_pa, ioc->pf_buf[idx], idx);
+
+			/* Just free the frags if not mapped yet */
+			if (idx == *q->fore_ptr)
+				do_unmap = false;
+		}
+
+		page_frag_free(ioc->pf_buf[idx]);
+		idx = circ_new_ptr(q->num_desc, idx, 1);
+	}
+
+	/* Initialize */
+	pcie_iommu_tlb_invalidate_all(PCIE_CH2HSI(mc->pcie_ch_num));
+	if (ioc->pf_cache.va) {
+		__page_frag_cache_drain(virt_to_page(ioc->pf_cache.va),
+			ioc->pf_cache.pagecnt_bias);
+	}
+	/* Zero everything up to (but not including) pf_buf */
+	memset(&q->ioc, 0, offsetof(struct cpif_pcie_iommu_ctrl, pf_buf));
+}
+
+/* Allocate one RX buffer for descriptor slot @idx from the page-frag
+ * cache, IOMMU-mapping each underlying page lazily: the previous page
+ * is mapped only once the allocator moves on to a new one.  On return
+ * *map_cnt is the number of slots newly covered by that mapping (0 if
+ * no mapping happened).  Returns the buffer VA or NULL on failure.
+ */
+void *cpif_pcie_iommu_map_va(struct pktproc_queue *q, unsigned long src_pa,
+		u32 idx, u32 *map_cnt)
+{
+	struct modem_ctl *mc = dev_get_drvdata(q->ppa->dev);
+	struct cpif_pcie_iommu_ctrl *ioc = &q->ioc;
+	const size_t pf_size = q->ppa->true_packet_size;
+	void *addr_des, *addr_asc;
+
+	/*
+	 * Every page order is compatible with the IOMMU granularity,
+	 * but the popped addresses come back in descending order.
+	 */
+	addr_des = page_frag_alloc(&ioc->pf_cache, pf_size, GFP_ATOMIC);
+	if (!addr_des) {
+		mif_err_limited("failed to alloc page frag\n");
+		return NULL;
+	}
+
+	/* Map the last page */
+	*map_cnt = 0;
+	if (ioc->map_page_va != ioc->pf_cache.va) {
+		unsigned long map_size, tailroom;
+		int ret;
+
+		/* First call ever: nothing pending, just remember this page */
+		if (!ioc->map_src_pa)
+			goto set_map;
+
+		/* Clamp the mapping to the end of the queue buffer region */
+		map_size = page_size(virt_to_page(ioc->map_page_va));
+		tailroom = q->q_buff_pbase + q->q_buff_size - ioc->map_src_pa;
+		if (map_size > tailroom)
+			map_size = tailroom;
+
+#ifdef LINK_DEVICE_PCIE_IOMMU_DEBUG
+		mif_debug("map idx:%u src_pa:0x%lX va:0x%p size:0x%lX\n",
+			ioc->map_idx, ioc->map_src_pa, ioc->map_page_va, map_size);
+#endif
+
+		ret = pcie_iommu_map(ioc->map_src_pa, virt_to_phys(ioc->map_page_va),
+			map_size, 0, PCIE_CH2HSI(mc->pcie_ch_num));
+		if (ret) {
+			mif_err("map failure idx:%u src_pa:0x%lX va:0x%p size:0x%lX\n",
+				ioc->map_idx, ioc->map_src_pa, ioc->map_page_va, map_size);
+			return NULL;
+		}
+		ioc->mapped_cnt++;
+		ioc->mapped_size += map_size;
+
+		/* Store the last mapping size */
+		if (!idx)
+			ioc->end_map_size = (u32)map_size;
+
+		*map_cnt = circ_get_usage(q->num_desc, idx, ioc->map_idx);
+
+set_map:
+		ioc->map_src_pa = src_pa;
+		ioc->map_page_va = ioc->pf_cache.va;
+		ioc->map_idx = idx;
+		ioc->pf_offset = 0;
+	}
+
+	/* Convert to an address in ascending order */
+	addr_asc = ioc->pf_cache.va + ioc->pf_offset;
+
+	ioc->pf_buf[idx] = addr_asc;
+	ioc->pf_offset += pf_size;
+	ioc->curr_fore = idx;
+
+	/*
+	 * The first buffer should be mapped with a new page.
+	 * Drain the page frag cache at the last buffer.
+	 */
+	if (idx == (q->num_desc - 1)) {
+		__page_frag_cache_drain(virt_to_page(ioc->pf_cache.va),
+			ioc->pf_cache.pagecnt_bias);
+		ioc->pf_cache.va = NULL;
+	}
+
+	return addr_asc;
+}
+
+/* Unmap the previously recorded page once @src_pa has moved past it.
+ * If @src_pa still falls inside the pending mapping, do nothing;
+ * otherwise unmap the old page and remember @src_pa/@addr as the new
+ * pending one.  (Name keeps the historical "ummap" spelling -- it is
+ * part of the exported interface.)
+ */
+void cpif_pcie_iommu_try_ummap_va(struct pktproc_queue *q, unsigned long src_pa,
+		void *addr, u32 idx)
+{
+	struct modem_ctl *mc = dev_get_drvdata(q->ppa->dev);
+	struct cpif_pcie_iommu_ctrl *ioc = &q->ioc;
+	u32 unmap_size;
+	size_t ret;
+
+	/* Nothing recorded yet: just remember this page */
+	if (!ioc->unmap_src_pa)
+		goto set_unmap;
+
+	/* Slot 0 uses the (possibly clamped) size of the final mapping */
+	unmap_size = !idx ? ioc->end_map_size : ioc->unmap_page_size;
+
+	if (src_pa >= ioc->unmap_src_pa && src_pa < ioc->unmap_src_pa + unmap_size)
+		return;
+
+#ifdef LINK_DEVICE_PCIE_IOMMU_DEBUG
+	mif_debug("unmap src_pa:0x%lX size:0x%X\n", ioc->unmap_src_pa, unmap_size);
+#endif
+
+	ret = pcie_iommu_unmap(ioc->unmap_src_pa, unmap_size, PCIE_CH2HSI(mc->pcie_ch_num));
+	if (ret != unmap_size) {
+		mif_err("invalid unmap size:0x%zX expected:0x%X src_pa:0x%lX\n",
+			ret, unmap_size, ioc->unmap_src_pa);
+	}
+	ioc->mapped_cnt--;
+	ioc->mapped_size -= unmap_size;
+
+set_unmap:
+	ioc->unmap_src_pa = src_pa;
+	ioc->unmap_page_size = page_size(virt_to_head_page(addr));
+}
diff --git a/link_device_pcie_iommu.h b/link_device_pcie_iommu.h
new file mode 100644
index 0000000..1211b8b
--- /dev/null
+++ b/link_device_pcie_iommu.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Samsung Electronics.
+ *
+ */
+
+#ifndef __LINK_DEVICE_PCIE_IOMMU_H__
+#define __LINK_DEVICE_PCIE_IOMMU_H__
+
+#include "link_device_memory.h"
+
+void cpif_pcie_iommu_enable_regions(struct mem_link_device *mld);
+int cpif_pcie_iommu_init(struct pktproc_queue *q);
+void cpif_pcie_iommu_reset(struct pktproc_queue *q);
+
+void *cpif_pcie_iommu_map_va(struct pktproc_queue *q, unsigned long src_pa,
+ u32 idx, u32 *map_cnt);
+void cpif_pcie_iommu_try_ummap_va(struct pktproc_queue *q, unsigned long src_pa,
+ void *addr, u32 idx);
+
+extern bool exynos_pcie_is_sysmmu_enabled(int ch_num);
+#endif /* __LINK_DEVICE_PCIE_IOMMU_H__ */
diff --git a/link_rx_pktproc.c b/link_rx_pktproc.c
new file mode 100644
index 0000000..a064f5a
--- /dev/null
+++ b/link_rx_pktproc.c
@@ -0,0 +1,2239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019, Samsung Electronics.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+#include <linux/shm_ipc.h>
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "link_device_memory.h"
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+#include "dit.h"
+#endif
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+#include "link_device_pcie_iommu.h"
+#endif
+
+/* Canned packet templates for the RX performance test (perftest sysfs
+ * interface), indexed by enum perftest mode. Each entry carries a prebuilt
+ * L3/L4 header, its length, the offset of the UDP destination port (patched
+ * per session), and the total generated packet length.
+ */
+static struct pktproc_perftest_data perftest_data[PERFTEST_MODE_MAX] = {
+	{
+		/* empty */
+	},
+	{
+		/* PERFTEST_MODE_IPV4
+		 * port: 5000 -> 5001
+		 * payload: 1464 (0x5b8)
+		 */
+		.header = {
+			0x45, 0x00, 0x05, 0xB8, 0x00, 0x00, 0x40, 0x00,
+			0x80, 0x11, 0x71, 0xDF, 0xC0, 0xA8, 0x01, 0x03,
+			0xC0, 0xA8, 0x01, 0x02, 0x13, 0x88, 0x13, 0x89,
+			0x05, 0xA4, 0x00, 0x00
+		},
+		.header_len = 28,
+		.dst_port_offset = 22,
+		.packet_len = 1464
+	},
+	{
+		/* PERFTEST_MODE_CLAT
+		 * port: 5000 -> 5001
+		 * payload: 1444 (0x5a4)
+		 */
+		.header = {
+			0x60, 0x0a, 0xf8, 0x0c, 0x05, 0xa4, 0x11, 0x40,
+			0x00, 0x64, 0xff, 0x9b, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x02,
+			0x20, 0x01, 0x02, 0xd8, 0xe1, 0x43, 0x7b, 0xfb,
+			0x1d, 0xda, 0x90, 0x9d, 0x8b, 0x8d, 0x05, 0xe7,
+			0x13, 0x88, 0x13, 0x89, 0x05, 0xa4, 0x00, 0x00,
+		},
+		.header_len = 48,
+		.dst_port_offset = 42,
+		.packet_len = 1484
+	},
+	{
+		/* PERFTEST_MODE_IPV6
+		 * port: 5000 -> 5001
+		 * payload: 1444 (0x5a4)
+		 */
+		.header = {
+			0x60, 0x0a, 0xf8, 0x0c, 0x05, 0x90, 0x11, 0x40,
+			0x20, 0x01, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+			0x20, 0x01, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02,
+			0x7d, 0xae, 0x3d, 0x4e, 0xac, 0xf2, 0x8a, 0x2b,
+			0x13, 0x88, 0x13, 0x89, 0x05, 0x90, 0x00, 0x00,
+		},
+		.header_len = 48,
+		.dst_port_offset = 42,
+		.packet_len = 1464
+	},
+};
+
+/* Decide whether the HW checksum result for this descriptor can be
+ * trusted: the status must not be flagged "ignore", both IP and TCP/UDP
+ * checksums must have been computed, and neither may have failed.
+ */
+static bool pktproc_check_hw_checksum(u8 status)
+{
+	const u8 checked = PKTPROC_STATUS_IPCS | PKTPROC_STATUS_TCPC;
+	const u8 failed = PKTPROC_STATUS_IPCSF | PKTPROC_STATUS_TCPCF;
+
+	if (unlikely(status & PKTPROC_STATUS_IGNR))
+		return false;
+
+	/* Both layers must actually have been checked by HW. */
+	if (unlikely((status & checked) != checked))
+		return false;
+
+	/* And neither layer may report a checksum failure. */
+	return !(status & failed);
+}
+
+/* Stamp a fixed dummy UDP checksum (0x1234) into a generated test packet.
+ * The L4 offset is derived from the IP version nibble of the first byte;
+ * anything that is not IPv4/IPv6 is left untouched. Only used on the
+ * pktgen/perftest path (@q is unused here but kept for the call shape).
+ */
+static void pktproc_set_pktgen_checksum(struct pktproc_queue *q, u8 *data)
+{
+	struct udphdr *uh;
+	u8 version = data[0] & 0xF0;
+	unsigned int hdr_len;
+
+	if (version == 0x40)
+		hdr_len = sizeof(struct iphdr);
+	else if (version == 0x60)
+		hdr_len = sizeof(struct ipv6hdr);
+	else
+		return;
+
+	uh = (struct udphdr *)(data + hdr_len);
+	uh->check = htons(0x1234);
+}
+
+/* sysfs store: enable/disable GRO emulation for generated (pktgen) test
+ * packets. Any value > 0 enables it.
+ * Returns @count on success or the negative errno from the parse.
+ */
+static ssize_t pktgen_gro_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+	struct link_device *ld = get_current_link(mc->iod);
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	unsigned int gro;
+	int ret;
+
+	/* gro is unsigned, so parse with kstrtouint(); the old code passed
+	 * an unsigned int * to kstrtoint(), which expects int *.
+	 */
+	ret = kstrtouint(buf, 0, &gro);
+	if (ret)
+		return ret;
+
+	ppa->pktgen_gro = (gro > 0);
+
+	return count;
+}
+
+/* sysfs show: report whether pktgen GRO emulation is enabled. */
+static ssize_t pktgen_gro_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+	struct link_device *ld = get_current_link(mc->iod);
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+
+	return scnprintf(buf, PAGE_SIZE, "pktgen gro:%d\n", ppa->pktgen_gro);
+}
+
+/*
+ * Get a packet: ringbuf mode
+ */
+/* Receive one packet from a ringbuf-mode queue.
+ *
+ * Validates the descriptor at *rear_ptr (length, channel id, address
+ * range), copies the payload into a newly allocated skb, fills the cpif
+ * skb private data and advances the rear pointer.
+ * Returns 0 with *new_skb set on success; on a bad descriptor the entry
+ * is skipped (rear pointer still advances) and a negative errno returns.
+ */
+static int pktproc_get_pkt_from_ringbuf_mode(struct pktproc_queue *q, struct sk_buff **new_skb)
+{
+	int ret = 0;
+	u16 len;
+	u16 ch_id;
+	u8 *src;
+	struct sk_buff *skb = NULL;
+	struct link_device *ld = &q->mld->link_dev;
+	struct pktproc_desc_ringbuf *desc = q->desc_ringbuf;
+
+	if (!pktproc_check_active(q->ppa, q->q_idx)) {
+		mif_err_limited("Queue %d not activated\n", q->q_idx);
+		return -EACCES;
+	}
+	if (q->ppa->desc_mode != DESC_MODE_RINGBUF) {
+		mif_err_limited("Invalid desc_mode %d\n", q->ppa->desc_mode);
+		return -EINVAL;
+	}
+
+	/* Get data */
+	len = desc[*q->rear_ptr].length;
+	if (len > q->ppa->max_packet_size) {
+		mif_err_limited("Length is invalid:%d\n", len);
+		q->stat.err_len++;
+		ret = -EPERM;
+		goto rx_error_on_desc;
+	}
+	ch_id = desc[*q->rear_ptr].channel_id;
+	if (ch_id == SIPC5_CH_ID_MAX) {
+		mif_err_limited("Channel ID is invalid:%d\n", ch_id);
+		q->stat.err_chid++;
+		ret = -EPERM;
+		goto rx_error_on_desc;
+	}
+	/* Translate the CP-side physical address into our kernel VA and
+	 * verify it stays inside the shared buffer region.
+	 */
+	src = desc[*q->rear_ptr].cp_data_paddr - q->cp_buff_pbase + q->q_buff_vbase;
+	if ((src < q->q_buff_vbase) || (src > q->q_buff_vbase + q->q_buff_size)) {
+		mif_err_limited("Data address is invalid:%pK q_buff_vbase:%pK size:0x%08x\n",
+				src, q->q_buff_vbase, q->q_buff_size);
+		q->stat.err_addr++;
+		ret = -EINVAL;
+		goto rx_error_on_desc;
+	}
+
+#if !IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOCC)
+	/* No HW cache coherency: invalidate CPU caches before reading. */
+	if (q->ppa->buff_rgn_cached && !q->ppa->use_hw_iocc)
+		dma_sync_single_for_cpu(q->ppa->dev, virt_to_phys(src), len, DMA_FROM_DEVICE);
+#endif
+
+	pp_debug("len:%d ch_id:%d src:%pK\n", len, ch_id, src);
+
+	/* Build skb */
+	skb = napi_alloc_skb(q->napi_ptr, len);
+	if (unlikely(!skb)) {
+		mif_err_limited("alloc_skb() error\n");
+		q->stat.err_nomem++;
+		ret = -ENOMEM;
+		goto rx_error;
+	}
+	skb_put(skb, len);
+	skb_copy_to_linear_data(skb, src, len);
+#ifdef PKTPROC_DEBUG_PKT
+	pr_buffer("pktproc", (char *)skb->data, (size_t)len, (size_t)40);
+#endif
+
+	/* Set priv */
+	skbpriv(skb)->lnk_hdr = 0;
+	skbpriv(skb)->sipc_ch = ch_id;
+	skbpriv(skb)->iod = link_get_iod_with_channel(ld, skbpriv(skb)->sipc_ch);
+	skbpriv(skb)->ld = ld;
+	skbpriv(skb)->napi = q->napi_ptr;
+
+	/* HW checksum offload results only exist from pktproc v2 on. */
+	switch (q->ppa->version) {
+	case PKTPROC_V2:
+		if (pktproc_check_hw_checksum(desc[*q->rear_ptr].status))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			q->stat.err_csum++;
+		break;
+	default:
+		break;
+	}
+
+	if (unlikely(q->ppa->pktgen_gro)) {
+		pktproc_set_pktgen_checksum(q, skb->data);
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+
+	*new_skb = skb;
+	q->stat.pass_cnt++;
+	*q->rear_ptr = circ_new_ptr(q->num_desc, *q->rear_ptr, 1);
+
+	return 0;
+
+rx_error_on_desc:
+	mif_err_limited("Skip invalid descriptor at %d\n", *q->rear_ptr);
+	*q->rear_ptr = circ_new_ptr(q->num_desc, *q->rear_ptr, 1);
+
+rx_error:
+	if (skb)
+		dev_kfree_skb_any(skb);
+
+	return ret;
+}
+
+/*
+ * sktbuf mode on 32bit region: buffer fill/clear helpers and packet reception
+ */
+/* Release all RX buffers of a buffer-manager (netrx_mng) queue.
+ *
+ * Walks the descriptors between done_ptr and the fore pointer, DMA-unmaps
+ * each still-mapped buffer, clears the CP-visible addresses, then reinits
+ * the buffer manager and wipes the descriptor ring. Used on queue reset.
+ * Returns 0, or a negative errno if the mode/manager preconditions fail.
+ */
+static int pktproc_clear_data_addr(struct pktproc_queue *q)
+{
+	struct pktproc_desc_sktbuf *desc = q->desc_sktbuf;
+	struct pktproc_adaptor *ppa = q->ppa;
+
+	if (ppa->desc_mode != DESC_MODE_SKTBUF) {
+		mif_err_limited("Invalid desc_mode %d\n", ppa->desc_mode);
+		return -EINVAL;
+	}
+
+	if (!ppa->use_netrx_mng) {
+		mif_err_limited("Buffer manager is not set\n");
+		return -EPERM;
+	}
+
+	mif_info("Unmap buffer from %d to %d\n", q->done_ptr, *q->fore_ptr);
+	while (*q->fore_ptr != q->done_ptr) {
+#if !IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOCC)
+		/* Only entries that were DMA-mapped (cached region, no HW
+		 * coherency) carry a nonzero dma_addr.
+		 */
+		if (ppa->buff_rgn_cached && !ppa->use_hw_iocc && q->dma_addr[q->done_ptr]) {
+			dma_unmap_single_attrs(ppa->dev, q->dma_addr[q->done_ptr],
+					       ppa->max_packet_size, DMA_FROM_DEVICE, 0);
+			q->dma_addr[q->done_ptr] = 0;
+		}
+#endif
+		desc[q->done_ptr].cp_data_paddr = 0;
+		q->done_ptr = circ_new_ptr(q->num_desc, q->done_ptr, 1);
+	}
+
+	cpif_init_netrx_mng(q->manager);
+
+	memset(desc, 0, q->desc_size);
+
+	return 0;
+}
+
+/* Release all RX buffers of a queue that runs WITHOUT the netrx buffer
+ * manager: DMA-unmap every mapped slot and wipe the descriptor ring.
+ * Used on queue reset.
+ * Returns 0, or -EPERM when a buffer manager is attached to this queue.
+ */
+static int pktproc_clear_data_addr_without_bm(struct pktproc_queue *q)
+{
+	struct pktproc_desc_sktbuf *desc = q->desc_sktbuf;
+
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_NETRX_MGR)
+	if (q->ppa->use_netrx_mng) {
+		mif_err_limited("Buffer manager is set\n");
+		return -EPERM;
+	}
+#endif
+
+#if !IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOCC)
+	mif_info("Unmap all buffers\n");
+	if (q->ppa->buff_rgn_cached && !q->ppa->use_hw_iocc) {
+		int i;
+
+		for (i = 0; i < q->num_desc; i++) {
+			/* Only slots that are currently mapped. */
+			if (q->dma_addr[i]) {
+				dma_unmap_single_attrs(q->ppa->dev, q->dma_addr[i],
+						       q->ppa->max_packet_size,
+						       DMA_FROM_DEVICE, 0);
+				q->dma_addr[i] = 0;
+			}
+		}
+	}
+#endif
+	memset(desc, 0, q->desc_size);
+
+	return 0;
+}
+
+/* Refill the RX descriptor ring of a buffer-manager (netrx_mng) queue.
+ *
+ * For every free slot between the fore pointer and done_ptr, a new buffer
+ * is obtained from the manager, optionally DMA-mapped, and its CP-side
+ * address is written into the descriptor. The fore pointer advances one
+ * slot at a time under the queue lock.
+ * Returns 0 on success or a negative errno (slots filled so far remain
+ * filled).
+ */
+static int pktproc_fill_data_addr(struct pktproc_queue *q)
+{
+	struct pktproc_desc_sktbuf *desc = q->desc_sktbuf;
+	struct pktproc_adaptor *ppa = q->ppa;
+	u32 space;
+	u32 fore;
+	int i;
+	unsigned long flags;
+
+	if (ppa->desc_mode != DESC_MODE_SKTBUF) {
+		mif_err_limited("Invalid desc_mode %d\n", ppa->desc_mode);
+		return -EINVAL;
+	}
+
+	if (!ppa->use_netrx_mng) {
+		mif_err_limited("Buffer manager is not set\n");
+		return -EPERM;
+	}
+
+	spin_lock_irqsave(&q->lock, flags);
+
+	space = circ_get_space(q->num_desc, *q->fore_ptr, q->done_ptr);
+	pp_debug("Q%d:%d/%d/%d Space:%d\n",
+		 q->q_idx, *q->fore_ptr, *q->rear_ptr, q->done_ptr, space);
+
+	fore = *q->fore_ptr;
+	for (i = 0; i < space; i++) {
+		struct cpif_addr_pair *addrpair = cpif_map_rx_buf(q->manager);
+		if (unlikely(!addrpair)) {
+			mif_err_limited("skb alloc error due to no memory\n");
+			q->stat.err_bm_nomem++;
+			spin_unlock_irqrestore(&q->lock, flags);
+			return -ENOMEM;
+		}
+
+#if !IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOCC)
+		/* No HW cache coherency: map the payload area for DMA. */
+		if (ppa->buff_rgn_cached && !ppa->use_hw_iocc) {
+			q->dma_addr[fore] = dma_map_single_attrs(ppa->dev,
+					(u8 *)addrpair->ap_addr + ppa->skb_padding_size,
+					ppa->max_packet_size, DMA_FROM_DEVICE, 0);
+			if (dma_mapping_error(ppa->dev, q->dma_addr[fore])) {
+				mif_err_limited("dma_map_single_attrs() failed\n");
+				q->dma_addr[fore] = 0;
+				spin_unlock_irqrestore(&q->lock, flags);
+				return -ENOMEM;
+			}
+		}
+#endif
+
+		desc[fore].cp_data_paddr = addrpair->cp_addr + ppa->skb_padding_size;
+
+		if (fore == 0)
+			desc[fore].control |= (1 << 7); /* HEAD */
+
+		if (fore == (q->num_desc - 1))
+			desc[fore].control |= (1 << 3); /* RINGEND */
+
+		if (unlikely(desc[fore].reserved0 != 0)) { /* W/A to detect mem poison */
+			mif_err("mem poison:0x%llX r0:%d c:%d s:%d l%d cl%d r1:%d\n",
+				desc[fore].cp_data_paddr, desc[fore].reserved0,
+				desc[fore].control, desc[fore].status,
+				desc[fore].lro, desc[fore].clat, desc[fore].reserved1);
+			panic("memory poison\n");
+		}
+		*q->fore_ptr = circ_new_ptr(q->num_desc, *q->fore_ptr, 1);
+		fore = circ_new_ptr(q->num_desc, fore, 1);
+	}
+
+	pp_debug("Q:%d fore/rear/done:%d/%d/%d\n",
+		 q->q_idx, *q->fore_ptr, *q->rear_ptr, q->done_ptr);
+
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return 0;
+}
+
+/* Refill RX descriptors for a queue that runs without the netrx buffer
+ * manager. Each descriptor slot i points at a fixed offset
+ * (true_packet_size * i) inside the contiguous RX buffer region. When the
+ * PCIe SysMMU is active, buffers are IOMMU-mapped on the fly and the fore
+ * pointer only advances by the increment returned through
+ * cpif_pcie_iommu_map_va() (0 defers the update).
+ * Returns 0 on success or a negative errno.
+ */
+static int pktproc_fill_data_addr_without_bm(struct pktproc_queue *q)
+{
+	struct pktproc_desc_sktbuf *desc = q->desc_sktbuf;
+	unsigned long dst_paddr;
+	u32 fore;
+	int i;
+	unsigned long flags;
+	u32 space;
+	u32 fore_inc = 1;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	struct modem_ctl *mc = dev_get_drvdata(q->ppa->dev);
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_NETRX_MGR)
+	if (q->ppa->use_netrx_mng) {
+		mif_err_limited("Buffer manager is set\n");
+		return -EPERM;
+	}
+#endif
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num))
+		fore = q->ioc.curr_fore;
+	else
+		fore = *q->fore_ptr;
+#else
+	fore = *q->fore_ptr;
+#endif
+
+	/* The fore pointer is passed by CP from shared memory. Check the
+	 * range to avoid OOB access. fore is unsigned, so only the upper
+	 * bound can be violated (the previous "fore < 0" test was always
+	 * false).
+	 */
+	if (fore >= q->num_desc) {
+		mif_err("Invalid fore_ptr (%d) passed by CP on queue(%d)!\n",
+			fore, q->q_idx);
+		return -EINVAL;
+	}
+
+	pp_debug("Q%d:%d/%d/%d\n",
+		 q->q_idx, fore, *q->rear_ptr, q->done_ptr);
+
+	spin_lock_irqsave(&q->lock, flags);
+
+	if (q->ppa->buff_rgn_cached) {
+		space = circ_get_space(q->num_desc, fore, q->done_ptr);
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+		/* Keep a safety margin so CP does not run into slots whose
+		 * IOMMU mapping is still in flight.
+		 */
+		if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num)) {
+			if (space > q->ppa->space_margin)
+				space -= q->ppa->space_margin;
+			else
+				space = 0;
+		}
+#endif
+	} else {
+		space = q->num_desc;
+	}
+
+	for (i = 0; i < space; i++) {
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+		u8 *dst_vaddr = NULL;
+#endif
+		dst_paddr = q->q_buff_pbase + (q->ppa->true_packet_size * fore);
+		if (dst_paddr > (q->q_buff_pbase + q->q_buff_size))
+			mif_err_limited("dst_paddr:0x%lx is over 0x%lx\n",
+					dst_paddr, q->q_buff_pbase + q->q_buff_size);
+
+		pp_debug("Q:%d fore_ptr:%d dst_paddr:0x%lx\n",
+			 q->q_idx, fore, dst_paddr);
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+		if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num)) {
+			dst_vaddr = cpif_pcie_iommu_map_va(q, dst_paddr, fore, &fore_inc);
+			if (!dst_vaddr) {
+				mif_err_limited("cpif_pcie_iommu_get_va() failed\n");
+				spin_unlock_irqrestore(&q->lock, flags);
+				return -ENOMEM;
+			}
+		}
+
+#if !IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOCC)
+		/* No HW cache coherency: map the buffer for DMA. mc and
+		 * exynos_pcie_is_sysmmu_enabled() exist only when PCIe IOMMU
+		 * support is built in, so this block must be nested inside
+		 * that config (the previous code broke the build when only
+		 * CONFIG_LINK_DEVICE_PCIE_IOCC was disabled). dst_vaddr is
+		 * guaranteed non-NULL here by the map_va() check above.
+		 */
+		if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num) &&
+		    q->ppa->buff_rgn_cached && !q->ppa->use_hw_iocc) {
+			q->dma_addr[fore] =
+				dma_map_single_attrs(q->ppa->dev,
+						     dst_vaddr + q->ppa->skb_padding_size,
+						     q->ppa->max_packet_size,
+						     DMA_FROM_DEVICE, 0);
+			if (dma_mapping_error(q->ppa->dev, q->dma_addr[fore])) {
+				mif_err_limited("dma_map_single_attrs() failed\n");
+				q->dma_addr[fore] = 0;
+				spin_unlock_irqrestore(&q->lock, flags);
+				return -ENOMEM;
+			}
+		}
+#endif
+#endif
+
+		desc[fore].cp_data_paddr = (dst_paddr - q->q_buff_pbase) +
+					   q->cp_buff_pbase +
+					   q->ppa->skb_padding_size;
+
+		if (fore == 0)
+			desc[fore].control |= (1 << 7); /* HEAD */
+
+		if (fore == (q->num_desc - 1)) {
+			desc[fore].control |= (1 << 3); /* RINGEND */
+			if (!q->ppa->buff_rgn_cached)
+				continue;
+		}
+
+		if (fore_inc)
+			*q->fore_ptr = circ_new_ptr(q->num_desc, *q->fore_ptr, fore_inc);
+		fore = circ_new_ptr(q->num_desc, fore, 1);
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+		if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num))
+			q->ioc.curr_fore = fore;
+#endif
+	}
+
+	pp_debug("Q:%d fore/rear/done:%d/%d/%d\n",
+		 q->q_idx, *q->fore_ptr, *q->rear_ptr, q->done_ptr);
+
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return 0;
+}
+
+/* Advance the fore pointer after @count descriptors were consumed. On a
+ * cached buffer region this instead triggers a buffer refill via
+ * alloc_rx_buf(), which performs its own pointer handling.
+ * Returns 0 on success or the alloc_rx_buf() error code.
+ */
+static int pktproc_update_fore_ptr(struct pktproc_queue *q, u32 count)
+{
+	unsigned long flags;
+	int ret;
+
+	if (count == 0)
+		return 0;
+
+	if (!q->ppa->buff_rgn_cached) {
+		spin_lock_irqsave(&q->lock, flags);
+		*q->fore_ptr = circ_new_ptr(q->num_desc, *q->fore_ptr, count);
+		spin_unlock_irqrestore(&q->lock, flags);
+		return 0;
+	}
+
+	ret = q->alloc_rx_buf(q);
+	if (ret)
+		mif_err_limited("alloc_rx_buf() error %d Q%d\n", ret, q->q_idx);
+
+	return ret;
+}
+
+/* Sanity-check a descriptor written by CP: the length must not exceed the
+ * maximum packet size and the channel id must be a real SIPC channel.
+ * Bumps the matching per-queue error counter on failure.
+ */
+static bool is_desc_valid(struct pktproc_queue *q, struct pktproc_desc_sktbuf *desc)
+{
+	bool valid = true;
+
+	if (desc->length > q->ppa->max_packet_size) {
+		mif_err_limited("Length is invalid:%d\n", desc->length);
+		q->stat.err_len++;
+		valid = false;
+	} else if (desc->channel_id == SIPC5_CH_ID_MAX) {
+		mif_err_limited("Channel ID is invalid:%d\n", desc->channel_id);
+		q->stat.err_chid++;
+		valid = false;
+	}
+
+	return valid;
+}
+
+/* Resolve the kernel virtual address of the payload described by @desc.
+ *
+ * Three sources, in order of preference: the netrx buffer manager, the
+ * per-descriptor IOMMU-mapped page (SysMMU active), or a direct offset
+ * into the shared RX region. The returned pointer is advanced past the
+ * skb padding area; the slot's DMA mapping (if any) is released.
+ * Returns NULL on an invalid/unknown address (err_addr is bumped).
+ */
+static u8 *get_packet_vaddr(struct pktproc_queue *q, struct pktproc_desc_sktbuf *desc)
+{
+	u8 *ret;
+	struct pktproc_adaptor *ppa = q->ppa;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	struct modem_ctl *mc = dev_get_drvdata(ppa->dev);
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_NETRX_MGR)
+	if (q->manager) {
+		/* Buffer manager path: look the CP address up in the map. */
+		ret = (u8 *)cpif_unmap_rx_buf(q->manager,
+					      desc->cp_data_paddr -
+					      ppa->skb_padding_size, false);
+		if (!ret) {
+			mif_err_limited("invalid data address. null given\n");
+			q->stat.err_addr++;
+			return NULL;
+		}
+	} else
+#endif
+	{
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+		if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num)) {
+			unsigned long src_paddr = desc->cp_data_paddr - q->cp_buff_pbase +
+				q->q_buff_pbase - ppa->skb_padding_size;
+
+			/* VA recorded at map time; also give the lazy
+			 * unmapper a chance to drop the previous region.
+			 */
+			ret = (u8 *)q->ioc.pf_buf[q->done_ptr];
+			cpif_pcie_iommu_try_ummap_va(q, src_paddr, ret, q->done_ptr);
+		} else {
+			/* Linear translation CP paddr -> AP vaddr. */
+			ret = desc->cp_data_paddr - q->cp_buff_pbase +
+				q->q_buff_vbase - ppa->skb_padding_size;
+
+			if ((ret < q->q_buff_vbase) || (ret > q->q_buff_vbase + q->q_buff_size)) {
+				mif_err_limited("Data address is invalid:%pK data:%pK size:0x%08x\n",
+						ret, q->q_buff_vbase, q->q_buff_size);
+				q->stat.err_addr++;
+				return NULL;
+			}
+		}
+#else
+		/* Linear translation CP paddr -> AP vaddr. */
+		ret = desc->cp_data_paddr - q->cp_buff_pbase +
+			q->q_buff_vbase - ppa->skb_padding_size;
+
+		if ((ret < q->q_buff_vbase) || (ret > q->q_buff_vbase + q->q_buff_size)) {
+			mif_err_limited("Data address is invalid:%pK data:%pK size:0x%08x\n",
+					ret, q->q_buff_vbase, q->q_buff_size);
+			q->stat.err_addr++;
+			return NULL;
+		}
+#endif
+	}
+
+	ret += ppa->skb_padding_size;
+
+#if !IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOCC)
+	/* Hand the buffer back to the CPU: release its DMA mapping. */
+	if (ppa->buff_rgn_cached && !ppa->use_hw_iocc && q->dma_addr[q->done_ptr]) {
+		dma_unmap_single_attrs(ppa->dev, q->dma_addr[q->done_ptr],
+				       ppa->max_packet_size, DMA_FROM_DEVICE, 0);
+		q->dma_addr[q->done_ptr] = 0;
+	}
+#endif
+
+	return ret;
+}
+
+/* Wrap one received buffer into an skb.
+ *
+ * Zero-copy build_skb() is used when the buffer came from the netrx
+ * manager or from an IOMMU-mapped page; otherwise the payload is copied
+ * into a napi-allocated skb. On success the done pointer advances and
+ * *buffer_count is incremented.
+ * Note: @rear_pad_size is currently unused (callers pass
+ * sizeof(struct skb_shared_info)); kept for the call shape.
+ * Returns the skb or NULL on allocation failure (err_nomem is bumped and,
+ * on the manager path, the buffer is remembered for retrieval).
+ */
+static struct sk_buff *cpif_build_skb_single(struct pktproc_queue *q, u8 *src, u16 len,
+		u16 front_pad_size, u16 rear_pad_size, int *buffer_count)
+{
+	struct sk_buff *skb;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	struct modem_ctl *mc = dev_get_drvdata(q->ppa->dev);
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_NETRX_MGR)
+	if (q->manager) {
+		/* Zero-copy: the manager's fragment becomes the skb head. */
+		skb = build_skb(src - front_pad_size, q->manager->frag_size);
+		if (unlikely(!skb))
+			goto error;
+
+		skb_reserve(skb, front_pad_size);
+	} else
+#endif
+	{
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+		if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num)) {
+			/* Zero-copy over the IOMMU-mapped slot. */
+			skb = build_skb(src - front_pad_size, q->ppa->true_packet_size);
+			if (unlikely(!skb))
+				goto error;
+
+			skb_reserve(skb, front_pad_size);
+		} else {
+			/* Copy path: payload duplicated into a fresh skb. */
+			skb = napi_alloc_skb(q->napi_ptr, len);
+			if (unlikely(!skb))
+				goto error;
+
+			skb_copy_to_linear_data(skb, src, len);
+		}
+#else
+		/* Copy path: payload duplicated into a fresh skb. */
+		skb = napi_alloc_skb(q->napi_ptr, len);
+		if (unlikely(!skb))
+			goto error;
+
+		skb_copy_to_linear_data(skb, src, len);
+#endif
+	}
+
+	skb_put(skb, len);
+	*buffer_count += 1;
+	q->done_ptr = circ_new_ptr(q->num_desc, q->done_ptr, 1);
+
+	return skb;
+
+error:
+	mif_err_limited("getting skb failed\n");
+
+	q->stat.err_nomem++;
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_NETRX_MGR)
+	/* Remember the orphaned buffer so the manager can reclaim it. */
+	if (q->manager && !q->manager->already_retrieved)
+		q->manager->already_retrieved = src;
+#endif
+
+	return NULL;
+}
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_LRO)
+/* Compute the L3+L4 header length of the head segment of an LRO chain and
+ * set the matching gso_type on the skb. For UDPv6 the checksum is
+ * recomputed over the (aggregated) head payload. Relies on the IP version
+ * nibble being readable through struct iphdr for both v4 and v6.
+ * Sets *is_udp when the flow is UDP (GSO_UDP_L4 + FRAGLIST).
+ */
+static u32 cpif_prepare_lro_and_get_headlen(struct sk_buff *skb, bool *is_udp)
+{
+	u32 headlen = 0;
+	struct iphdr *iph = (struct iphdr *)skb->data;
+
+	if (iph->version == 6) {
+		struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
+
+		headlen += sizeof(struct ipv6hdr);
+		if (ipv6h->nexthdr == NEXTHDR_TCP) {
+			struct tcphdr *th = (struct tcphdr *)(skb->data + headlen);
+
+			headlen += th->doff * 4;
+			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+		} else {
+			struct udphdr *uh = (struct udphdr *)(skb->data + headlen);
+			__be16 backup_len = uh->len;
+
+			/* Recompute the UDPv6 checksum over the aggregated
+			 * payload, using the real length just for the
+			 * computation and restoring the header field after.
+			 */
+			uh->check = 0;
+			uh->len = htons(skb->len - headlen);
+			uh->check = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+						    ntohs(uh->len), IPPROTO_UDP,
+						    csum_partial(uh, ntohs(uh->len), 0));
+			uh->len = backup_len;
+			headlen += sizeof(struct udphdr);
+			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4 | SKB_GSO_FRAGLIST;
+			*is_udp = true;
+		}
+	} else { /* ipv4 */
+		headlen += sizeof(struct iphdr);
+		if (iph->protocol == IPPROTO_TCP) {
+			struct tcphdr *th = (struct tcphdr *)(skb->data + headlen);
+
+			headlen += th->doff * 4;
+			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+		} else {
+			headlen += sizeof(struct udphdr);
+			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4 | SKB_GSO_FRAGLIST;
+			*is_udp = true;
+		}
+	}
+
+	return headlen;
+}
+
+/* Assemble a HW-GRO (LRO) chain into one GSO skb.
+ *
+ * The first descriptor (payload at @src) becomes the head skb; following
+ * descriptors flagged LRO_MID_SEG/LRO_LAST_SEG are wrapped individually
+ * and linked onto the head's frag_list until the last segment. For UDP
+ * flows a fresh header (with checksum for v6) is synthesized in front of
+ * each trailing segment. *buffer_count tracks consumed descriptors;
+ * *nomem distinguishes allocation failure from a bad descriptor.
+ * Returns the head skb or NULL on failure (partial chain is freed).
+ */
+static struct sk_buff *cpif_build_skb_gro(struct pktproc_queue *q, u8 *src, u16 len,
+		int *buffer_count, bool *nomem)
+{
+	struct sk_buff *skb_head, *skb, *last;
+	struct pktproc_desc_sktbuf *desc;
+	struct iphdr *iph;
+	struct ipv6hdr *ipv6h;
+	struct udphdr *uh;
+	u32 hdr_len;
+	bool is_udp = false;
+
+	skb_head = cpif_build_skb_single(q, src, len, q->ppa->skb_padding_size,
+					 sizeof(struct skb_shared_info), buffer_count);
+	if (unlikely(!skb_head))
+		goto gro_fail_nomem;
+
+	hdr_len = cpif_prepare_lro_and_get_headlen(skb_head, &is_udp);
+	skb_shinfo(skb_head)->gso_size = skb_head->len - hdr_len;
+	skb_shinfo(skb_head)->gso_segs = 1;
+	skb_frag_list_init(skb_head);
+	skb_head->csum_level = 1;
+
+	last = NULL;
+	/* Consume trailing segments of the chain from the ring. */
+	while (q->desc_sktbuf[q->done_ptr].lro & (LRO_MID_SEG | LRO_LAST_SEG)) {
+		u8 *tmp_src;
+		u16 tmp_len;
+		bool last_seg = (q->desc_sktbuf[q->done_ptr].lro & LRO_LAST_SEG) ?
+			true : false;
+
+		desc = &q->desc_sktbuf[q->done_ptr];
+
+		if (!is_desc_valid(q, desc)) {
+			mif_err_limited("Err! invalid desc. HW GRO failed\n");
+			goto gro_fail_inval;
+		}
+
+		tmp_src = get_packet_vaddr(q, desc);
+		if (!tmp_src) {
+			mif_err_limited("Err! invalid packet vaddr. HW GRO failed\n");
+			goto gro_fail_inval;
+		}
+
+		tmp_len = desc->length;
+		skb = cpif_build_skb_single(q, tmp_src, tmp_len, q->ppa->skb_padding_size,
+					    sizeof(struct skb_shared_info), buffer_count);
+		if (unlikely(!skb))
+			goto gro_fail_nomem;
+		/* Offsets point into the padding area in front of the data,
+		 * where a per-segment header is (re)constructed below.
+		 */
+		skb->transport_header = q->ppa->skb_padding_size - hdr_len;
+		skb->network_header = q->ppa->skb_padding_size - hdr_len;
+		skb->mac_header = q->ppa->skb_padding_size - hdr_len;
+
+		if (is_udp) { /* need to generate header including checksum */
+			u8 *hdr_start = skb->data - hdr_len;
+
+			skb_copy_from_linear_data(skb_head, hdr_start, hdr_len);
+			iph = (struct iphdr *)hdr_start;
+			if (iph->version == 4) {
+				uh = (struct udphdr *)(hdr_start + sizeof(struct iphdr));
+				iph->tot_len = htons(tmp_len + hdr_len);
+				iph->check = ip_fast_csum((unsigned char *)iph,
+							  iph->ihl);
+			} else { /* ipv6 */
+				uh = (struct udphdr *)(hdr_start + sizeof(struct ipv6hdr));
+				ipv6h = (struct ipv6hdr *)hdr_start;
+				ipv6h->payload_len = htons(tmp_len + sizeof(struct udphdr));
+			}
+
+			uh->len = htons(tmp_len + sizeof(struct udphdr));
+			if (iph->version == 6) { /* checksum required for udp v6 only */
+				uh->check = 0;
+				uh->check = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+							    ntohs(uh->len), IPPROTO_UDP,
+							    csum_partial(uh, ntohs(uh->len), 0));
+			}
+		}
+
+		/* Link the segment onto the head's frag_list and grow the
+		 * aggregate accounting.
+		 */
+		if (last)
+			last->next = skb;
+		else
+			skb_shinfo(skb_head)->frag_list = skb;
+		last = skb;
+		skb_head->data_len += skb->len;
+		skb_head->truesize += skb->truesize;
+		skb_head->len += skb->len;
+		skb_shinfo(skb_head)->gso_segs += 1;
+
+		if (last_seg)
+			break;
+	}
+
+	/* Head IPv4 checksum covers the (unchanged) head header only. */
+	iph = (struct iphdr *)skb_head->data;
+	if (iph->version == 4)
+		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+	return skb_head;
+
+gro_fail_nomem:
+	*nomem = true;
+gro_fail_inval:
+	if (skb_head)
+		dev_kfree_skb_any(skb_head);
+	return NULL;
+}
+
+/* Receive one unit from a sktbuf-mode queue: a single packet or a HW-GRO
+ * (LRO) chain. The resulting skb is returned through @new_skb unless the
+ * packet is enqueued to DIT, which delivers it on its own.
+ * Returns the number of consumed descriptors (>= 1) or a negative errno.
+ * On an invalid descriptor the CP is crashed deliberately, since the
+ * shared ring state can no longer be trusted.
+ */
+static int pktproc_get_pkt_from_sktbuf_mode(struct pktproc_queue *q, struct sk_buff **new_skb)
+{
+	int ret = 0;
+	int buffer_count = 0;
+	u16 len;
+	u8 ch_id;
+	/* It will be the start of skb->data */
+	u8 *src;
+	struct pktproc_adaptor *ppa = q->ppa;
+	struct sk_buff *skb = NULL;
+	struct pktproc_desc_sktbuf desc_done_ptr = q->desc_sktbuf[q->done_ptr];
+	struct link_device *ld = &q->mld->link_dev;
+	bool csum = false;
+
+	if (!is_desc_valid(q, &desc_done_ptr)) {
+		ret = -EINVAL;
+		goto rx_error_on_desc;
+	}
+
+	src = get_packet_vaddr(q, &desc_done_ptr);
+	if (!src) {
+		ret = -EINVAL;
+		goto rx_error_on_desc;
+	}
+
+	len = desc_done_ptr.length;
+	ch_id = desc_done_ptr.channel_id;
+	csum = pktproc_check_hw_checksum(desc_done_ptr.status);
+	if (!csum)
+		q->stat.err_csum++;
+	if (unlikely(ppa->pktgen_gro)) {
+		pktproc_set_pktgen_checksum(q, src);
+		csum = true;
+	}
+
+	pp_debug("Q:%d done_ptr:%d len:%d ch_id:%d src:%pK csum:%d\n",
+		 q->q_idx, q->done_ptr, len, ch_id, src, csum);
+
+#ifdef PKTPROC_DEBUG_PKT
+	pr_buffer("pktproc", (char *)src + ppa->skb_padding_size, (size_t)len, (size_t)40);
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+	if (dit_check_dir_use_queue(DIT_DIR_RX, q->q_idx)) {
+		unsigned long src_paddr;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+		struct modem_ctl *mc = dev_get_drvdata(ppa->dev);
+
+		/* With the SysMMU active the descriptor address is not a
+		 * linear AP physical address; pass 0 so DIT works from src.
+		 * (The previous code declared src_paddr inside the if/else
+		 * arms, which does not compile and left it out of scope at
+		 * the dit_enqueue call below.)
+		 */
+		if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num))
+			src_paddr = 0;
+		else
+			src_paddr = desc_done_ptr.cp_data_paddr - q->cp_buff_pbase +
+				q->q_buff_pbase;
+#else
+		src_paddr = desc_done_ptr.cp_data_paddr - q->cp_buff_pbase +
+			q->q_buff_pbase;
+#endif
+		ret = dit_enqueue_src_desc_ring(DIT_DIR_RX,
+						src, src_paddr, len, ch_id, csum);
+		if (ret < 0) {
+			mif_err_limited("Enqueue failed at fore/rear/done:%d/%d/%d, ret: %d\n",
+					*q->fore_ptr, *q->rear_ptr, q->done_ptr, ret);
+
+			q->stat.err_enqueue_dit++;
+			goto rx_error;
+		}
+
+		q->stat.pass_cnt++;
+		q->done_ptr = circ_new_ptr(q->num_desc, q->done_ptr, 1);
+
+		return 1; /* dit cannot support HW GRO packets */
+	}
+#endif
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_LRO)
+	/* guaranteed that only TCP/IP, UDP/IP in this case */
+	if (desc_done_ptr.lro == (LRO_MODE_ON | LRO_FIRST_SEG)) {
+		bool nomem = false;
+
+		if (!csum)
+			mif_info("CSUM error on LRO: 0x%X\n", desc_done_ptr.status);
+
+		skb = cpif_build_skb_gro(q, src, len, &buffer_count, &nomem);
+		if (unlikely(!skb)) {
+			if (nomem) {
+				ret = -ENOMEM;
+				if (buffer_count != 0) /* intermediate seg */
+					q->done_ptr = circ_new_ptr(q->num_desc, q->done_ptr, 1);
+				goto rx_error;
+			} else {
+				ret = -EINVAL;
+				goto rx_error_on_desc;
+			}
+		}
+		q->stat.lro_cnt++;
+	} else {
+		skb = cpif_build_skb_single(q, src, len, ppa->skb_padding_size,
+					    sizeof(struct skb_shared_info), &buffer_count);
+		if (unlikely(!skb)) {
+			ret = -ENOMEM;
+			goto rx_error;
+		}
+	}
+#else
+	skb = cpif_build_skb_single(q, src, len, ppa->skb_padding_size,
+				    sizeof(struct skb_shared_info), &buffer_count);
+	if (unlikely(!skb)) {
+		ret = -ENOMEM;
+		goto rx_error;
+	}
+#endif
+
+	if (csum)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	if (ppa->use_exclusive_irq)
+		skb_record_rx_queue(skb, q->q_idx);
+
+	/* Set priv */
+	skbpriv(skb)->lnk_hdr = 0;
+	skbpriv(skb)->sipc_ch = ch_id;
+	skbpriv(skb)->iod = link_get_iod_with_channel(ld, ch_id);
+	skbpriv(skb)->ld = ld;
+	skbpriv(skb)->napi = q->napi_ptr;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	/* CLAT[1:0] = {CLAT On, CLAT Pkt} */
+	if (desc_done_ptr.clat == 0x03)
+		skbpriv(skb)->rx_clat = 1;
+#endif
+
+	*new_skb = skb;
+
+	q->stat.pass_cnt += buffer_count;
+
+	return buffer_count;
+
+rx_error_on_desc:
+	mif_err_limited("Skip invalid descriptor at %d and crash\n", q->done_ptr);
+	ld->link_trigger_cp_crash(q->mld, CRASH_REASON_MIF_RX_BAD_DATA,
+				  "invalid descriptor given");
+	q->done_ptr = circ_new_ptr(q->num_desc, q->done_ptr, 1);
+
+rx_error:
+	if (skb)
+		dev_kfree_skb_any(skb);
+
+	return ret;
+}
+
+/*
+ * RX ring usage accounting and cleaning (NAPI poll path)
+ */
+/* Number of descriptors currently waiting for the AP on this queue
+ * (ringbuf: fore vs rear; sktbuf: rear vs done). Unknown modes report 0.
+ */
+int pktproc_get_usage(struct pktproc_queue *q)
+{
+	switch (q->ppa->desc_mode) {
+	case DESC_MODE_RINGBUF:
+		return circ_get_usage(q->num_desc, *q->fore_ptr, *q->rear_ptr);
+	case DESC_MODE_SKTBUF:
+		return circ_get_usage(q->num_desc, *q->rear_ptr, q->done_ptr);
+	default:
+		return 0;
+	}
+}
+
+/* Like pktproc_get_usage(), but for sktbuf mode it measures rear vs fore
+ * (descriptors outstanding toward CP) instead of rear vs done.
+ */
+int pktproc_get_usage_fore_rear(struct pktproc_queue *q)
+{
+	switch (q->ppa->desc_mode) {
+	case DESC_MODE_RINGBUF:
+		return circ_get_usage(q->num_desc, *q->fore_ptr, *q->rear_ptr);
+	case DESC_MODE_SKTBUF:
+		return circ_get_usage(q->num_desc, *q->rear_ptr, *q->fore_ptr);
+	default:
+		return 0;
+	}
+}
+
+/* NAPI poll body: drain up to @budget units from the RX ring.
+ *
+ * Each get_packet() call may consume several descriptors (LRO) or hand the
+ * packet to DIT (skb comes back NULL). Packets not taken by DIT are pushed
+ * up the stack, the throughput monitor is kicked, and the fore pointer is
+ * refilled for everything delivered here.
+ * *work_done receives the number of consumed descriptors; returns the last
+ * get_packet()/pass_skb_to_net() status.
+ */
+static int pktproc_clean_rx_ring(struct pktproc_queue *q, int budget, int *work_done)
+{
+	int ret = 0;
+	u32 num_frames = 0;
+	u32 rcvd_total = 0;
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+	u32 rcvd_dit = 0;
+#endif
+	u32 budget_used = 0;
+
+	num_frames = pktproc_get_usage(q);
+
+	if (!num_frames)
+		return 0;
+
+	pp_debug("Q%d num_frames:%d fore/rear/done: %d/%d/%d\n",
+		 q->q_idx, num_frames,
+		 *q->fore_ptr, *q->rear_ptr, q->done_ptr);
+
+	while (rcvd_total < num_frames && budget_used < budget) {
+		struct sk_buff *skb = NULL;
+
+		ret = q->get_packet(q, &skb);
+		if (unlikely(ret < 0)) {
+			mif_err_limited("get_packet() error %d\n", ret);
+			break;
+		}
+		rcvd_total += ret;
+		budget_used++;
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+		/* skb will be null if dit fills the skb */
+		if (!skb) {
+			rcvd_dit += ret; /* ret will be always 1 */
+			continue;
+		}
+#endif
+
+		ret = q->mld->pass_skb_to_net(q->mld, skb);
+		if (ret < 0)
+			break;
+	}
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+	if (rcvd_dit) {
+		dit_kick(DIT_DIR_RX, false);
+
+		/* dit processed every packets*/
+		if (rcvd_dit == rcvd_total)
+			goto out;
+	}
+#endif
+
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+	if (rcvd_total - rcvd_dit > 0)
+#else
+	if (rcvd_total > 0)
+#endif
+		tpmon_start();
+#endif
+
+	/* DIT-owned buffers are refilled by DIT itself; only refill what
+	 * was delivered directly here.
+	 */
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+	q->update_fore_ptr(q, rcvd_total - rcvd_dit);
+#else
+	q->update_fore_ptr(q, rcvd_total);
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+out:
+#endif
+	*work_done = rcvd_total;
+
+	return ret;
+}
+
+/*
+ * perftest
+ */
+/* IPI/SMP-call target for perftest: schedule NAPI on the chosen CPU when
+ * the queue has pending descriptors.
+ */
+static void pktproc_perftest_napi_schedule(void *arg)
+{
+	struct pktproc_queue *q = arg;
+
+	if (unlikely(!q)) {
+		mif_err_limited("q is null\n");
+		return;
+	}
+
+	if (pktproc_get_usage(q) && napi_schedule_prep(q->napi_ptr)) {
+		q->disable_irq(q);
+		__napi_schedule(q->napi_ptr);
+	}
+}
+
+/* Fabricate up to @packet_num RX packets in-place for the perftest.
+ *
+ * For each free descriptor slot, a "done" descriptor is written and the
+ * payload buffer is filled from the mode's template header, with a running
+ * sequence number and a per-@session destination port (5001 + session)
+ * patched in (plus the configured IPv6 destination for CLAT mode). The
+ * rear pointer is advanced as if CP had produced the packets.
+ * Returns the number of packets actually generated (limited by ring space).
+ */
+static unsigned int pktproc_perftest_gen_rx_packet_sktbuf_mode(
+		struct pktproc_queue *q, int packet_num, int session)
+{
+	struct pktproc_desc_sktbuf *desc = q->desc_sktbuf;
+	struct pktproc_perftest *perf = &q->ppa->perftest;
+	u32 header_len = perftest_data[perf->mode].header_len;
+	u32 rear_ptr;
+	unsigned int space, loop_count;
+	u8 *src;
+	u32 *seq;
+	u16 *dst_port;
+	u16 *dst_addr;
+	int i, j;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	struct modem_ctl *mc = dev_get_drvdata(q->ppa->dev);
+#endif
+
+	rear_ptr = *q->rear_ptr;
+	space = circ_get_space(q->num_desc, rear_ptr, *q->fore_ptr);
+	loop_count = min_t(unsigned int, space, packet_num);
+
+	for (i = 0 ; i < loop_count ; i++) {
+		/* set desc */
+		desc[rear_ptr].status =
+			PKTPROC_STATUS_DONE | PKTPROC_STATUS_TCPC | PKTPROC_STATUS_IPCS;
+		desc[rear_ptr].length = perftest_data[perf->mode].packet_len;
+		desc[rear_ptr].filter_result = 0x9;
+		desc[rear_ptr].channel_id = perf->ch;
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+		/* set data */
+		if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num))
+			src = q->ioc.pf_buf[rear_ptr] + q->ppa->skb_padding_size;
+		else
+			src = desc[rear_ptr].cp_data_paddr -
+				q->cp_buff_pbase + q->q_buff_vbase;
+#else
+		src = desc[rear_ptr].cp_data_paddr -
+			q->cp_buff_pbase + q->q_buff_vbase;
+#endif
+		memset(src, 0x0, desc[rear_ptr].length);
+		memcpy(src, perftest_data[perf->mode].header, header_len);
+		/* Sequence number right after the template header. */
+		seq = (u32 *)(src + header_len);
+		*seq = htonl(perf->seq_counter[session]++);
+		dst_port = (u16 *)(src + perftest_data[perf->mode].dst_port_offset);
+		*dst_port = htons(5001 + session);
+		if (perf->mode == PERFTEST_MODE_CLAT) {
+			/* Patch the IPv6 destination (8 x u16 at offset 24). */
+			for (j = 0 ; j < 8 ; j++) {
+				dst_addr = (u16 *)(src + 24 + (j * 2));
+				*dst_addr = htons(perf->clat_ipv6[j]);
+			}
+		}
+
+		rear_ptr = circ_new_ptr(q->num_desc, rear_ptr, 1);
+	}
+
+	*q->rear_ptr = rear_ptr;
+
+	return loop_count;
+}
+
+/* kthread body for the RX perftest generator.
+ *
+ * Per iteration it fabricates a burst of packets for each session (spread
+ * over the per-session queues when exclusive IRQs are in use) and kicks
+ * NAPI — remotely via smp_call_function_single() when the configured IPI
+ * CPU is online, locally otherwise — then sleeps perf->udelay microseconds.
+ * Runs until perf->test_run is cleared or kthread_stop() is called.
+ */
+static int pktproc_perftest_thread(void *arg)
+{
+	struct mem_link_device *mld = arg;
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	struct pktproc_queue *q = ppa->q[0];
+	struct pktproc_perftest *perf = &ppa->perftest;
+	bool session_queue = false;
+	int i, pkts;
+
+	/* One queue per session only when exclusive IRQs provide enough
+	 * queues for the requested session count.
+	 */
+	if (ppa->use_exclusive_irq && (perf->session > 1) && (perf->session <= ppa->num_queue))
+		session_queue = true;
+
+	/* max 1023 packets per 1ms for 12Gbps */
+	pkts = (perf->session > 0 ? (1023 / perf->session) : 0);
+	do {
+		for (i = 0 ; i < perf->session ; i++) {
+			int napi_cpu = perf->ipi_cpu[0];
+
+			if (session_queue)
+				q = ppa->q[i];
+
+			if (!pktproc_perftest_gen_rx_packet_sktbuf_mode(q, pkts, i))
+				continue;
+
+			if (session_queue)
+				napi_cpu = perf->ipi_cpu[i];
+
+			if (napi_cpu >= 0 && cpu_online(napi_cpu)) {
+				smp_call_function_single(napi_cpu,
+							 pktproc_perftest_napi_schedule,
+							 (void *)q, 0);
+			} else {
+				pktproc_perftest_napi_schedule(q);
+			}
+		}
+
+		udelay(perf->udelay);
+
+		if (kthread_should_stop())
+			break;
+	} while (perf->test_run);
+
+	return 0;
+}
+
+/*
+ * sysfs write handler: parse the perf-test parameters and start/stop the
+ * generator thread. Input format depends on the PREVIOUSLY stored mode
+ * (CLAT input additionally carries an IPv6 prefix).
+ */
+static ssize_t perftest_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+	struct link_device *ld = get_current_link(mc->iod);
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	struct pktproc_perftest new_perf;
+	struct pktproc_perftest *perf = &ppa->perftest;
+
+	/* worker thread persists across store() calls */
+	static struct task_struct *worker_task;
+	int ret;
+
+	/* NOTE(review): ipi_cpu[] is parsed with %hu below — confirm the field
+	 * type keeps -1 negative when read back in pktproc_perftest_thread().
+	 */
+	perf->ipi_cpu[0] = -1;
+	if (ppa->use_exclusive_irq) {
+		perf->ipi_cpu[0] = 4;
+		perf->ipi_cpu[1] = 4;
+		perf->ipi_cpu[2] = 4;
+		perf->ipi_cpu[3] = 4;
+	}
+
+	memcpy((void *)&new_perf, (void *)perf, sizeof(struct pktproc_perftest));
+
+	/* Parse according to the current (previous) mode */
+	switch (perf->mode) {
+	case PERFTEST_MODE_CLAT:
+		ret = sscanf(buf,
+			"%u %hu %hu %hu %hu %hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx %hu %hu %hu %hu",
+			&new_perf.mode, &new_perf.session, &new_perf.ch,
+			&new_perf.cpu, &new_perf.udelay,
+			&new_perf.clat_ipv6[0], &new_perf.clat_ipv6[1], &new_perf.clat_ipv6[2],
+			&new_perf.clat_ipv6[3], &new_perf.clat_ipv6[4], &new_perf.clat_ipv6[5],
+			&new_perf.clat_ipv6[6], &new_perf.clat_ipv6[7],
+			&new_perf.ipi_cpu[0], &new_perf.ipi_cpu[1], &new_perf.ipi_cpu[2],
+			&new_perf.ipi_cpu[3]);
+		break;
+	default:
+		ret = sscanf(buf, "%u %hu %hu %hu %hu %hu %hu %hu %hu",
+			&new_perf.mode, &new_perf.session, &new_perf.ch,
+			&new_perf.cpu, &new_perf.udelay,
+			&new_perf.ipi_cpu[0], &new_perf.ipi_cpu[1], &new_perf.ipi_cpu[2],
+			&new_perf.ipi_cpu[3]);
+		break;
+	}
+
+	if (ret < 1)
+		return -EINVAL;
+
+	/* Clamp out-of-range inputs instead of rejecting them */
+	if (new_perf.mode >= PERFTEST_MODE_MAX)
+		new_perf.mode = PERFTEST_MODE_STOP;
+
+	if (new_perf.session > PKTPROC_MAX_QUEUE)
+		new_perf.session = PKTPROC_MAX_QUEUE;
+
+	if (new_perf.ch > SIPC5_CH_ID_MAX)
+		new_perf.ch = SIPC5_CH_ID_MAX;
+
+	memcpy((void *)perf, (void *)&new_perf, sizeof(struct pktproc_perftest));
+
+	switch (perf->mode) {
+	case PERFTEST_MODE_STOP:
+		/* guard against a NULL/stale task pointer */
+		if (perf->test_run && worker_task) {
+			kthread_stop(worker_task);
+			worker_task = NULL;
+		}
+
+		perf->seq_counter[0] = 0;
+		perf->seq_counter[1] = 0;
+		perf->seq_counter[2] = 0;
+		perf->seq_counter[3] = 0;
+		perf->test_run = false;
+		break;
+	case PERFTEST_MODE_IPV4:
+	case PERFTEST_MODE_CLAT:
+	case PERFTEST_MODE_IPV6:
+		/* restart: stop a previous run before spawning a new thread */
+		if (perf->test_run && worker_task) {
+			kthread_stop(worker_task);
+			worker_task = NULL;
+		}
+
+		perf->test_run = true;
+		worker_task = kthread_create_on_node(pktproc_perftest_thread,
+			mld, cpu_to_node(perf->cpu), "perftest/%d", perf->cpu);
+		if (IS_ERR(worker_task)) {
+			/* was: ERR_PTR passed unchecked to kthread_bind() */
+			long err = PTR_ERR(worker_task);
+
+			mif_err("kthread_create_on_node() error:%ld\n", err);
+			worker_task = NULL;
+			perf->test_run = false;
+			return err;
+		}
+		kthread_bind(worker_task, perf->cpu);
+		wake_up_process(worker_task);
+		break;
+	default:
+		break;
+	}
+
+	return count;
+}
+
+/* sysfs read handler: dump the current perf-test configuration */
+static ssize_t perftest_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+	struct link_device *ld = get_current_link(mc->iod);
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	struct pktproc_perftest *perf = &ppa->perftest;
+	ssize_t len = 0;
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "test_run:%d\n", perf->test_run);
+	len += scnprintf(buf + len, PAGE_SIZE - len, "mode:%d\n", perf->mode);
+	len += scnprintf(buf + len, PAGE_SIZE - len, "session:%d\n", perf->session);
+	len += scnprintf(buf + len, PAGE_SIZE - len, "ch:%d\n", perf->ch);
+	len += scnprintf(buf + len, PAGE_SIZE - len, "udelay:%d\n", perf->udelay);
+	len += scnprintf(buf + len, PAGE_SIZE - len, "cpu:%d\n", perf->cpu);
+
+	/* IPI targets only make sense with per-queue exclusive IRQs */
+	if (ppa->use_exclusive_irq)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "ipi cpu:%d %d %d %d\n",
+			perf->ipi_cpu[0], perf->ipi_cpu[1], perf->ipi_cpu[2], perf->ipi_cpu[3]);
+
+	if (perf->mode == PERFTEST_MODE_CLAT)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "clat %x:%x:%x:%x:%x:%x:%x:%x\n",
+			perf->clat_ipv6[0], perf->clat_ipv6[1], perf->clat_ipv6[2],
+			perf->clat_ipv6[3], perf->clat_ipv6[4], perf->clat_ipv6[5],
+			perf->clat_ipv6[6], perf->clat_ipv6[7]);
+
+	return len;
+}
+
+/*
+ * NAPI
+ */
+/* Re-arm this queue's CP-to-AP mailbox interrupt (paired with pktproc_disable_irq) */
+static void pktproc_enable_irq(struct pktproc_queue *q)
+{
+#if IS_ENABLED(CONFIG_MCU_IPC)
+	cp_mbox_enable_handler(q->irq_idx, q->mld->irq_cp2ap_msg);
+#endif
+}
+
+/* Mask this queue's CP-to-AP mailbox interrupt while NAPI is polling */
+static void pktproc_disable_irq(struct pktproc_queue *q)
+{
+#if IS_ENABLED(CONFIG_MCU_IPC)
+	cp_mbox_disable_handler(q->irq_idx, q->mld->irq_cp2ap_msg);
+#endif
+}
+
+/*
+ * NAPI poll callback: drain up to @budget packets from the RX ring.
+ * Returns the number of packets received, or @budget to stay scheduled.
+ */
+static int pktproc_poll(struct napi_struct *napi, int budget)
+{
+	struct pktproc_queue *q = container_of(napi, struct pktproc_queue, napi);
+	struct mem_link_device *mld = q->mld;
+	struct link_device *ld = &mld->link_dev;
+	struct modem_ctl *mc = ld->mc;
+
+	int ret;
+	u32 rcvd = 0;
+
+	/* Bail out when CP is offline or the queue is not active */
+	if (unlikely(!cp_online(mc)))
+		goto poll_exit;
+
+	if (!pktproc_check_active(q->ppa, q->q_idx))
+		goto poll_exit;
+
+	ret = q->clean_rx_ring(q, budget, &rcvd);
+	/* transient failure: keep NAPI scheduled and retry next poll */
+	if ((ret == -EBUSY) || (ret == -ENOMEM))
+		goto poll_retry;
+
+	if (rcvd < budget) {
+		/* ring drained within budget: complete NAPI, re-enable IRQ */
+		napi_complete_done(napi, rcvd);
+		q->enable_irq(q);
+
+		return rcvd;
+	}
+
+poll_retry:
+	/* budget exhausted: returning budget keeps this NAPI scheduled */
+	return budget;
+
+poll_exit:
+	napi_complete(napi);
+	q->enable_irq(q);
+
+	return 0;
+}
+
+/*
+ * IRQ handler
+ */
+/* Queue IRQ handler: hand RX processing off to NAPI */
+static irqreturn_t pktproc_irq_handler(int irq, void *arg)
+{
+	struct pktproc_queue *q = arg;
+
+	if (!q) {
+		mif_err_limited("q is null\n");
+		return IRQ_HANDLED;
+	}
+
+	/* nothing pending in the ring: nothing to schedule */
+	if (!pktproc_get_usage(q))
+		return IRQ_HANDLED;
+
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+	tpmon_start();
+#endif
+
+	/* mask the IRQ while polling; pktproc_poll() re-enables it */
+	if (napi_schedule_prep(q->napi_ptr)) {
+		q->disable_irq(q);
+		__napi_schedule(q->napi_ptr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Debug
+ */
+/* sysfs: dump the static pktproc layout (adaptor config + per-queue regions) */
+static ssize_t region_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+	struct link_device *ld = get_current_link(mc->iod);
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	ssize_t count = 0;
+	int i;
+
+	/* Adaptor-wide configuration */
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "Version:%d\n", ppa->version);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "CP base:0x%08llx\n", ppa->cp_base);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "Descriptor mode:%d\n", ppa->desc_mode);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "Num of queue:%d\n", ppa->num_queue);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "NetRX manager:%d\n",
+		ppa->use_netrx_mng);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "Exclusive interrupt:%d\n",
+		ppa->use_exclusive_irq);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "HW cache coherency:%d\n",
+		ppa->use_hw_iocc);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "Max packet size:%d\n",
+		ppa->max_packet_size);
+	if (ppa->true_packet_size != ppa->max_packet_size) {
+		count += scnprintf(&buf[count], PAGE_SIZE - count, "True packet size:%d\n",
+			ppa->true_packet_size);
+	}
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "Padding size:%d\n",
+		ppa->skb_padding_size);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "Dedicated BAAW:%d\n",
+		ppa->use_dedicated_baaw);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "info:%s desc:%s/buff:%s\n",
+		ppa->info_rgn_cached ? "C" : "NC",
+		ppa->desc_rgn_cached ? "C" : "NC",
+		ppa->buff_rgn_cached ? "C" : "NC");
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "\n");
+
+	/* Per-queue layout */
+	for (i = 0; i < ppa->num_queue; i++) {
+		struct pktproc_queue *q = ppa->q[i];
+
+		if (!pktproc_check_active(ppa, q->q_idx)) {
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				"Queue %d is not active\n", i);
+			continue;
+		}
+
+		count += scnprintf(&buf[count], PAGE_SIZE - count, "Queue%d\n", i);
+		count += scnprintf(&buf[count], PAGE_SIZE - count, " num_desc:%d(0x%08x)\n",
+			q->q_info_ptr->num_desc, q->q_info_ptr->num_desc);
+		count += scnprintf(&buf[count], PAGE_SIZE - count, " cp_desc_pbase:0x%08x\n",
+			q->q_info_ptr->cp_desc_pbase);
+		count += scnprintf(&buf[count], PAGE_SIZE - count, " desc_size:0x%08x\n",
+			q->desc_size);
+		count += scnprintf(&buf[count], PAGE_SIZE - count, " cp_buff_pbase:0x%08x\n",
+			q->q_info_ptr->cp_buff_pbase);
+		count += scnprintf(&buf[count], PAGE_SIZE - count, " q_buff_size:0x%08x\n",
+			q->q_buff_size);
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+		count += scnprintf(&buf[count], PAGE_SIZE - count, " DIT:%d\n",
+			dit_check_dir_use_queue(DIT_DIR_RX, q->q_idx));
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_NETRX_MGR)
+		if (q->manager) {
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				"Buffer manager\n");
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				" total number of packets:%llu\n",
+				q->manager->num_packet);
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				" frag size:%llu\n",
+				q->manager->frag_size);
+			count += scnprintf(&buf[count], PAGE_SIZE - count, "\n");
+		}
+#endif
+	}
+
+	return count;
+}
+
+/* sysfs: dump the live ring state (pointers, usage, statistics) per queue */
+static ssize_t status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+	struct link_device *ld = get_current_link(mc->iod);
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	ssize_t count = 0;
+	int i;
+
+	for (i = 0; i < ppa->num_queue; i++) {
+		struct pktproc_queue *q = ppa->q[i];
+
+		if (!pktproc_check_active(ppa, q->q_idx)) {
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				"Queue %d is not active\n", i);
+			continue;
+		}
+
+		count += scnprintf(&buf[count], PAGE_SIZE - count, "Queue%d\n", i);
+		count += scnprintf(&buf[count], PAGE_SIZE - count, " num_desc:%d\n",
+			q->num_desc);
+		/* Pointer layout depends on the descriptor mode */
+		switch (ppa->desc_mode) {
+		case DESC_MODE_RINGBUF:
+			count += scnprintf(&buf[count], PAGE_SIZE - count, " fore/rear:%d/%d\n",
+				*q->fore_ptr, *q->rear_ptr);
+			count += scnprintf(&buf[count], PAGE_SIZE - count, " fore~rear:%d\n",
+				circ_get_usage(q->num_desc, *q->fore_ptr, *q->rear_ptr));
+			break;
+		case DESC_MODE_SKTBUF:
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				" fore/rear/done:%d/%d/%d\n",
+				*q->fore_ptr, *q->rear_ptr, q->done_ptr);
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				" fore~rear:%d rear~done:%d rear~fore:%d\n",
+				circ_get_usage(q->num_desc, *q->fore_ptr, *q->rear_ptr),
+				circ_get_usage(q->num_desc, *q->rear_ptr, q->done_ptr),
+				circ_get_usage(q->num_desc, *q->rear_ptr, *q->fore_ptr));
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+			if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num)) {
+				count += scnprintf(&buf[count], PAGE_SIZE - count,
+					" iommu_mapped cnt:%u size:0x%llX\n",
+					q->ioc.mapped_cnt, q->ioc.mapped_size);
+			}
+#endif
+			break;
+		default:
+			break;
+		}
+
+		count += scnprintf(&buf[count], PAGE_SIZE - count, " pass:%lld lro:%lld\n",
+			q->stat.pass_cnt, q->stat.lro_cnt);
+		count += scnprintf(&buf[count], PAGE_SIZE - count,
+			" fail:len%lld chid%lld addr%lld nomem%lld bmnomem%lld csum%lld\n",
+			q->stat.err_len, q->stat.err_chid, q->stat.err_addr,
+			q->stat.err_nomem, q->stat.err_bm_nomem, q->stat.err_csum);
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+		if (dit_check_dir_use_queue(DIT_DIR_RX, q->q_idx))
+			count += scnprintf(&buf[count], PAGE_SIZE - count,
+				" fail:enqueue_dit%lld\n",
+				q->stat.err_enqueue_dit);
+#endif
+	}
+
+	return count;
+}
+
+/* sysfs nodes exposed under the "pktproc" group (registered in pktproc_create()) */
+static DEVICE_ATTR_RO(region);
+static DEVICE_ATTR_RO(status);
+static DEVICE_ATTR_RW(perftest);
+static DEVICE_ATTR_RW(pktgen_gro);
+
+static struct attribute *pktproc_attrs[] = {
+	&dev_attr_region.attr,
+	&dev_attr_status.attr,
+	&dev_attr_perftest.attr,
+	&dev_attr_pktgen_gro.attr,
+	NULL,
+};
+
+static const struct attribute_group pktproc_group = {
+	.attrs = pktproc_attrs,
+	.name = "pktproc",
+};
+
+/*
+ * Initialize PktProc
+ */
+/*
+ * (Re)initialize all pktproc queues: reset ring pointers, publish the
+ * CP-visible descriptor/buffer bases, clear stats and refill RX buffers.
+ * Returns 0; per-queue allocation failures are logged and that queue is
+ * left inactive.
+ */
+int pktproc_init(struct pktproc_adaptor *ppa)
+{
+	int i;
+	int ret = 0;
+	struct mem_link_device *mld;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	struct modem_ctl *mc;
+#endif
+
+	if (!ppa) {
+		mif_err("ppa is null\n");
+		return -EPERM;
+	}
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	/* was: ppa->dev dereferenced at declaration, before the NULL check */
+	mc = dev_get_drvdata(ppa->dev);
+#endif
+
+	mld = container_of(ppa, struct mem_link_device, pktproc);
+
+	mif_info("version:%d cp_base:0x%08llx desc_mode:%d num_queue:%d\n",
+		ppa->version, ppa->cp_base, ppa->desc_mode, ppa->num_queue);
+	mif_info("interrupt:%d iocc:%d max_packet_size:%d\n",
+		ppa->use_exclusive_irq, ppa->use_hw_iocc, ppa->max_packet_size);
+
+	for (i = 0; i < ppa->num_queue; i++) {
+		struct pktproc_queue *q = ppa->q[i];
+
+		mif_info("Q%d\n", i);
+
+		/* make sure no NAPI poll is in flight before resetting */
+		napi_synchronize(&q->napi);
+
+		switch (ppa->desc_mode) {
+		case DESC_MODE_SKTBUF:
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+			if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num))
+				cpif_pcie_iommu_reset(q);
+#endif
+			if (pktproc_check_active(q->ppa, q->q_idx))
+				q->clear_data_addr(q);
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_NETRX_MGR)
+			if (q->manager)
+				mif_info("num packets:%llu frag size:%llu\n",
+					q->manager->num_packet,
+					q->manager->frag_size);
+#endif
+			break;
+		default:
+			break;
+		}
+
+		/* reset ring pointers */
+		*q->fore_ptr = 0;
+		*q->rear_ptr = 0;
+		q->done_ptr = 0;
+
+		/* publish CP-visible bases; 36-bit addresses are stored >> 4 */
+		if (mld->pktproc_use_36bit_addr) {
+			q->q_info_ptr->cp_desc_pbase = q->cp_desc_pbase >> 4;
+			q->q_info_ptr->cp_buff_pbase = q->cp_buff_pbase >> 4;
+		} else {
+			q->q_info_ptr->cp_desc_pbase = q->cp_desc_pbase;
+			q->q_info_ptr->cp_buff_pbase = q->cp_buff_pbase;
+		}
+
+		q->q_info_ptr->num_desc = q->num_desc;
+
+		memset(&q->stat, 0, sizeof(struct pktproc_statistics));
+
+		switch (ppa->desc_mode) {
+		case DESC_MODE_SKTBUF:
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+			if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num)) {
+				ret = cpif_pcie_iommu_init(q);
+				if (ret) {
+					mif_err("cpif_pcie_iommu_init() error %d Q%d\n", ret, q->q_idx);
+					continue;
+				}
+			}
+#endif
+			ret = q->alloc_rx_buf(q);
+			if (ret) {
+				/* leave this queue inactive; keep going with the rest */
+				mif_err("alloc_rx_buf() error %d Q%d\n", ret, q->q_idx);
+				continue;
+			}
+			break;
+		default:
+			break;
+		}
+
+		mif_info("num_desc:0x%08x cp_desc_pbase:0x%08x desc_size:0x%08x\n",
+			q->q_info_ptr->num_desc, q->q_info_ptr->cp_desc_pbase,
+			q->desc_size);
+		mif_info("cp_buff_pbase:0x%08llx q_buff_size:0x%08x\n",
+			q->cp_buff_pbase, q->q_buff_size);
+		mif_info("fore:%d rear:%d done:%d\n",
+			*q->fore_ptr, *q->rear_ptr, q->done_ptr);
+
+		atomic_set(&q->active, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Create PktProc
+ */
+/* Create the netrx buffer manager for one queue (SKTBUF + netrx_mng mode) */
+static int pktproc_create_buffer_manager(struct pktproc_queue *q, u64 ap_desc_pbase)
+{
+	struct pktproc_adaptor *ppa;
+	struct cpif_addr_pair desc_addr_pair;
+	unsigned int desc_bytes;
+	u64 frag_bytes;
+
+	if (!q) {
+		mif_err("q is null\n");
+		return -EINVAL;
+	}
+
+	ppa = q->ppa;
+	if (!ppa) {
+		mif_err("ppa is null\n");
+		return -EINVAL;
+	}
+
+	if (!ppa->use_netrx_mng) {
+		mif_err("use_netrx_mng is not set\n");
+		return -EINVAL;
+	}
+
+	if (q->manager) {
+		mif_info("buffer manager is already initialized\n");
+		return 0;
+	}
+
+	/* total size of this queue's descriptor ring in bytes */
+	desc_bytes = q->num_desc * sizeof(struct pktproc_desc_sktbuf);
+
+	/* per-packet page fragment: payload + front padding + skb_shared_info */
+	frag_bytes = SKB_DATA_ALIGN(ppa->max_packet_size + ppa->skb_padding_size)
+		+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	desc_addr_pair.cp_addr = q->cp_desc_pbase;
+	desc_addr_pair.ap_addr = phys_to_virt(ap_desc_pbase);
+
+	mif_info("about to init netrx mng: cp_addr: 0x%llX ap_addr: %pK frag_size: %llu\n",
+		q->cp_desc_pbase, q->desc_sktbuf, frag_bytes);
+	mif_info("desc_total_size:%d cp_buff_pbase: 0x%llX num_desc: %d\n",
+		desc_bytes, q->cp_buff_pbase, q->num_desc);
+
+	q->manager = cpif_create_netrx_mng(&desc_addr_pair, desc_bytes,
+					q->cp_buff_pbase, frag_bytes,
+					q->num_desc);
+	if (!q->manager) {
+		mif_err("cpif_create_netrx_mng() error\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Derive skb_padding_size / true_packet_size from the configured mode */
+static void pktproc_adjust_size(struct pktproc_adaptor *ppa)
+{
+	bool pad_front = ppa->use_netrx_mng;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	struct modem_ctl *mc = dev_get_drvdata(ppa->dev);
+
+	/* IOMMU-backed buffers always reserve front padding */
+	if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num))
+		pad_front = true;
+#endif
+
+	ppa->skb_padding_size = pad_front ? SKB_FRONT_PADDING : 0;
+
+	ppa->true_packet_size = ppa->max_packet_size;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num)) {
+		/* account for padding + skb_shared_info, then round up to a
+		 * power of two so page fragments pack evenly
+		 */
+		ppa->true_packet_size += ppa->skb_padding_size;
+		ppa->true_packet_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+		mif_info("adjusted iommu required:%u true_packet_size:%lu\n",
+			ppa->true_packet_size, roundup_pow_of_two(ppa->true_packet_size));
+		ppa->true_packet_size = roundup_pow_of_two(ppa->true_packet_size);
+		ppa->space_margin = PAGE_FRAG_CACHE_MAX_SIZE / ppa->true_packet_size;
+	}
+#endif
+}
+
+/*
+ * Read the pktproc DL configuration from the device tree into @ppa and
+ * sanity-check it against the kernel config. Returns 0 or a negative errno.
+ */
+static int pktproc_get_info(struct pktproc_adaptor *ppa, struct device_node *np)
+{
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	struct modem_ctl *mc = dev_get_drvdata(ppa->dev);
+#endif
+
+	mif_dt_read_u64(np, "pktproc_cp_base", ppa->cp_base);
+	mif_dt_read_u32(np, "pktproc_dl_version", ppa->version);
+
+	switch (ppa->version) {
+	case PKTPROC_V1:
+		/* V1 is fixed: single queue, ring-buffer mode, shared IRQ */
+		ppa->desc_mode = DESC_MODE_RINGBUF;
+		ppa->num_queue = 1;
+		ppa->use_exclusive_irq = 0;
+		break;
+	case PKTPROC_V2:
+		mif_dt_read_u32(np, "pktproc_dl_desc_mode", ppa->desc_mode);
+		mif_dt_read_u32(np, "pktproc_dl_num_queue", ppa->num_queue);
+		if (ppa->num_queue > PKTPROC_MAX_QUEUE)
+			ppa->num_queue = PKTPROC_MAX_QUEUE;
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_IOMMU)
+		mif_dt_read_u32(np, "pktproc_dl_use_netrx_mng", ppa->use_netrx_mng);
+		mif_dt_read_u32(np, "pktproc_dl_netrx_capacity", ppa->netrx_capacity);
+		/* Check if config and dt are consistent */
+		if (ppa->use_netrx_mng != IS_ENABLED(CONFIG_EXYNOS_CPIF_NETRX_MGR)) {
+			mif_err("netrx mgr config and dt are inconsistent\n");
+			panic("netrx mgr config and dt are inconsistent\n");
+			return -EINVAL;
+		}
+#else
+		ppa->use_netrx_mng = 0;
+		ppa->netrx_capacity = 0;
+#endif
+		mif_dt_read_u32(np, "pktproc_dl_use_exclusive_irq", ppa->use_exclusive_irq);
+#if IS_ENABLED(CONFIG_MCU_IPC)
+		if (ppa->use_exclusive_irq) {
+			int ret;
+
+			ret = of_property_read_u32_array(np, "pktproc_dl_exclusive_irq_idx",
+							ppa->exclusive_irq_idx, ppa->num_queue);
+			if (ret) {
+				mif_err("pktproc_dl_exclusive_irq_idx error:%d\n", ret);
+				return ret;
+			}
+		}
+#endif
+		break;
+	default:
+		mif_err("Unsupported version:%d\n", ppa->version);
+		return -EINVAL;
+	}
+
+	mif_info("version:%d cp_base:0x%08llx mode:%d num_queue:%d\n",
+		ppa->version, ppa->cp_base, ppa->desc_mode, ppa->num_queue);
+	mif_info("use_netrx_mng:%d netrx_capacity:%d exclusive_irq:%d\n",
+		ppa->use_netrx_mng, ppa->netrx_capacity, ppa->use_exclusive_irq);
+
+	mif_dt_read_u32(np, "pktproc_dl_use_hw_iocc", ppa->use_hw_iocc);
+	mif_dt_read_u32(np, "pktproc_dl_max_packet_size", ppa->max_packet_size);
+	mif_dt_read_u32(np, "pktproc_dl_use_dedicated_baaw", ppa->use_dedicated_baaw);
+	mif_info("iocc:%d max_packet_size:%d baaw:%d\n",
+		ppa->use_hw_iocc, ppa->max_packet_size, ppa->use_dedicated_baaw);
+
+	/* Shared-memory region layout (offsets relative to the CP base) */
+	mif_dt_read_u32(np, "pktproc_dl_info_rgn_offset", ppa->info_rgn_offset);
+	mif_dt_read_u32(np, "pktproc_dl_info_rgn_size", ppa->info_rgn_size);
+	mif_dt_read_u32(np, "pktproc_dl_desc_rgn_offset", ppa->desc_rgn_offset);
+	mif_dt_read_u32(np, "pktproc_dl_desc_rgn_size", ppa->desc_rgn_size);
+	mif_dt_read_u32(np, "pktproc_dl_buff_rgn_offset", ppa->buff_rgn_offset);
+	mif_dt_read_u32(np, "pktproc_dl_buff_rgn_size", ppa->buff_rgn_size);
+	mif_info("info_rgn 0x%08x 0x%08x desc_rgn 0x%08x 0x%08x %u buff_rgn 0x%08x 0x%08x\n",
+		ppa->info_rgn_offset, ppa->info_rgn_size,
+		ppa->desc_rgn_offset, ppa->desc_rgn_size,
+		ppa->desc_num_ratio_percent,
+		ppa->buff_rgn_offset, ppa->buff_rgn_size);
+
+	mif_dt_read_u32(np, "pktproc_dl_info_rgn_cached", ppa->info_rgn_cached);
+	mif_dt_read_u32(np, "pktproc_dl_desc_rgn_cached", ppa->desc_rgn_cached);
+	mif_dt_read_u32(np, "pktproc_dl_buff_rgn_cached", ppa->buff_rgn_cached);
+	mif_info("cached:%d/%d/%d\n", ppa->info_rgn_cached, ppa->desc_rgn_cached,
+		ppa->buff_rgn_cached);
+	if (ppa->use_netrx_mng && !ppa->buff_rgn_cached) {
+		mif_err("Buffer manager requires cached buff region\n");
+		return -EINVAL;
+	}
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num)) {
+		if (ppa->use_netrx_mng || !ppa->buff_rgn_cached || ppa->desc_mode != DESC_MODE_SKTBUF) {
+			mif_err("not compatible with pcie iommu\n");
+			return -EINVAL;
+		}
+	}
+#endif
+
+	/* Check if config and dt are consistent */
+	if (ppa->use_hw_iocc != IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOCC)) {
+		mif_err("PCIe IOCC config and dt are inconsistent\n");
+		panic("PCIe IOCC config and dt are inconsistent\n");
+		return -EINVAL;
+	}
+
+	pktproc_adjust_size(ppa);
+
+	return 0;
+}
+
+/*
+ * Create the pktproc adaptor: parse DT config, map the info/desc/buff
+ * shared-memory regions, allocate and wire up each RX queue, and register
+ * the "pktproc" sysfs group. Returns 0 or a negative errno; on failure all
+ * partially-created queues and mappings are torn down.
+ */
+int pktproc_create(struct platform_device *pdev, struct mem_link_device *mld,
+		unsigned long memaddr, u32 memsize)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	/* was: both left uninitialized; only one branch below sets each */
+	u32 buff_size_by_q = 0;
+	u32 accum_buff_size = 0;
+	u32 alloc_size;
+	int i;
+	int ret = 0;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	struct modem_ctl *mc = dev_get_drvdata(&pdev->dev);
+#endif
+
+	if (!np) {
+		mif_err("of_node is null\n");
+		return -EINVAL;
+	}
+	if (!ppa) {
+		mif_err("ppa is null\n");
+		return -EINVAL;
+	}
+
+	ppa->dev = &pdev->dev;
+
+	mif_dt_read_u32_noerr(np, "pktproc_dl_support", ppa->support);
+	if (!ppa->support) {
+		mif_err("pktproc_support is 0.\n");
+		panic("pktproc_support is 0\n");
+		return 0;
+	}
+
+	/* Get info */
+	ret = pktproc_get_info(ppa, np);
+	if (ret != 0) {
+		mif_err("pktproc_get_dt() error %d\n", ret);
+		return ret;
+	}
+
+	if (!ppa->use_hw_iocc && ppa->info_rgn_cached) {
+		mif_err("cannot support sw iocc based caching on info region\n");
+		return -EINVAL;
+	}
+
+	if (!ppa->use_hw_iocc && ppa->desc_rgn_cached) {
+		mif_err("cannot support sw iocc based caching on desc region\n");
+		return -EINVAL;
+	}
+
+	/* Get base addr */
+	mif_info("memaddr:0x%lx memsize:0x%08x\n", memaddr, memsize);
+
+	if (ppa->info_rgn_cached)
+		ppa->info_vbase = phys_to_virt(memaddr + ppa->info_rgn_offset);
+	else {
+		ppa->info_vbase = cp_shmem_get_nc_region(memaddr + ppa->info_rgn_offset,
+						ppa->info_rgn_size);
+		if (!ppa->info_vbase) {
+			mif_err("ppa->info_base error\n");
+			return -ENOMEM;
+		}
+	}
+
+	if (ppa->desc_rgn_cached)
+		ppa->desc_vbase = phys_to_virt(memaddr + ppa->desc_rgn_offset);
+	else {
+		ppa->desc_vbase = cp_shmem_get_nc_region(memaddr + ppa->desc_rgn_offset,
+						ppa->desc_rgn_size);
+		if (!ppa->desc_vbase) {
+			mif_err("ppa->desc_base error\n");
+			return -ENOMEM;
+		}
+	}
+	memset(ppa->info_vbase, 0, ppa->info_rgn_size);
+	memset(ppa->desc_vbase, 0, ppa->desc_rgn_size);
+	mif_info("info + desc size:0x%08x\n", ppa->info_rgn_size + ppa->desc_rgn_size);
+
+	/* Without the netrx manager the buff region is split evenly per queue */
+	if (!ppa->use_netrx_mng) {
+		buff_size_by_q = ppa->buff_rgn_size / ppa->num_queue;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+		if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num)) {
+			mif_info("Rounded down queue size from 0x%08x to 0x%08x\n",
+				buff_size_by_q, rounddown(buff_size_by_q, SZ_4K));
+			buff_size_by_q = rounddown(buff_size_by_q, SZ_4K);
+		}
+#endif
+		ppa->buff_pbase = memaddr + ppa->buff_rgn_offset;
+		if (ppa->buff_rgn_cached) {
+			ppa->buff_vbase = phys_to_virt(ppa->buff_pbase);
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+			if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num)) {
+				mif_info("release iommu buffer region offset:0x%08x\n",
+					ppa->buff_rgn_offset);
+				cp_shmem_release_rmem(mld->link_dev.mdm_data->cp_num,
+					SHMEM_PKTPROC, ppa->buff_rgn_offset);
+			}
+#endif
+		} else {
+			ppa->buff_vbase = cp_shmem_get_nc_region(ppa->buff_pbase,
+							ppa->buff_rgn_size);
+		}
+		mif_info("Total buff buffer size:0x%08x Queue:%d Size by queue:0x%08x\n",
+			ppa->buff_rgn_size, ppa->num_queue, buff_size_by_q);
+	}
+
+	/* Create queue */
+	for (i = 0; i < ppa->num_queue; i++) {
+		struct pktproc_queue *q;
+
+		mif_info("Create queue %d\n", i);
+
+		ppa->q[i] = kzalloc(sizeof(*ppa->q[i]), GFP_ATOMIC);
+		if (ppa->q[i] == NULL) {
+			ret = -ENOMEM;
+			goto create_error;
+		}
+		q = ppa->q[i];
+		q->ppa = ppa;
+
+		atomic_set(&q->active, 0);
+
+		/* Info region */
+		switch (ppa->version) {
+		case PKTPROC_V1:
+			q->info_v1 = (struct pktproc_info_v1 *)ppa->info_vbase;
+			q->q_info_ptr = &q->info_v1->q_info;
+			break;
+		case PKTPROC_V2:
+			q->info_v2 = (struct pktproc_info_v2 *)ppa->info_vbase;
+			q->info_v2->num_queues = ppa->num_queue;
+			q->info_v2->desc_mode = ppa->desc_mode;
+			q->info_v2->irq_mode = ppa->use_exclusive_irq;
+			q->info_v2->max_packet_size = ppa->max_packet_size;
+			q->q_info_ptr = &q->info_v2->q_info[i];
+			break;
+		default:
+			mif_err("Unsupported version:%d\n", ppa->version);
+			ret = -EINVAL;
+			goto create_error;
+		}
+
+		/* Descriptor, data buffer region */
+		switch (ppa->desc_mode) {
+		case DESC_MODE_RINGBUF:
+			q->q_buff_pbase = ppa->buff_pbase + (i * buff_size_by_q);
+			q->q_buff_vbase = ppa->buff_vbase + (i * buff_size_by_q);
+			q->cp_buff_pbase = ppa->cp_base + ppa->buff_rgn_offset +
+				(i * buff_size_by_q);
+			if (mld->pktproc_use_36bit_addr)
+				q->q_info_ptr->cp_buff_pbase = q->cp_buff_pbase >> 4;
+			else
+				q->q_info_ptr->cp_buff_pbase = q->cp_buff_pbase;
+			q->q_buff_size = buff_size_by_q;
+#if !IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOCC)
+			if (ppa->buff_rgn_cached && !ppa->use_hw_iocc)
+				dma_sync_single_for_device(ppa->dev,
+					q->q_buff_pbase, q->q_buff_size, DMA_FROM_DEVICE);
+#endif
+			q->num_desc = buff_size_by_q / ppa->true_packet_size;
+			q->q_info_ptr->num_desc = q->num_desc;
+
+			q->desc_ringbuf = ppa->desc_vbase +
+				(i * sizeof(struct pktproc_desc_ringbuf) *
+				 q->num_desc);
+			q->cp_desc_pbase = ppa->cp_base + ppa->desc_rgn_offset +
+				(i * sizeof(struct pktproc_desc_ringbuf) *
+				 q->num_desc);
+			if (mld->pktproc_use_36bit_addr)
+				q->q_info_ptr->cp_desc_pbase = q->cp_desc_pbase >> 4;
+			else
+				q->q_info_ptr->cp_desc_pbase = q->cp_desc_pbase;
+			q->desc_size = sizeof(struct pktproc_desc_ringbuf) * q->num_desc;
+
+			q->get_packet = pktproc_get_pkt_from_ringbuf_mode;
+			q->irq_handler = pktproc_irq_handler;
+			break;
+		case DESC_MODE_SKTBUF:
+			if (ppa->use_netrx_mng) {
+				q->num_desc = ppa->netrx_capacity;
+				q->alloc_rx_buf = pktproc_fill_data_addr;
+				q->clear_data_addr = pktproc_clear_data_addr;
+				q->cp_buff_pbase = ppa->cp_base + ppa->buff_rgn_offset
+					+ accum_buff_size;
+
+			} else {
+				q->q_buff_pbase = ppa->buff_pbase + (i * buff_size_by_q);
+				q->q_buff_vbase = ppa->buff_vbase + (i * buff_size_by_q);
+				q->cp_buff_pbase = ppa->cp_base + ppa->buff_rgn_offset +
+					(i * buff_size_by_q);
+				q->q_buff_size = buff_size_by_q;
+				q->num_desc = buff_size_by_q / ppa->true_packet_size;
+				q->alloc_rx_buf = pktproc_fill_data_addr_without_bm;
+				q->clear_data_addr = pktproc_clear_data_addr_without_bm;
+			}
+
+			if (mld->pktproc_use_36bit_addr)
+				q->q_info_ptr->cp_buff_pbase = q->cp_buff_pbase >> 4;
+			else
+				q->q_info_ptr->cp_buff_pbase = q->cp_buff_pbase;
+
+			q->q_info_ptr->num_desc = q->num_desc;
+
+			q->desc_sktbuf = ppa->desc_vbase +
+				(i * sizeof(struct pktproc_desc_sktbuf) *
+				 q->num_desc);
+			q->cp_desc_pbase = ppa->cp_base + ppa->desc_rgn_offset +
+				(i * sizeof(struct pktproc_desc_sktbuf) *
+				 q->num_desc);
+			if (mld->pktproc_use_36bit_addr)
+				q->q_info_ptr->cp_desc_pbase = q->cp_desc_pbase >> 4;
+			else
+				q->q_info_ptr->cp_desc_pbase = q->cp_desc_pbase;
+
+			mif_info("cp_desc_pbase - 36bit addr: 0x%08llx, 32bit addr: 0x%08x\n",
+				q->cp_desc_pbase, q->q_info_ptr->cp_desc_pbase);
+
+			q->desc_size = sizeof(struct pktproc_desc_sktbuf) * q->num_desc;
+
+			alloc_size = sizeof(dma_addr_t) * q->num_desc;
+			q->dma_addr = kzalloc(alloc_size, GFP_KERNEL);
+			if (!q->dma_addr) {
+				mif_err("kzalloc() dma_addr failed\n");
+				ret = -ENOMEM;
+				goto create_error;
+			}
+
+			if (ppa->use_netrx_mng) {
+				/* to make phys_to_virt macro operable */
+				u64 ap_desc_pbase = memaddr + ppa->desc_rgn_offset +
+						(i * sizeof(struct pktproc_desc_sktbuf)
+						 * q->num_desc);
+				mif_info("create buffer manager\n");
+				ret = pktproc_create_buffer_manager(q, ap_desc_pbase);
+				if (ret < 0) {
+					mif_err("failed to create netrx mng:%d\n", ret);
+					goto create_error;
+				}
+				accum_buff_size += q->manager->total_buf_size;
+			}
+
+			q->get_packet = pktproc_get_pkt_from_sktbuf_mode;
+			q->irq_handler = pktproc_irq_handler;
+			q->update_fore_ptr = pktproc_update_fore_ptr;
+			break;
+		default:
+			/* was: misreported as "Unsupported version" */
+			mif_err("Unsupported desc_mode:%d\n", ppa->desc_mode);
+			ret = -EINVAL;
+			goto create_error;
+		}
+
+		/* descriptor ring must not run into the buffer region */
+		if ((!q->manager) &&
+		    (q->cp_desc_pbase + q->desc_size) > q->cp_buff_pbase) {
+			mif_err("Descriptor overflow:0x%08llx 0x%08x 0x%08llx\n",
+				q->cp_desc_pbase, q->desc_size, q->cp_buff_pbase);
+			ret = -EINVAL;
+			goto create_error;
+		}
+
+		spin_lock_init(&q->lock);
+
+		q->clean_rx_ring = pktproc_clean_rx_ring;
+
+		q->q_idx = i;
+		q->mld = mld;
+
+		/* NAPI */
+		if (ppa->use_exclusive_irq) {
+			init_dummy_netdev(&q->netdev);
+			netif_napi_add_weight(&q->netdev, &q->napi, pktproc_poll, NAPI_POLL_WEIGHT);
+			napi_enable(&q->napi);
+			q->napi_ptr = &q->napi;
+		} else {
+			q->napi_ptr = &q->mld->mld_napi;
+		}
+
+		/* IRQ handler */
+		q->enable_irq = pktproc_enable_irq;
+		q->disable_irq = pktproc_disable_irq;
+		if (ppa->use_exclusive_irq) {
+#if IS_ENABLED(CONFIG_MCU_IPC)
+			q->irq_idx = ppa->exclusive_irq_idx[q->q_idx];
+			ret = cp_mbox_register_handler(q->irq_idx,
+					mld->irq_cp2ap_msg, q->irq_handler, q);
+			if (ret) {
+				mif_err("cp_mbox_register_handler() error:%d\n", ret);
+				goto create_error;
+			}
+#endif
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+			/* Set by request_pcie_msi_int() */
+#endif
+		}
+
+		q->q_info_ptr->fore_ptr = 0;
+		q->q_info_ptr->rear_ptr = 0;
+
+		q->fore_ptr = &q->q_info_ptr->fore_ptr;
+		q->rear_ptr = &q->q_info_ptr->rear_ptr;
+		q->done_ptr = *q->rear_ptr;
+
+		mif_info("num_desc:%d cp_desc_pbase:0x%08llx desc_size:0x%08x\n",
+			q->num_desc, q->cp_desc_pbase, q->desc_size);
+		if (!q->manager)
+			mif_info("cp_buff_pbase:0x%08llx buff_size:0x%08x\n",
+				q->cp_buff_pbase, q->q_buff_size);
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+		if (q->q_idx == 0) {
+			ret = dit_set_pktproc_queue_num(DIT_DIR_RX, q->q_idx);
+			if (ret)
+				mif_err("dit_set_buf_size() error:%d\n", ret);
+
+			ret = dit_set_buf_size(DIT_DIR_RX, ppa->max_packet_size);
+			if (ret)
+				mif_err("dit_set_buf_size() error:%d\n", ret);
+
+			ret = dit_set_desc_ring_len(DIT_DIR_RX, q->num_desc - 1);
+			if (ret)
+				mif_err("dit_set_desc_ring_len() error:%d\n", ret);
+		}
+#endif
+	}
+
+	/* Debug */
+	ret = sysfs_create_group(&pdev->dev.kobj, &pktproc_group);
+	if (ret != 0) {
+		mif_err("sysfs_create_group() error %d\n", ret);
+		goto create_error;
+	}
+
+	return 0;
+
+create_error:
+	/* NOTE(review): NAPI contexts enabled above are not deleted here —
+	 * confirm whether netif_napi_del() is needed on this path.
+	 */
+	for (i = 0; i < ppa->num_queue; i++) {
+		if (!ppa->q[i])
+			continue;
+
+		if (ppa->q[i]->manager)
+			cpif_exit_netrx_mng(ppa->q[i]->manager);
+
+		kfree(ppa->q[i]->dma_addr);
+		kfree(ppa->q[i]);
+	}
+
+	if (!ppa->info_rgn_cached && ppa->info_vbase)
+		vunmap(ppa->info_vbase);
+	if (!ppa->desc_rgn_cached && ppa->desc_vbase)
+		vunmap(ppa->desc_vbase);
+	if (!ppa->buff_rgn_cached && ppa->buff_vbase)
+		vunmap(ppa->buff_vbase);
+
+	return ret;
+}
diff --git a/link_rx_pktproc.h b/link_rx_pktproc.h
new file mode 100644
index 0000000..0add65b
--- /dev/null
+++ b/link_rx_pktproc.h
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019, Samsung Electronics.
+ *
+ */
+
+#ifndef __LINK_RX_PKTPROC_H__
+#define __LINK_RX_PKTPROC_H__
+
+#include "cpif_netrx_mng.h"
+
+/* Debug */
+/* #define PKTPROC_DEBUG */
+/* #define PKTPROC_DEBUG_PKT */
+#ifdef PKTPROC_DEBUG
+#define pp_debug(fmt, ...) mif_info(fmt, ##__VA_ARGS__)
+#else
+#define pp_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
+#endif
+
+/* Numbers */
+#define PKTPROC_MAX_QUEUE 4
+
+/* Status bit field */
+#define PKTPROC_STATUS_DONE 0x01
+#define PKTPROC_STATUS_TCPCF 0x04
+#define PKTPROC_STATUS_IPCSF 0x08
+#define PKTPROC_STATUS_IGNR 0x10
+#define PKTPROC_STATUS_TCPC 0x20
+#define PKTPROC_STATUS_IPCS 0x40
+#define PKTPROC_STATUS_PFD 0x80
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_LRO)
+/* LRO bit field */
+#define LRO_LAST_SEG 0x01
+#define LRO_MID_SEG 0x02
+#define LRO_FIRST_SEG 0x04
+#define LRO_PACKET 0x08
+#define LRO_MODE_ON 0x10
+
+/* W/A padding for LRO packet (except first) TCP (60 bytes) + IPv6 (40 bytes) = 100 */
+#define SKB_FRONT_PADDING (NET_SKB_PAD + NET_IP_ALIGN + SKB_DATA_ALIGN(100))
+#else
+#define SKB_FRONT_PADDING (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
+/*
+ * PktProc info region
+ */
+/* Queue info */
struct pktproc_q_info {
	u32 cp_desc_pbase;	/* CP-view physical base address of the descriptor ring */
	u32 num_desc;		/* number of descriptors in the ring */
	u32 cp_buff_pbase;	/* CP-view physical base address of the data buffer */
	u32 fore_ptr;		/* write index; advanced by AP in ringbuf mode */
	u32 rear_ptr;		/* read index; advanced by CP in sktbuf mode */
} __packed;
+
+/* Info for V1 */
+struct pktproc_info_v1 {
+ struct pktproc_q_info q_info;
+} __packed;
+
+/* Info for V2 */
+struct pktproc_info_v2 {
+ u32 num_queues:4,
+ desc_mode:2,
+ irq_mode:2,
+ max_packet_size:16,
+ reserved:8;
+ struct pktproc_q_info q_info[PKTPROC_MAX_QUEUE];
+} __packed;
+
+/*
+ * PktProc descriptor region
+ */
+/* RingBuf mode */
+struct pktproc_desc_ringbuf {
+ u32 cp_data_paddr;
+ u32 information;
+ u32 reserve1;
+ u16 reserve2;
+ u16 filter_result;
+ u16 length;
+ u8 channel_id;
+ u8 reserve3;
+ u8 status;
+ u8 reserve4;
+ u16 reserve5;
+} __packed;
+
+/* SktBuf mode */
struct pktproc_desc_sktbuf {
	u64 cp_data_paddr:36,	/* CP-view data address (36-bit) */
		reserved0:4,
		control:8,
		status:8,	/* PKTPROC_STATUS_* bit field */
		lro:5,		/* LRO_* bit field */
		clat:2,
		reserved1:1;
	u16 length;		/* packet data length */
	u16 filter_result;
	u16 information;
	u8 channel_id;		/* logical channel id (presumably SIPC ch — verify) */
	u8 reserved2:4,
		itg:2,
		reserved3:2;
} __packed;
+
+/* Statistics */
struct pktproc_statistics {
	u64 pass_cnt;		/* packets processed successfully */
	u64 lro_cnt;		/* LRO-coalesced packets */
	u64 err_len;		/* error counter: invalid length */
	u64 err_chid;		/* error counter: invalid channel id */
	u64 err_addr;		/* error counter: invalid data address */
	u64 err_nomem;		/* error counter: memory allocation failure */
	u64 err_bm_nomem;	/* error counter: buffer-manager allocation failure */
	u64 err_csum;		/* error counter: checksum failure */
	u64 err_enqueue_dit;	/* error counter: DIT enqueue failure */
};
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+struct cpif_pcie_iommu_ctrl {
+ struct page_frag_cache pf_cache;
+ u32 pf_offset;
+ u32 curr_fore;
+
+ /* Will */
+ unsigned long map_src_pa;
+ void *map_page_va;
+ u32 map_idx;
+
+ unsigned long unmap_src_pa;
+ u32 unmap_page_size;
+
+ /* Was */
+ u32 end_map_size;
+
+ /* These elements must be at the end */
+ void **pf_buf;
+ /* Debug */
+ u32 mapped_cnt;
+ u64 mapped_size;
+};
+#endif
+
+/* Logical view for each queue */
struct pktproc_queue {
	u32 q_idx;		/* index of this queue within the adaptor */
	atomic_t active;	/* set non-zero when the queue is activated */
	spinlock_t lock;

	struct mem_link_device *mld;	/* owning link device */
	struct pktproc_adaptor *ppa;	/* owning adaptor */

	/* Pointer to fore_ptr of q_info. Increased AP when desc_mode is ringbuf mode */
	u32 *fore_ptr;
	/* Pointer to rear_ptr of q_info. Increased CP when desc_mode is sktbuf mode */
	u32 *rear_ptr;
	/* Follow rear_ptr when desc_mode is sktbuf mode */
	u32 done_ptr;

	/* Store (CP-view addresses/sizes captured at create time) */
	u64 cp_desc_pbase;
	u32 num_desc;
	u64 cp_buff_pbase;

	/* Pointer to info region by version */
	union {
		struct {
			struct pktproc_info_v1 *info_v1;
		};
		struct {
			struct pktproc_info_v2 *info_v2;
		};
	};
	struct pktproc_q_info *q_info_ptr; /* Pointer to q_info of info_v */

	/* Pointer to desc region by addr mode */
	union {
		struct {
			struct pktproc_desc_ringbuf *desc_ringbuf; /* RingBuf mode */
		};
		struct {
			struct pktproc_desc_sktbuf *desc_sktbuf; /* SktBuf mode */
		};
	};
	u32 desc_size;		/* size in bytes of this queue's descriptor region */

	/* Pointer to data buffer for a queue */
	u8 __iomem *q_buff_vbase;
	unsigned long q_buff_pbase;
	u32 q_buff_size;

	/* CP interface network rx manager */
	struct cpif_netrx_mng *manager; /* Pointer to rx manager */
	dma_addr_t *dma_addr;

#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
	struct cpif_pcie_iommu_ctrl ioc;
#endif

	/* IRQ */
	int irq;
#if IS_ENABLED(CONFIG_MCU_IPC)
	u32 irq_idx;		/* mailbox IRQ index (see ppa->exclusive_irq_idx) */
#endif
#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
	bool msi_irq_wake;
#endif

	/* NAPI */
	struct net_device netdev;
	struct napi_struct napi;
	struct napi_struct *napi_ptr;

	/* Statistics */
	struct pktproc_statistics stat;

	/* Func (per-queue operations installed at create time) */
	irqreturn_t (*irq_handler)(int irq, void *arg);
	void (*enable_irq)(struct pktproc_queue *q);
	void (*disable_irq)(struct pktproc_queue *q);
	int (*get_packet)(struct pktproc_queue *q, struct sk_buff **new_skb);
	int (*clean_rx_ring)(struct pktproc_queue *q, int budget, int *work_done);
	int (*alloc_rx_buf)(struct pktproc_queue *q);
	int (*update_fore_ptr)(struct pktproc_queue *q, u32 count);
	int (*clear_data_addr)(struct pktproc_queue *q);
};
+
+/*
+ * Descriptor structure mode
+ * 0: RingBuf mode. Destination data address is decided by CP
+ * 1: SktBuf mode. Destination data address is decided by AP
+ */
+enum pktproc_desc_mode {
+ DESC_MODE_RINGBUF,
+ DESC_MODE_SKTBUF,
+ MAX_DESC_MODE
+};
+
+/*
+ * PktProc version
+ * 1: Single queue, ringbuf desc mode
+ * 2: Multi queue, ringbuf/sktbuf desc mode
+ */
+enum pktproc_version {
+ PKTPROC_V1 = 1,
+ PKTPROC_V2,
+ MAX_VERSION
+};
+
+enum pktproc_perftest_mode {
+ PERFTEST_MODE_STOP,
+ PERFTEST_MODE_IPV4,
+ PERFTEST_MODE_CLAT,
+ PERFTEST_MODE_IPV6,
+ PERFTEST_MODE_MAX
+};
+
+struct pktproc_perftest {
+ bool test_run;
+ enum pktproc_perftest_mode mode;
+ u16 session;
+ u16 ch;
+ u16 cpu;
+ u16 ipi_cpu[PKTPROC_MAX_QUEUE];
+ u16 udelay;
+ u32 seq_counter[PKTPROC_MAX_QUEUE];
+ u16 clat_ipv6[8];
+};
+
+struct pktproc_perftest_data {
+ u8 header[48];
+ u32 header_len;
+ u16 dst_port_offset;
+ u16 packet_len;
+};
+
+/* PktProc adaptor */
struct pktproc_adaptor {
	bool support; /* Is support PktProc feature? */
	enum pktproc_version version; /* Version */

	u64 cp_base; /* CP base address for pktproc */
	u32 info_rgn_offset; /* Offset of info region */
	u32 info_rgn_size; /* Size of info region */
	u32 desc_rgn_offset; /* Offset of descriptor region */
	u32 desc_rgn_size; /* Size of descriptor region */
	u32 buff_rgn_offset; /* Offset of data buffer region */
	u32 buff_rgn_size; /* Size of data buffer region */

	bool info_rgn_cached;	/* when false the region is vmapped and vunmapped on error */
	bool desc_rgn_cached;
	bool buff_rgn_cached;

	enum pktproc_desc_mode desc_mode; /* Descriptor structure mode */
	u32 desc_num_ratio_percent; /* Number of descriptors ratio as percent */
	u32 num_queue; /* Number of queue */
#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
	u32 space_margin;
#endif
	bool use_exclusive_irq; /* Exclusive interrupt */
#if IS_ENABLED(CONFIG_MCU_IPC)
	u32 exclusive_irq_idx[PKTPROC_MAX_QUEUE];	/* per-queue mailbox IRQ index */
#endif
	bool use_hw_iocc; /* H/W IO cache coherency */
	u32 max_packet_size; /* Max packet size CP sees */
	u32 true_packet_size; /* True packet size AP allocated */
	bool use_dedicated_baaw; /* BAAW for 36bit address */

	struct device *dev;

	bool use_netrx_mng;	/* use the cpif_netrx_mng buffer manager */
	u32 netrx_capacity;
	u32 skb_padding_size;

	void __iomem *info_vbase; /* I/O region for information */
	void __iomem *desc_vbase; /* I/O region for descriptor */
	void __iomem *buff_vbase; /* I/O region for data buffer */
	unsigned long buff_pbase;	/* AP physical base of the buffer region */
	struct pktproc_queue *q[PKTPROC_MAX_QUEUE]; /* Logical queue */

	/* Debug */
	struct pktproc_perftest perftest;
	bool pktgen_gro;
};
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+extern int pktproc_create(struct platform_device *pdev, struct mem_link_device *mld,
+ unsigned long memaddr, u32 memsize);
+extern int pktproc_init(struct pktproc_adaptor *ppa);
+extern int pktproc_get_usage(struct pktproc_queue *q);
+extern int pktproc_get_usage_fore_rear(struct pktproc_queue *q);
+
/* Returns non-zero when the PktProc feature is supported/enabled */
static inline int pktproc_check_support(struct pktproc_adaptor *ppa)
{
	return ppa->support;
}
+
/* Returns the queue's active flag; 0 when the queue was never created */
static inline int pktproc_check_active(struct pktproc_adaptor *ppa, u32 q_idx)
{
	if (!ppa->q[q_idx])
		return 0;

	return atomic_read(&ppa->q[q_idx]->active);
}
+#else
+static inline int pktproc_create(struct platform_device *pdev, struct mem_link_device *mld,
+ unsigned long memaddr, u32 memsize) { return 0; }
+static inline int pktproc_init(struct pktproc_adaptor *ppa) { return 0; }
+static inline int pktproc_get_usage(struct pktproc_queue *q) { return 0; }
+static inline int pktproc_get_usage_fore_rear(struct pktproc_queue *q) { return 0; }
+static inline int pktproc_check_support(struct pktproc_adaptor *ppa) { return 0; }
+static inline int pktproc_check_active(struct pktproc_adaptor *ppa, u32 q_idx) { return 0; }
+#endif
+
+#endif /* __LINK_RX_PKTPROC_H__ */
diff --git a/link_tx_pktproc.c b/link_tx_pktproc.c
new file mode 100644
index 0000000..d3d4992
--- /dev/null
+++ b/link_tx_pktproc.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2021, Samsung Electronics.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/shm_ipc.h>
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "link_device_memory.h"
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+#include "dit.h"
+#endif
+
/*
 * Copy one skb into this UL queue's shared buffer and publish a matching
 * descriptor for CP.
 *
 * Returns the skb's original length on success, -EACCES when the queue is
 * not active, -ENOSPC when the descriptor ring is full, or a negative DIT
 * enqueue error.  Advances q->done_ptr by one on success.
 */
static int pktproc_send_pkt_to_cp(struct pktproc_queue_ul *q, struct sk_buff *skb)
{
	struct pktproc_q_info_ul *q_info = q->q_info;
	struct pktproc_desc_ul *desc;
	void *target_addr;
	int len = skb->len; /* cached: skb may be consumed by DIT enqueue below */
#if IS_ENABLED(CONFIG_EXYNOS_DIT)
	bool use_dit;
#endif
	u32 space;

	q->stat.total_cnt++;
	if (!pktproc_check_ul_q_active(q->ppa_ul, q->q_idx)) {
		mif_err_limited("Queue[%d] not activated\n", q->q_idx);
		q->stat.inactive_cnt++;
		return -EACCES;
	}

	/* avoid race with the done_ptr disordering */
	smp_rmb();

	space = circ_get_space(q->num_desc, q->done_ptr, q_info->rear_ptr);
	if (space < 1) {
		mif_err_limited("NOSPC Queue[%d] num_desc:%d fore:%d done:%d rear:%d\n",
				q->q_idx, q->num_desc, q_info->fore_ptr, q->done_ptr,
				q_info->rear_ptr);
		q->stat.buff_full_cnt++;
		return -ENOSPC;
	}

#if IS_ENABLED(CONFIG_EXYNOS_DIT)
	use_dit = dit_check_dir_use_queue(DIT_DIR_TX, q->q_idx);
	if (!use_dit)
#endif
	{
		/* fixed-size slots: slot address = base + done_ptr * max_packet_size */
		target_addr = (void *)(q->q_buff_vbase +
				(q->done_ptr * q->max_packet_size));
		skb_copy_from_linear_data(skb, target_addr, skb->len);
	}

	desc = &q->desc_ul[q->done_ptr];
	desc->sktbuf_point = q->buff_addr_cp + (q->done_ptr * q->max_packet_size);

	desc->data_size = skb->len;
	if (q->ppa_ul->padding_required)
		desc->data_size += CP_PADDING;
	desc->total_pkt_size = desc->data_size;
	desc->last_desc = 0;
	desc->seg_on = 0;
	desc->hw_set = 0;
	desc->lcid = skbpriv(skb)->sipc_ch;

	/* descriptor fields must be written before done_ptr moves past them */
	barrier();

#if IS_ENABLED(CONFIG_EXYNOS_DIT)
	if (use_dit) {
		int ret;
		/* skb may not be valid after dit_enqueue is done */
		ret = dit_enqueue_src_desc_ring_skb(DIT_DIR_TX, skb);
		if (ret < 0) {
			mif_err_limited("Enqueue failed Queue[%d] done:%d ret:%d\n",
					q->q_idx, q->done_ptr, ret);
			q->stat.buff_full_cnt++;
			return ret;
		}
	}
#endif

	q->done_ptr = circ_new_ptr(q->num_desc, q->done_ptr, 1);

	/* ensure the done_ptr ordering */
	smp_mb();

	return len;
}
+
/*
 * Raise the last_desc ("end") bit on the descriptor located @prev_offset
 * entries before @desc_index.
 *
 * Returns 0 on success, -EINVAL when desc_index is out of range, -EAGAIN
 * when the done/rear section of the ring is empty, or -ERANGE when
 * desc_index equals the current fore_ptr.
 */
static int pktproc_set_end(struct pktproc_queue_ul *q, unsigned int desc_index,
			   unsigned int prev_offset)
{
	struct pktproc_q_info_ul *q_info = q->q_info;
	struct pktproc_desc_ul *prev_desc;
	unsigned int prev_index;

	if (unlikely(desc_index >= q->num_desc))
		return -EINVAL;

	if (unlikely(circ_empty(q->done_ptr, q_info->rear_ptr)))
		return -EAGAIN;

	if (desc_index == q->q_info->fore_ptr)
		return -ERANGE;

	prev_index = circ_prev_ptr(q->num_desc, desc_index, prev_offset);

	prev_desc = &q->desc_ul[prev_index];
	prev_desc->last_desc = 1;

	q->stat.pass_cnt++;

	return 0;
}
+
+static int pktproc_ul_update_fore_ptr(struct pktproc_queue_ul *q, u32 count)
+{
+ u32 offset = q->ppa_ul->cp_quota;
+ unsigned int last_ptr;
+ unsigned int fore_ptr;
+ unsigned int i;
+ int ret;
+
+ last_ptr = q->q_info->fore_ptr;
+ fore_ptr = circ_new_ptr(q->num_desc, last_ptr, count);
+
+ if (q->ppa_ul->end_bit_owner == END_BIT_CP)
+ goto set_fore;
+
+ if (count < offset)
+ goto set_last;
+
+ for (i = 0; i < count - offset; i += offset) {
+ last_ptr = circ_new_ptr(q->num_desc, last_ptr, offset);
+ ret = pktproc_set_end(q, last_ptr, 1);
+ if (ret) {
+ mif_err_limited("set end failed. q_idx:%d, ret:%d\n", q->q_idx, ret);
+ goto error;
+ }
+ }
+
+set_last:
+ ret = pktproc_set_end(q, fore_ptr, 1);
+ if (ret) {
+ mif_err_limited("set end failed. q_idx:%d, ret:%d\n", q->q_idx, ret);
+ goto error;
+ }
+
+set_fore:
+ q->q_info->fore_ptr = fore_ptr;
+
+ /* ensure the fore_ptr ordering */
+ smp_mb();
+
+error:
+ return 0;
+}
+
+/*
+ * Debug
+ */
/* sysfs "region": dump the UL pktproc layout and each queue's ring geometry */
static ssize_t region_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct modem_ctl *mc = dev_get_drvdata(dev);
	struct link_device *ld = get_current_link(mc->iod);
	struct mem_link_device *mld = to_mem_link_device(ld);
	struct pktproc_adaptor_ul *ppa_ul = &mld->pktproc_ul;
	struct pktproc_info_ul *info_ul =
		(struct pktproc_info_ul *)ppa_ul->info_vbase;

	ssize_t count = 0;
	int i;

	count += scnprintf(&buf[count], PAGE_SIZE - count, "CP base:0x%08llx\n", ppa_ul->cp_base);
	count += scnprintf(&buf[count], PAGE_SIZE - count, "Num of queue:%d\n", ppa_ul->num_queue);
	count += scnprintf(&buf[count], PAGE_SIZE - count, "HW cache coherency:%d\n",
			   ppa_ul->use_hw_iocc);
	count += scnprintf(&buf[count], PAGE_SIZE - count, "\n");

	count += scnprintf(&buf[count], PAGE_SIZE - count, "End bit owner:%d\n",
			   info_ul->end_bit_owner);
	count += scnprintf(&buf[count], PAGE_SIZE - count, "CP quota:%d\n", info_ul->cp_quota);

	/* per-queue geometry; inactive queues are reported but not dumped */
	for (i = 0; i < ppa_ul->num_queue; i++) {
		struct pktproc_queue_ul *q = ppa_ul->q[i];

		if (!pktproc_check_ul_q_active(ppa_ul, q->q_idx)) {
			count += scnprintf(&buf[count], PAGE_SIZE - count,
					   "Queue %d is not active\n", i);
			continue;
		}

		count += scnprintf(&buf[count], PAGE_SIZE - count, "Queue%d\n", i);
		count += scnprintf(&buf[count], PAGE_SIZE - count, " num_desc:%d(0x%08x)\n",
				   q->q_info->num_desc, q->q_info->num_desc);
		count += scnprintf(&buf[count], PAGE_SIZE - count, " cp_desc_pbase:0x%08x\n",
				   q->q_info->cp_desc_pbase);
		count += scnprintf(&buf[count], PAGE_SIZE - count, " desc_size:0x%08x\n",
				   q->desc_size);
		count += scnprintf(&buf[count], PAGE_SIZE - count, " cp_buff_pbase:0x%08x\n",
				   q->q_info->cp_buff_pbase);
		count += scnprintf(&buf[count], PAGE_SIZE - count, " q_buff_size:0x%08x\n",
				   q->q_buff_size);
	}

	return count;
}
+
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct modem_ctl *mc = dev_get_drvdata(dev);
+ struct link_device *ld = get_current_link(mc->iod);
+ struct mem_link_device *mld = to_mem_link_device(ld);
+ struct pktproc_adaptor_ul *ppa_ul = &mld->pktproc_ul;
+ ssize_t count = 0;
+ int i;
+
+ for (i = 0; i < ppa_ul->num_queue; i++) {
+ struct pktproc_queue_ul *q = ppa_ul->q[i];
+
+ if (!pktproc_check_ul_q_active(ppa_ul, q->q_idx)) {
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ "Queue %d is not active\n", i);
+ continue;
+ }
+
+ count += scnprintf(&buf[count], PAGE_SIZE - count, "Queue%d\n", i);
+ count += scnprintf(&buf[count], PAGE_SIZE - count, " num_desc:%d\n",
+ q->num_desc);
+ count += scnprintf(&buf[count], PAGE_SIZE - count, " fore/rear:%d/%d\n",
+ q->q_info->fore_ptr, q->q_info->rear_ptr);
+ count += scnprintf(&buf[count], PAGE_SIZE - count, " pass:%lld\n",
+ q->stat.pass_cnt);
+ count += scnprintf(&buf[count], PAGE_SIZE - count,
+ " fail: buff_full:%lld inactive:%lld\n",
+ q->stat.buff_full_cnt, q->stat.inactive_cnt);
+ count += scnprintf(&buf[count], PAGE_SIZE - count, " total:%lld\n",
+ q->stat.total_cnt);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(region);
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *pktproc_ul_attrs[] = {
+ &dev_attr_region.attr,
+ &dev_attr_status.attr,
+ NULL,
+};
+
+static const struct attribute_group pktproc_ul_group = {
+ .attrs = pktproc_ul_attrs,
+ .name = "pktproc_ul",
+};
+
+/*
+ * Initialize PktProc
+ */
+int pktproc_init_ul(struct pktproc_adaptor_ul *ppa_ul)
+{
+ int i;
+ struct mem_link_device *mld;
+ struct pktproc_info_ul *info;
+
+ mld = container_of(ppa_ul, struct mem_link_device, pktproc_ul);
+
+ if (!ppa_ul) {
+ mif_err("ppa_ul is null\n");
+ return -EPERM;
+ }
+
+ info = (struct pktproc_info_ul *)ppa_ul->info_vbase;
+
+ if (info->end_bit_owner == END_BIT_AP && info->cp_quota <= 0) {
+ mif_err("invalid cp quota: %d\n", info->cp_quota);
+ return -EINVAL;
+ }
+
+ ppa_ul->cp_quota = info->cp_quota;
+ ppa_ul->end_bit_owner = info->end_bit_owner;
+ mif_info("CP quota set to %d\n", ppa_ul->cp_quota);
+
+ if (!pktproc_check_support_ul(ppa_ul))
+ return 0;
+
+
+ for (i = 0; i < ppa_ul->num_queue; i++) {
+ struct pktproc_queue_ul *q = ppa_ul->q[i];
+
+ mif_info("PKTPROC UL Q%d\n", i);
+
+ *q->fore_ptr = 0; /* sets q_info->fore_ptr to 0 */
+ q->done_ptr = 0;
+ *q->rear_ptr = 0; /* sets q_info->rear_ptr to 0 */
+
+ if (mld->pktproc_use_36bit_addr) {
+ q->q_info->cp_desc_pbase = q->cp_desc_pbase >> 4;
+ q->q_info->cp_buff_pbase = q->cp_buff_pbase >> 4;
+ } else {
+ q->q_info->cp_desc_pbase = q->cp_desc_pbase;
+ q->q_info->cp_buff_pbase = q->cp_buff_pbase;
+ }
+
+ q->q_info->num_desc = q->num_desc;
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+ if (dit_check_dir_use_queue(DIT_DIR_TX, q->q_idx))
+ dit_reset_dst_wp_rp(DIT_DIR_TX);
+#endif
+
+ memset(&q->stat, 0, sizeof(struct pktproc_statistics_ul));
+
+ atomic_set(&q->active, 1);
+ atomic_set(&q->busy, 0);
+ mif_info("num_desc:0x%08x cp_desc_pbase:0x%08llx cp_buff_pbase:0x%08llx\n",
+ q->num_desc, q->cp_desc_pbase, q->cp_buff_pbase);
+ mif_info("fore:%d rear:%d\n",
+ q->q_info->fore_ptr, q->q_info->rear_ptr);
+ }
+
+ return 0;
+}
+
+/*
+ * Create PktProc
+ */
/*
 * Read the UL pktproc configuration from device tree node @np into @ppa_ul.
 * Returns 0 on success.
 *
 * NOTE(review): mif_dt_read_u32()/mif_dt_read_u64() are project macros
 * that appear to bail out of this function on a missing mandatory
 * property (the _noerr variant tolerates absence) — verify in modem_utils.h.
 */
static int pktproc_get_info_ul(struct pktproc_adaptor_ul *ppa_ul,
			       struct device_node *np)
{
	mif_dt_read_u64(np, "pktproc_cp_base", ppa_ul->cp_base);
	mif_dt_read_u32(np, "pktproc_ul_num_queue", ppa_ul->num_queue);
	if (ppa_ul->num_queue == 1 && !IS_ENABLED(CONFIG_CP_PKTPROC_UL_SINGLE_QUEUE)) {
		mif_err("Need to enable UL single queue config for single UL queue\n");
		/* misconfiguration is fatal by design; the return below is unreachable */
		panic("Need to enable UL single queue config for single UL queue.\n");
		return -EINVAL;
	}
	/* silently clamp an oversized queue count to the supported maximum */
	if (ppa_ul->num_queue > PKTPROC_UL_QUEUE_MAX)
		ppa_ul->num_queue = PKTPROC_UL_QUEUE_MAX;
	mif_dt_read_u32(np, "pktproc_ul_max_packet_size", ppa_ul->default_max_packet_size);
	mif_dt_read_u32_noerr(np, "pktproc_ul_hiprio_ack_only", ppa_ul->hiprio_ack_only);
	mif_dt_read_u32(np, "pktproc_ul_use_hw_iocc", ppa_ul->use_hw_iocc);
	mif_dt_read_u32(np, "pktproc_ul_info_rgn_cached", ppa_ul->info_rgn_cached);
	mif_dt_read_u32(np, "pktproc_ul_desc_rgn_cached", ppa_ul->desc_rgn_cached);
	mif_dt_read_u32(np, "pktproc_ul_buff_rgn_cached", ppa_ul->buff_rgn_cached);
	mif_dt_read_u32(np, "pktproc_ul_padding_required",
			ppa_ul->padding_required);
	mif_info("cp_base:0x%08llx num_queue:%d max_packet_size:%d hiprio_ack_only:%d iocc:%d\n",
		 ppa_ul->cp_base, ppa_ul->num_queue, ppa_ul->default_max_packet_size,
		 ppa_ul->hiprio_ack_only, ppa_ul->use_hw_iocc);
	mif_info("cached: %d/%d/%d padding_required:%d\n", ppa_ul->info_rgn_cached,
		 ppa_ul->desc_rgn_cached, ppa_ul->buff_rgn_cached,
		 ppa_ul->padding_required);

	mif_dt_read_u32(np, "pktproc_ul_info_rgn_offset",
			ppa_ul->info_rgn_offset);
	mif_dt_read_u32(np, "pktproc_ul_info_rgn_size",
			ppa_ul->info_rgn_size);
	mif_dt_read_u32(np, "pktproc_ul_desc_rgn_offset",
			ppa_ul->desc_rgn_offset);
	mif_dt_read_u32(np, "pktproc_ul_desc_rgn_size",
			ppa_ul->desc_rgn_size);
	mif_dt_read_u32(np, "pktproc_ul_buff_rgn_offset",
			ppa_ul->buff_rgn_offset);
	mif_dt_read_u32(np, "pktproc_ul_buff_rgn_size",
			ppa_ul->buff_rgn_size);
	mif_info("info_rgn 0x%08lx 0x%08lx desc_rgn 0x%08lx 0x%08lx buff_rgn 0x%08lx 0x%08lx\n",
		 ppa_ul->info_rgn_offset, ppa_ul->info_rgn_size, ppa_ul->desc_rgn_offset,
		 ppa_ul->desc_rgn_size, ppa_ul->buff_rgn_offset, ppa_ul->buff_rgn_size);

	return 0;
}
+
+int pktproc_create_ul(struct platform_device *pdev, struct mem_link_device *mld,
+ unsigned long memaddr, u32 memsize)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct pktproc_adaptor_ul *ppa_ul = &mld->pktproc_ul;
+ struct pktproc_info_ul *ul_info;
+ u32 buff_size, buff_size_by_q;
+ u32 last_q_desc_offset;
+ int i, ret;
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_IOMMU)
+ u64 temp;
+#endif
+
+ if (!np) {
+ mif_err("of_node is null\n");
+ return -EINVAL;
+ }
+ if (!ppa_ul) {
+ mif_err("ppa_ul is null\n");
+ return -EINVAL;
+ }
+
+ mif_dt_read_u32_noerr(np, "pktproc_ul_support", ppa_ul->support);
+ if (!ppa_ul->support) {
+ mif_info("pktproc_support_ul is 0. Just return\n");
+ return 0;
+ }
+
+ /* Get info */
+ ret = pktproc_get_info_ul(ppa_ul, np);
+ if (ret != 0) {
+ mif_err("pktproc_get_info_ul() error %d\n", ret);
+ return ret;
+ }
+
+#if !IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOCC)
+ if (!ppa_ul->use_hw_iocc && ppa_ul->info_rgn_cached) {
+ mif_err("cannot support sw iocc based caching on info region\n");
+ return -EINVAL;
+ }
+
+ if (!ppa_ul->use_hw_iocc && ppa_ul->desc_rgn_cached) {
+ mif_err("cannot support sw iocc based caching on desc region\n");
+ return -EINVAL;
+ }
+
+ if (!ppa_ul->use_hw_iocc && ppa_ul->buff_rgn_cached) {
+ mif_err("cannot support sw iocc based caching on buff region\n");
+ return -EINVAL;
+ }
+#endif
+
+ /* Get base addr */
+ mif_info("memaddr:0x%lx memsize:0x%08x\n", memaddr, memsize);
+
+ if (ppa_ul->info_rgn_cached) {
+ ppa_ul->info_vbase = phys_to_virt(memaddr + ppa_ul->info_rgn_offset);
+ } else {
+ ppa_ul->info_vbase = cp_shmem_get_nc_region(memaddr +
+ ppa_ul->info_rgn_offset, ppa_ul->info_rgn_size);
+ if (!ppa_ul->info_vbase) {
+ mif_err("ppa->info_vbase error\n");
+ ret = -ENOMEM;
+ goto create_error;
+ }
+ }
+
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_IOMMU)
+ ppa_ul->desc_map = cpif_vmap_create(ppa_ul->cp_base + ppa_ul->desc_rgn_offset,
+ ppa_ul->desc_rgn_size, ppa_ul->desc_rgn_size);
+ if (unlikely(!ppa_ul->desc_map)) {
+ mif_err("failed to create desc map for pktproc ul\n");
+ ret = -ENOMEM;
+ goto create_error;
+ }
+
+ ppa_ul->buff_map = cpif_vmap_create(ppa_ul->cp_base + ppa_ul->buff_rgn_offset,
+ ppa_ul->buff_rgn_size, ppa_ul->buff_rgn_size);
+ if (unlikely(!ppa_ul->buff_map)) {
+ mif_err("failed to create buff map for pktproc ul\n");
+ cpif_vmap_free(ppa_ul->desc_map);
+ ret = -ENOMEM;
+ goto create_error;
+ }
+
+ temp = cpif_vmap_map_area(ppa_ul->desc_map, 0, 0, memaddr + ppa_ul->desc_rgn_offset);
+ if (temp != ppa_ul->cp_base + ppa_ul->desc_rgn_offset) {
+ cpif_vmap_free(ppa_ul->desc_map);
+ cpif_vmap_free(ppa_ul->buff_map);
+ ret = -EINVAL;
+ goto create_error;
+ }
+
+ temp = cpif_vmap_map_area(ppa_ul->buff_map, 0, 0, memaddr + ppa_ul->buff_rgn_offset);
+ if (temp != ppa_ul->cp_base + ppa_ul->buff_rgn_offset) {
+ cpif_vmap_free(ppa_ul->desc_map);
+ cpif_vmap_free(ppa_ul->buff_map);
+ ret = -EINVAL;
+ goto create_error;
+ }
+#endif
+ if (ppa_ul->desc_rgn_cached) {
+ ppa_ul->desc_vbase = phys_to_virt(memaddr + ppa_ul->desc_rgn_offset);
+ } else {
+ ppa_ul->desc_vbase = cp_shmem_get_nc_region(memaddr +
+ ppa_ul->desc_rgn_offset, ppa_ul->desc_rgn_size);
+ if (!ppa_ul->desc_vbase) {
+ mif_err("ppa->desc_vbase error\n");
+ ret = -ENOMEM;
+ goto create_error;
+ }
+ }
+
+ memset(ppa_ul->info_vbase, 0, ppa_ul->info_rgn_size);
+ memset(ppa_ul->desc_vbase, 0, ppa_ul->desc_rgn_size);
+
+ mif_info("info + desc size:0x%08lx\n",
+ ppa_ul->info_rgn_size + ppa_ul->desc_rgn_size);
+
+ buff_size = ppa_ul->buff_rgn_size;
+ buff_size_by_q = buff_size / ppa_ul->num_queue;
+ if (ppa_ul->buff_rgn_cached) {
+ ppa_ul->buff_vbase = phys_to_virt(memaddr + ppa_ul->buff_rgn_offset);
+ } else {
+ ppa_ul->buff_vbase = cp_shmem_get_nc_region(memaddr +
+ ppa_ul->buff_rgn_offset, buff_size);
+ if (!ppa_ul->buff_vbase) {
+ mif_err("ppa->buff_vbase error\n");
+ ret = -ENOMEM;
+ goto create_error;
+ }
+ }
+
+ mif_info("Total buffer size:0x%08x Queue:%d Size by queue:0x%08x\n",
+ buff_size, ppa_ul->num_queue,
+ buff_size_by_q);
+
+ ul_info = (struct pktproc_info_ul *)ppa_ul->info_vbase;
+ ul_info->num_queues = ppa_ul->num_queue;
+
+ /* Create queue */
+ last_q_desc_offset = 0;
+ for (i = 0; i < ppa_ul->num_queue; i++) {
+ struct pktproc_queue_ul *q;
+
+ mif_info("Queue %d\n", i);
+
+ ppa_ul->q[i] = kzalloc(sizeof(*ppa_ul->q[i]), GFP_ATOMIC);
+ if (ppa_ul->q[i] == NULL) {
+ ret = -ENOMEM;
+ goto create_error;
+ }
+ q = ppa_ul->q[i];
+
+ atomic_set(&q->active, 0);
+
+ /* Info region */
+ q->ul_info = ul_info;
+ q->q_info = &q->ul_info->q_info[i];
+
+ q->q_buff_vbase = ppa_ul->buff_vbase + (i * buff_size_by_q);
+ q->cp_buff_pbase = ppa_ul->cp_base +
+ ppa_ul->buff_rgn_offset + (i * buff_size_by_q);
+
+ if (mld->pktproc_use_36bit_addr)
+ q->q_info->cp_buff_pbase = q->cp_buff_pbase >> 4;
+ else
+ q->q_info->cp_buff_pbase = q->cp_buff_pbase;
+
+ if (ppa_ul->num_queue > 1 && i == PKTPROC_UL_HIPRIO && ppa_ul->hiprio_ack_only) {
+ struct link_device *ld = &mld->link_dev;
+
+ ld->hiprio_ack_only = true;
+ q->max_packet_size = HIPRIO_MAX_PACKET_SIZE;
+ } else {
+ q->max_packet_size = ppa_ul->default_max_packet_size;
+ }
+
+ q->q_buff_size = buff_size_by_q;
+ q->num_desc = buff_size_by_q / q->max_packet_size;
+ q->q_info->num_desc = q->num_desc;
+ q->desc_ul = ppa_ul->desc_vbase + last_q_desc_offset;
+ q->cp_desc_pbase = ppa_ul->cp_base +
+ ppa_ul->desc_rgn_offset + last_q_desc_offset;
+ if (mld->pktproc_use_36bit_addr)
+ q->q_info->cp_desc_pbase = q->cp_desc_pbase >> 4;
+ else
+ q->q_info->cp_desc_pbase = q->cp_desc_pbase;
+ q->desc_size = sizeof(struct pktproc_desc_ul) * q->num_desc;
+ q->buff_addr_cp = ppa_ul->cp_base + ppa_ul->buff_rgn_offset +
+ (i * buff_size_by_q);
+ q->send_packet = pktproc_send_pkt_to_cp;
+ q->update_fore_ptr = pktproc_ul_update_fore_ptr;
+
+ if ((last_q_desc_offset + q->desc_size) > ppa_ul->desc_rgn_size) {
+ mif_err("Descriptor overflow. 0x%08x + 0x%08x > 0x%08lx\n",
+ last_q_desc_offset, q->desc_size, ppa_ul->desc_rgn_size);
+ goto create_error;
+ }
+
+ spin_lock_init(&q->lock);
+
+ q->q_idx = i;
+ q->mld = mld;
+ q->ppa_ul = ppa_ul;
+
+ q->q_info->fore_ptr = 0;
+ q->q_info->rear_ptr = 0;
+
+ q->fore_ptr = &q->q_info->fore_ptr;
+ q->rear_ptr = &q->q_info->rear_ptr;
+ q->done_ptr = *q->fore_ptr;
+
+ last_q_desc_offset += q->desc_size;
+
+ mif_info("num_desc:%d desc_offset:0x%08llx desc_size:0x%08x max_packet_size: %d\n",
+ q->num_desc, q->cp_desc_pbase, q->desc_size, q->max_packet_size);
+ mif_info("buff_offset:0x%08llx buff_size:0x%08x\n",
+ q->cp_buff_pbase, q->q_buff_size);
+
+#if IS_ENABLED(CONFIG_EXYNOS_DIT)
+ if (ppa_ul->num_queue == 1 || q->q_idx == PKTPROC_UL_NORM) {
+ ret = dit_set_pktproc_queue_num(DIT_DIR_TX, q->q_idx);
+ if (ret)
+ mif_err("dit_set_pktproc_queue_num() error:%d\n", ret);
+
+ ret = dit_set_buf_size(DIT_DIR_TX, q->max_packet_size);
+ if (ret)
+ mif_err("dit_set_buf_size() error:%d\n", ret);
+
+ ret = dit_set_pktproc_base(DIT_DIR_TX, memaddr + ppa_ul->buff_rgn_offset +
+ (q->q_idx * buff_size_by_q));
+ if (ret)
+ mif_err("dit_set_pktproc_base() error:%d\n", ret);
+
+ ret = dit_set_desc_ring_len(DIT_DIR_TX, q->num_desc);
+ if (ret)
+ mif_err("dit_set_desc_ring_len() error:%d\n", ret);
+ }
+#endif
+ }
+
+ /* Debug */
+ ret = sysfs_create_group(&pdev->dev.kobj, &pktproc_ul_group);
+ if (ret != 0) {
+ mif_err("sysfs_create_group() error %d\n", ret);
+ goto create_error;
+ }
+
+ return 0;
+
+create_error:
+ for (i = 0; i < ppa_ul->num_queue; i++)
+ kfree(ppa_ul->q[i]);
+
+ if (!ppa_ul->info_rgn_cached && ppa_ul->info_vbase)
+ vunmap(ppa_ul->info_vbase);
+ if (!ppa_ul->desc_rgn_cached && ppa_ul->desc_vbase)
+ vunmap(ppa_ul->desc_vbase);
+ if (!ppa_ul->buff_rgn_cached && ppa_ul->buff_vbase)
+ vunmap(ppa_ul->buff_vbase);
+
+ return ret;
+}
diff --git a/link_tx_pktproc.h b/link_tx_pktproc.h
new file mode 100644
index 0000000..363dcaf
--- /dev/null
+++ b/link_tx_pktproc.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2021, Samsung Electronics.
+ *
+ */
+
+#ifndef __LINK_TX_PKTPROC_H__
+#define __LINK_TX_PKTPROC_H__
+
+#include <linux/skbuff.h>
+#include "link_device_memory.h"
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_IOMMU)
+#include "cpif_vmapper.h"
+#endif
+
+/* Queue Numbers */
+enum pktproc_ul_queue_t {
+ PKTPROC_UL_HIPRIO = 0,
+ PKTPROC_UL_QUEUE_0 = PKTPROC_UL_HIPRIO,
+ PKTPROC_UL_NORM = 1,
+ PKTPROC_UL_QUEUE_MAX
+};
+
+/*
+ * Descriptor structure mode
+ * 0: End bit is set by AP
+ * 1: End bit is set by CP
+ */
+enum pktproc_end_bit_owner {
+ END_BIT_AP,
+ END_BIT_CP
+};
+
+/* Padding required by CP */
+#define CP_PADDING 76
+#define MAX_UL_PACKET_SIZE 512
+#define HIPRIO_MAX_PACKET_SIZE roundup_pow_of_two(MAX_UL_PACKET_SIZE + CP_PADDING)
+
+/* Q_info */
struct pktproc_q_info_ul {
	u32 cp_desc_pbase;	/* CP-view physical base of the descriptor ring */
	u32 num_desc;		/* number of descriptors in the ring */
	u32 cp_buff_pbase;	/* CP-view physical base of the data buffer */
	u32 fore_ptr;		/* last end-bit-raised descriptor (AP write index) */
	u32 rear_ptr;		/* last descriptor read by CP */
} __packed;
+
+/* info for pktproc UL */
+struct pktproc_info_ul {
+ u32 num_queues:4, mode:4, max_packet_size:16, end_bit_owner:1, reserve1:7;
+ u32 cp_quota:16, reserve2:16;
+ struct pktproc_q_info_ul q_info[PKTPROC_UL_QUEUE_MAX];
+} __packed;
+
+struct pktproc_desc_ul {
+ u32 data_size:20, reserve1:12;
+ u32 total_pkt_size:20, reserve2:12;
+ u64 sktbuf_point:36, reserve3:12, ap2cp_pbp_info:16;
+ u32 last_desc:1, reserve4:31;
+ u32 hw_set:1, seg_on:1, reserve5:2, segment:2, reserve6:2,
+ lcid:8, ap2cp_info_pp:16;
+ u32 reserve7;
+ u32 reserve8;
+} __packed;
+
+/* Statistics */
+struct pktproc_statistics_ul {
+ /* count of trials to write a packet to pktproc UL */
+ u64 total_cnt;
+ /* number of times failed to write a packet due to full buffer */
+ u64 buff_full_cnt;
+ /* number of times failed to write a packet due to inactive q */
+ u64 inactive_cnt;
+ /* number of times succeed to write a packet to pktproc UL */
+ u64 pass_cnt;
+};
+
+/* Logical view for each queue */
+struct pktproc_queue_ul {
+ u32 q_idx;
+ atomic_t active; /* activated when pktproc ul init */
+ atomic_t busy; /* used for flow control */
+ spinlock_t lock;
+
+ struct mem_link_device *mld;
+ struct pktproc_adaptor_ul *ppa_ul;
+
+ u32 *fore_ptr; /* indicates the last last-bit raised desc pointer */
+ u32 done_ptr; /* indicates the last packet written by AP */
+ u32 *rear_ptr; /* indicates the last desc read by CP */
+
+ /* Store */
+ u64 cp_desc_pbase;
+ u32 num_desc;
+ u64 cp_buff_pbase;
+
+ struct pktproc_info_ul *ul_info;
+ struct pktproc_q_info_ul *q_info; /* Pointer to q_info of info_v */
+ struct pktproc_desc_ul *desc_ul;
+
+ u32 desc_size;
+ u64 buff_addr_cp; /* base data address value for cp */
+ u32 max_packet_size;
+
+ /* Pointer to data buffer */
+ u8 __iomem *q_buff_vbase;
+ u32 q_buff_size;
+
+ /* Statistics */
+ struct pktproc_statistics_ul stat;
+
+ /* Func */
+ int (*send_packet)(struct pktproc_queue_ul *q, struct sk_buff *new_skb);
+ int (*update_fore_ptr)(struct pktproc_queue_ul *q, u32 count);
+};
+
+/* PktProc adaptor for UL*/
+struct pktproc_adaptor_ul {
+ bool support; /* Is support PktProc feature? */
+
+ unsigned long long cp_base; /* CP base address for pktproc */
+ unsigned long info_rgn_offset; /* Offset of info region */
+ unsigned long info_rgn_size; /* Size of info region */
+ unsigned long desc_rgn_offset; /* Offset of descriptor region */
+ unsigned long desc_rgn_size; /* Size of descriptor region */
+ unsigned long buff_rgn_offset; /* Offset of data buffer region */
+ unsigned long buff_rgn_size; /* Size of data buffer region */
+
+ u32 num_queue; /* Number of queue */
+ u32 default_max_packet_size; /* packet size pktproc UL can hold */
+ u32 hiprio_ack_only;
+ enum pktproc_end_bit_owner end_bit_owner; /* owner to set end bit. AP:0, CP:1 */
+ u32 cp_quota; /* max number of buffers cp allows us to transfer */
+ bool use_hw_iocc; /* H/W IO cache coherency */
+ bool info_rgn_cached;
+ bool desc_rgn_cached;
+ bool buff_rgn_cached;
+ bool padding_required; /* requires extra length. (s5123 EVT1 only) */
+#if IS_ENABLED(CONFIG_EXYNOS_CPIF_IOMMU)
+ struct cpif_va_mapper *desc_map;
+ struct cpif_va_mapper *buff_map;
+#endif
+ void __iomem *info_vbase; /* I/O region for information */
+ void __iomem *desc_vbase; /* I/O region for descriptor */
+ void __iomem *buff_vbase; /* I/O region for data buffer */
+ struct pktproc_queue_ul *q[PKTPROC_UL_QUEUE_MAX];/* Logical queue */
+};
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+extern int pktproc_create_ul(struct platform_device *pdev,
+ struct mem_link_device *mld, unsigned long memaddr, u32 memsize);
+extern int pktproc_init_ul(struct pktproc_adaptor_ul *ppa_ul);
+static inline int pktproc_check_support_ul(struct pktproc_adaptor_ul *ppa_ul)
+{
+ return ppa_ul->support;
+}
/* Returns non-zero when UL pktproc is supported and queue @q_idx is activated */
static inline int pktproc_check_ul_q_active(struct pktproc_adaptor_ul *ppa_ul,
					    u32 q_idx)
{
	if (!pktproc_check_support_ul(ppa_ul))
		return 0;

	if (!ppa_ul->q[q_idx])
		return 0;

	return atomic_read(&ppa_ul->q[q_idx]->active);
}
+static inline bool pktproc_ul_q_empty(struct pktproc_q_info_ul *q_info)
+{
+ return (q_info->fore_ptr == q_info->rear_ptr);
+}
+#else
+static inline int pktproc_create_ul(struct platform_device *pdev,
+ struct mem_link_device *mld,
+ unsigned long memaddr, unsigned long memsize) { return 0; }
+static inline int pktproc_init_ul(struct pktproc_adaptor_ul *ppa_ul) { return 0; }
+static inline int pktproc_check_support_ul(struct pktproc_adaptor_ul *ppa_ul)
+{ return 0; }
+static inline int pktproc_check_ul_q_active(struct pktproc_adaptor_ul *ppa_ul,
+ u32 q_idx) { return 0; }
+static inline bool pktproc_ul_q_empty(struct pktproc_q_info_ul *q_info) { return 0; }
+#endif
+
+#endif /* __LINK_TX_PKTPROC_H__ */
+
diff --git a/mcu_ipc.c b/mcu_ipc.c
new file mode 100644
index 0000000..06b4e87
--- /dev/null
+++ b/mcu_ipc.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2020, Samsung Electronics.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include "mcu_ipc.h"
+#include "mcu_ipc_priv.h"
+#include "modem_utils.h"
+
+/* IRQ handler */
+/*
+ * Top-level hard-IRQ handler for one CP mailbox line.
+ * Reads the raised-and-unmasked status bits under reg_lock, acks them in
+ * the clear register, then dispatches each bit to its registered
+ * sub-handler. Raised bits with no registered handler are only logged.
+ */
+static irqreturn_t cp_mbox_irq_handler(int irq, void *data)
+{
+ struct cp_mbox_irq_data *irq_data = NULL;
+ u32 irq_stat;
+ int i;
+
+ irq_data = (struct cp_mbox_irq_data *)data;
+ if (!irq_data) {
+ mif_err_limited("irq_data is null\n");
+ return IRQ_HANDLED;
+ }
+
+ if (!irq_data->enable) {
+ mif_err_limited("irq_data %d is disabled\n", irq_data->idx);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * Check raised interrupts
+ * Only clear and handle unmasked interrupts
+ */
+ spin_lock(&mbox_data.reg_lock);
+
+ /* status AND'ed with this line's window, minus anything currently masked */
+ irq_stat = mcu_ipc_read(irq_data->sfr_rx.sr) & irq_data->sfr_rx.mask;
+ irq_stat &= ~(mcu_ipc_read(irq_data->sfr_rx.mr)) & irq_data->sfr_rx.mask;
+ /* ack exactly the bits we are about to dispatch */
+ mcu_ipc_write(irq_stat, irq_data->sfr_rx.cr);
+
+ spin_unlock(&mbox_data.reg_lock);
+
+ /* Call handlers */
+ for (i = 0; i < MAX_CP_MBOX_HANDLER; i++) {
+ if (irq_stat & (1 << (i + irq_data->sfr_rx.shift))) {
+ if ((1 << (i + irq_data->sfr_rx.shift)) & irq_data->registered_irq) {
+ irq_data->hd[i].handler(i, irq_data->hd[i].data);
+ } else {
+ mif_err_limited("unregistered:%d %d 0x%08x 0x%08lx 0x%08x\n",
+ irq_data->idx, i, irq_stat,
+ irq_data->unmasked_irq << irq_data->sfr_rx.shift,
+ mcu_ipc_read(irq_data->sfr_rx.mr));
+ }
+
+ irq_stat &= ~(1 << (i + irq_data->sfr_rx.shift));
+ }
+
+ /* early exit once every latched bit has been consumed */
+ if (!irq_stat)
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Register / Unregister */
+/*
+ * Register @handler (with @data) for mailbox interrupt source @int_num on
+ * mailbox line @idx and unmask it immediately.
+ * Returns 0 on success, -EINVAL on invalid arguments, -EACCES if the line
+ * is disabled in DT.
+ * Fix: validate @idx before indexing mbox_data.irq_data[] (was unchecked).
+ */
+int cp_mbox_register_handler(u32 idx, u32 int_num, irq_handler_t handler, void *data)
+{
+ struct cp_mbox_irq_data *irq_data;
+ unsigned long flags;
+
+ if (idx >= MAX_CP_MBOX_IRQ_IDX) {
+ mif_err_limited("idx error:%d\n", idx);
+ return -EINVAL;
+ }
+ irq_data = &mbox_data.irq_data[idx];
+
+ if (!handler) {
+ mif_err_limited("handler is null\n");
+ return -EINVAL;
+ }
+ if (int_num >= MAX_CP_MBOX_HANDLER) {
+ mif_err_limited("int_num error:%d\n", int_num);
+ return -EINVAL;
+ }
+ if (!irq_data->enable) {
+ mif_err_limited("irq_data %d is disabled\n", irq_data->idx);
+ return -EACCES;
+ }
+
+ spin_lock_irqsave(&mbox_data.reg_lock, flags);
+
+ irq_data->hd[int_num].data = data;
+ irq_data->hd[int_num].handler = handler;
+ /* registered_irq tracks SFR bit positions (shift applied) ... */
+ irq_data->registered_irq |= 1 << (int_num + irq_data->sfr_rx.shift);
+ /* ... while unmasked_irq tracks logical source numbers (no shift) */
+ set_bit(int_num, &irq_data->unmasked_irq);
+
+ spin_unlock_irqrestore(&mbox_data.reg_lock, flags);
+
+ cp_mbox_enable_handler(irq_data->idx, int_num);
+ mif_info("idx:%d num:%d intmr0:0x%08x\n",
+ irq_data->idx, int_num, mcu_ipc_read(irq_data->sfr_rx.mr));
+
+ return 0;
+}
+EXPORT_SYMBOL(cp_mbox_register_handler);
+
+/*
+ * Unregister @handler for mailbox interrupt source @int_num on line @idx.
+ * The source is masked before its slot is cleared.
+ * Returns 0 on success, -EINVAL on invalid arguments, -EACCES if the line
+ * is disabled in DT.
+ * Fixes: validate @idx and @int_num BEFORE dereferencing irq_data->hd[]
+ * (the old code indexed hd[int_num] with an unchecked value).
+ */
+int cp_mbox_unregister_handler(u32 idx, u32 int_num, irq_handler_t handler)
+{
+ struct cp_mbox_irq_data *irq_data;
+ unsigned long flags;
+
+ if (idx >= MAX_CP_MBOX_IRQ_IDX) {
+ mif_err_limited("idx error:%d\n", idx);
+ return -EINVAL;
+ }
+ if (int_num >= MAX_CP_MBOX_HANDLER) {
+ mif_err_limited("int_num error:%d\n", int_num);
+ return -EINVAL;
+ }
+ irq_data = &mbox_data.irq_data[idx];
+
+ if (!handler) {
+ mif_err_limited("handler is null\n");
+ return -EINVAL;
+ }
+ if (irq_data->hd[int_num].handler != handler) {
+ mif_err_limited("int_num error:%d\n", int_num);
+ return -EINVAL;
+ }
+ if (!irq_data->enable) {
+ mif_err_limited("irq_data %d is disabled\n", irq_data->idx);
+ return -EACCES;
+ }
+
+ /* mask first so the handler cannot fire while its slot is torn down */
+ cp_mbox_disable_handler(irq_data->idx, int_num);
+ mif_info("idx:%d num:%d intmr0:0x%08x\n",
+ irq_data->idx, int_num, mcu_ipc_read(irq_data->sfr_rx.mr));
+
+ spin_lock_irqsave(&mbox_data.reg_lock, flags);
+
+ irq_data->hd[int_num].data = NULL;
+ irq_data->hd[int_num].handler = NULL;
+ irq_data->registered_irq &= ~(1 << (int_num + irq_data->sfr_rx.shift));
+ clear_bit(int_num, &irq_data->unmasked_irq);
+
+ spin_unlock_irqrestore(&mbox_data.reg_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(cp_mbox_unregister_handler);
+
+/* Handler : Enable / Disable */
+/*
+ * Unmask mailbox interrupt source @int_num on line @idx.
+ * Clears the corresponding INTMR0 mask bit (if set) and records the
+ * source in irq_data->unmasked_irq.
+ * Returns 0 on success, -EINVAL if not registered, -EACCES if disabled.
+ */
+int cp_mbox_enable_handler(u32 idx, u32 int_num)
+{
+ struct cp_mbox_irq_data *irq_data = &mbox_data.irq_data[idx];
+ unsigned long flags;
+ unsigned long tmp;
+
+ /* The irq should have been registered. */
+ if (!(irq_data->registered_irq & BIT(int_num + irq_data->sfr_rx.shift))) {
+ mif_err_limited("int_num is not registered 0x%x %d\n",
+ irq_data->registered_irq, int_num);
+ return -EINVAL;
+ }
+ if (!irq_data->enable) {
+ mif_err_limited("irq_data %d is disabled\n", irq_data->idx);
+ return -EACCES;
+ }
+
+ spin_lock_irqsave(&mbox_data.reg_lock, flags);
+
+ tmp = mcu_ipc_read(irq_data->sfr_rx.mr);
+
+ /* Clear the mask if it was set. */
+ if (test_and_clear_bit(int_num + irq_data->sfr_rx.shift, &tmp))
+ mcu_ipc_write(tmp, irq_data->sfr_rx.mr);
+
+ /* Mark the irq as unmasked */
+ set_bit(int_num, &irq_data->unmasked_irq);
+
+ spin_unlock_irqrestore(&mbox_data.reg_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(cp_mbox_enable_handler);
+
+/*
+ * Mask mailbox interrupt source @int_num on line @idx.
+ * After masking, the status bit is re-armed via INTGR0 so a pending event
+ * can later be observed by cp_mbox_check_handler().
+ * NOTE(review): the udelay(5) settling waits after the SFR writes are
+ * hardware-specific; confirm against the mailbox IP manual before changing.
+ * Returns 0 on success, -EINVAL if not registered, -EACCES if disabled.
+ */
+int cp_mbox_disable_handler(u32 idx, u32 int_num)
+{
+ struct cp_mbox_irq_data *irq_data = &mbox_data.irq_data[idx];
+ unsigned long flags;
+ unsigned long irq_mask;
+
+ /* The interrupt must have been registered. */
+ if (!(irq_data->registered_irq & BIT(int_num + irq_data->sfr_rx.shift))) {
+ mif_err_limited("int_num is not registered 0x%x %d\n",
+ irq_data->registered_irq, int_num);
+ return -EINVAL;
+ }
+ if (!irq_data->enable) {
+ mif_err_limited("irq_data %d is disabled\n", irq_data->idx);
+ return -EACCES;
+ }
+
+ /* Set the mask */
+ spin_lock_irqsave(&mbox_data.reg_lock, flags);
+
+ irq_mask = mcu_ipc_read(irq_data->sfr_rx.mr);
+
+ /* Set the mask if it was not already set */
+ if (!test_and_set_bit(int_num + irq_data->sfr_rx.shift, &irq_mask)) {
+ mcu_ipc_write(irq_mask, irq_data->sfr_rx.mr);
+ udelay(5);
+
+ /* Reset the status bit to signal interrupt needs handling */
+ mcu_ipc_write(BIT(int_num + irq_data->sfr_rx.shift), irq_data->sfr_rx.gr);
+ udelay(5);
+ }
+
+ /* Remove the irq from the umasked irqs */
+ clear_bit(int_num, &irq_data->unmasked_irq);
+
+ spin_unlock_irqrestore(&mbox_data.reg_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(cp_mbox_disable_handler);
+
+/*
+ * This function is used to check the state of the mailbox interrupt
+ * when the interrupt after the interrupt has been masked. This can be
+ * used to check if a new interrupt has been set after being masked. A
+ * masked interrupt will have its status set but will not generate a hard
+ * interrupt. This function will check and clear the status.
+ *
+ * Returns 1 if a pending status bit was found (and cleared), 0 if not,
+ * or a negative errno on invalid arguments / state.
+ */
+int cp_mbox_check_handler(u32 idx, u32 int_num)
+{
+ struct cp_mbox_irq_data *irq_data = &mbox_data.irq_data[idx];
+ unsigned long flags;
+ u32 irq_stat;
+
+ /* Interrupt must have been registered. */
+ if (!(irq_data->registered_irq & BIT(int_num + irq_data->sfr_rx.shift))) {
+ mif_err_limited("int_num is not registered 0x%x %d\n",
+ irq_data->registered_irq, int_num);
+ return -EINVAL;
+ }
+ if (!irq_data->enable) {
+ mif_err_limited("irq_data %d is disabled\n", irq_data->idx);
+ return -EACCES;
+ }
+
+ spin_lock_irqsave(&mbox_data.reg_lock, flags);
+
+ /* Interrupt must have been masked. */
+ if (test_bit(int_num, &irq_data->unmasked_irq)) {
+ spin_unlock_irqrestore(&mbox_data.reg_lock, flags);
+ mif_err_limited("Mailbox interrupt (idx: %d, num: %d) is unmasked!\n",
+ irq_data->idx, int_num);
+ return -EINVAL;
+ }
+
+ /* Check and clear the interrupt status bit. */
+ irq_stat = mcu_ipc_read(irq_data->sfr_rx.sr) & BIT(int_num + irq_data->sfr_rx.shift);
+ if (irq_stat)
+ mcu_ipc_write(irq_stat, irq_data->sfr_rx.cr);
+
+ spin_unlock_irqrestore(&mbox_data.reg_lock, flags);
+
+ return irq_stat != 0;
+}
+EXPORT_SYMBOL(cp_mbox_check_handler);
+
+/* Set AP2CP interrupt */
+/*
+ * Raise interrupt @int_num toward CP on mailbox line @idx by writing the
+ * corresponding bit of the TX generation register.
+ */
+void cp_mbox_set_interrupt(u32 idx, u32 int_num)
+{
+ struct cp_mbox_irq_data *irq_data = &mbox_data.irq_data[idx];
+ u32 intgr_bit = BIT(int_num + irq_data->sfr_tx.shift);
+
+ mcu_ipc_write(intgr_bit, irq_data->sfr_tx.gr);
+}
+EXPORT_SYMBOL(cp_mbox_set_interrupt);
+
+/* Shared register : Get / Set / Extract / Update / Dump */
+/*
+ * Validate a shared-register index against the DT-provided count.
+ * NOTE(review): the bound uses '>' so sr_num == num_shared_reg passes;
+ * if sr_num is 0-based that is an off-by-one — confirm the numbering
+ * convention against the DT bindings and callers before changing.
+ */
+static bool is_valid_sr(u32 sr_num)
+{
+ if (!mbox_data.num_shared_reg) {
+ mif_err("num_shared_reg is 0\n");
+ return false;
+ }
+
+ if (sr_num > mbox_data.num_shared_reg) {
+ mif_err("num_shared_reg is %d:%d\n",
+ sr_num, mbox_data.num_shared_reg);
+ return false;
+ }
+
+ return true;
+}
+
+/* Read shared register @sr_num; returns 0 when the index is invalid. */
+u32 cp_mbox_get_sr(u32 sr_num)
+{
+ u32 reg_off;
+
+ if (!is_valid_sr(sr_num))
+ return 0;
+
+ reg_off = mbox_data.shared_reg_offset + sr_num * 4;
+ return mcu_ipc_read(reg_off);
+}
+EXPORT_SYMBOL(cp_mbox_get_sr);
+
+/* Extract the @mask-wide field at bit @pos from shared register @sr_num. */
+u32 cp_mbox_extract_sr(u32 sr_num, u32 mask, u32 pos)
+{
+ u32 whole;
+
+ if (!is_valid_sr(sr_num))
+ return 0;
+
+ whole = cp_mbox_get_sr(sr_num);
+ return (whole >> pos) & mask;
+}
+EXPORT_SYMBOL(cp_mbox_extract_sr);
+
+/* Write @msg to shared register @sr_num; no-op when the index is invalid. */
+void cp_mbox_set_sr(u32 sr_num, u32 msg)
+{
+ u32 reg_off;
+
+ if (!is_valid_sr(sr_num))
+ return;
+
+ reg_off = mbox_data.shared_reg_offset + sr_num * 4;
+ mcu_ipc_write(msg, reg_off);
+}
+EXPORT_SYMBOL(cp_mbox_set_sr);
+
+/*
+ * Read-modify-write a bit-field of shared register @sr_num:
+ * clears @mask bits at @pos, then ORs in (@msg & @mask) << @pos.
+ * The whole RMW sequence runs under reg_lock so it is atomic against
+ * other shared-register writers.
+ */
+void cp_mbox_update_sr(u32 sr_num, u32 msg, u32 mask, u32 pos)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (!is_valid_sr(sr_num))
+ return;
+
+ spin_lock_irqsave(&mbox_data.reg_lock, flags);
+
+ val = cp_mbox_get_sr(sr_num);
+ val &= ~(mask << pos);
+ val |= (msg & mask) << pos;
+ cp_mbox_set_sr(sr_num, val);
+
+ spin_unlock_irqrestore(&mbox_data.reg_lock, flags);
+}
+EXPORT_SYMBOL(cp_mbox_update_sr);
+
+/* Log every shared register (index and value) under reg_lock. */
+void cp_mbox_dump_sr(void)
+{
+ unsigned long flags;
+ u32 idx;
+
+ spin_lock_irqsave(&mbox_data.reg_lock, flags);
+
+ for (idx = 0; idx < mbox_data.num_shared_reg; idx++) {
+ u32 val = mcu_ipc_read(mbox_data.shared_reg_offset + idx * 4);
+
+ mif_info("mbox dump: 0x%02x: 0x%04x\n", idx, val);
+ }
+
+ spin_unlock_irqrestore(&mbox_data.reg_lock, flags);
+}
+EXPORT_SYMBOL(cp_mbox_dump_sr);
+
+/* Reset */
+/*
+ * Reset the mailbox block (e.g. after a CP restart).
+ * Optionally pulses the MCUCTLR SW-reset bit, then for every enabled
+ * line rebuilds the RX mask from the recorded unmasked_irq set and
+ * clears all pending RX status bits.
+ * NOTE(review): the loop stops at the first entry without a name, which
+ * assumes DT fills irq_data[] densely from index 0 — confirm.
+ */
+void cp_mbox_reset(void)
+{
+ u32 reg_val;
+ int i;
+
+ mif_info("Reset mailbox registers\n");
+
+ if (mbox_data.use_sw_reset_reg) {
+ reg_val = mcu_ipc_read(EXYNOS_MCU_IPC_MCUCTLR);
+ reg_val |= (0x1 << MCU_IPC_MCUCTLR_MSWRST);
+
+ mcu_ipc_write(reg_val, EXYNOS_MCU_IPC_MCUCTLR);
+ udelay(5);
+ }
+
+ for (i = 0; i < MAX_CP_MBOX_IRQ_IDX; i++) {
+ struct cp_mbox_irq_data *irq_data = NULL;
+
+ irq_data = &mbox_data.irq_data[i];
+ if (!irq_data || !irq_data->name)
+ break;
+
+ /* mask = complement of the sources recorded as unmasked */
+ mcu_ipc_write(~(irq_data->unmasked_irq) << irq_data->sfr_rx.shift,
+ irq_data->sfr_rx.mr);
+ mif_info("idx:%d intmr0:0x%08x\n", irq_data->idx,
+ mcu_ipc_read(irq_data->sfr_rx.mr));
+
+ /* clear any stale pending status for this line's window */
+ mcu_ipc_write(irq_data->sfr_rx.mask, irq_data->sfr_rx.cr);
+ }
+}
+EXPORT_SYMBOL(cp_mbox_reset);
+
+/* IRQ affinity */
+/*
+ * Return the saved IRQ affinity for mailbox line @idx, or a negative
+ * errno on invalid/disabled lines.
+ * Fix: the old '!irq_data' test could never fire (address of an array
+ * element is never NULL); replaced with a real bounds check on @idx.
+ */
+int cp_mbox_get_affinity(u32 idx)
+{
+ struct cp_mbox_irq_data *irq_data;
+
+ if (idx >= MAX_CP_MBOX_IRQ_IDX) {
+ mif_err("idx %d is out of range\n", idx);
+ return -EINVAL;
+ }
+ irq_data = &mbox_data.irq_data[idx];
+
+ if (!irq_data->enable) {
+ mif_err_limited("irq_data %d is disabled\n", irq_data->idx);
+ return -EACCES;
+ }
+
+ return irq_data->affinity;
+}
+EXPORT_SYMBOL(cp_mbox_get_affinity);
+
+/*
+ * Pin the hard IRQ of mailbox line @idx to CPU @affinity and remember
+ * the value so resume can re-apply it.
+ * Fixes: real bounds check on @idx (the old '!irq_data' test was dead
+ * code) and rejection of negative @affinity before cpumask_of().
+ */
+int cp_mbox_set_affinity(u32 idx, int affinity)
+{
+ struct cp_mbox_irq_data *irq_data;
+ int num_cpu;
+
+ if (idx >= MAX_CP_MBOX_IRQ_IDX) {
+ mif_err("idx %d is out of range\n", idx);
+ return -EINVAL;
+ }
+ irq_data = &mbox_data.irq_data[idx];
+
+ if (!irq_data->enable) {
+ mif_err_limited("irq_data %d is disabled\n", irq_data->idx);
+ return -EACCES;
+ }
+
+#if defined(CONFIG_VENDOR_NR_CPUS)
+ num_cpu = CONFIG_VENDOR_NR_CPUS;
+#else
+ num_cpu = 8;
+#endif
+ if (affinity < 0 || affinity >= num_cpu) {
+ mif_err("idx:%d affinity:%d error. cpu max:%d\n",
+ idx, affinity, num_cpu);
+ return -EINVAL;
+ }
+
+ mif_debug("idx:%d affinity:0x%x\n", idx, affinity);
+ irq_data->affinity = affinity;
+
+ return irq_set_affinity_hint(irq_data->irq, cpumask_of(affinity));
+}
+EXPORT_SYMBOL(cp_mbox_set_affinity);
+
+/* Probe */
+/*
+ * Parse the cp_mailbox DT node, map the mailbox SFR region, and request
+ * one wakeup-capable IRQ per enabled "cp_mailbox_irqs" child node.
+ * NOTE(review): every failure path deliberately calls panic() — the
+ * modem interface cannot function without its mailbox.
+ */
+static int cp_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *irq_np = NULL;
+ struct device_node *irq_child_np = NULL;
+ u32 count = 0;
+ int irq;
+ int err = 0;
+ u32 idx = 0;
+ u32 offset[4] = {};
+
+ mif_info("+++\n");
+
+ if (!dev->of_node) {
+ mif_err("dev->of_node is null\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
+ /* DMA mask */
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+ /* Region */
+ mbox_data.ioaddr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox_data.ioaddr)) {
+ mif_err("failed to request memory resource\n");
+ err = PTR_ERR(mbox_data.ioaddr);
+ goto fail;
+ }
+
+ mbox_data.dev = &pdev->dev;
+ spin_lock_init(&mbox_data.reg_lock);
+
+ /* Shared register */
+ mif_dt_read_u32(dev->of_node, "num_shared_reg", mbox_data.num_shared_reg);
+ mif_dt_read_u32(dev->of_node, "shared_reg_offset", mbox_data.shared_reg_offset);
+ mif_info("num_shared_reg:%d shared_reg_offset:0x%x\n",
+ mbox_data.num_shared_reg, mbox_data.shared_reg_offset);
+
+ /* SW reset reg */
+ mif_dt_read_bool(dev->of_node, "use_sw_reset_reg", mbox_data.use_sw_reset_reg);
+ mif_info("use_sw_reset_reg:%d\n", mbox_data.use_sw_reset_reg);
+
+ /* Interrupt */
+ irq_np = of_get_child_by_name(dev->of_node, "cp_mailbox_irqs");
+ if (!irq_np) {
+ mif_err("of_get_child_by_name() error:irq_np\n");
+ err = -EINVAL;
+ goto fail;
+ }
+ for_each_child_of_node(irq_np, irq_child_np) {
+ struct cp_mbox_irq_data *irq_data = NULL;
+
+ if (count >= MAX_CP_MBOX_IRQ_IDX) {
+ mif_err("count is full:%d\n", count);
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /* IRQ index */
+ mif_dt_read_u32(irq_child_np, "cp_irq,idx", idx);
+ irq_data = &mbox_data.irq_data[idx];
+ if (!irq_data) {
+ mif_err("irq_data %d is null\n", idx);
+ err = -EINVAL;
+ goto fail;
+ }
+ irq_data->idx = idx;
+
+ /* Enable */
+ mif_dt_read_bool(irq_child_np, "cp_irq,enable", irq_data->enable);
+ if (!irq_data->enable) {
+ mif_err("irq_data %d is disabled\n", idx);
+ count++;
+ continue;
+ }
+
+ /* Name */
+ mif_dt_read_string(irq_child_np, "cp_irq,name", irq_data->name);
+
+ /* SFR */
+ /* offset[] = { rx reg offset, rx shift, tx reg offset, tx shift } */
+ of_property_read_u32_array(irq_child_np, "cp_irq,sfr", offset, 4);
+ irq_data->sfr_rx.gr = EXYNOS_MCU_IPC_INTGR0 + offset[0];
+ irq_data->sfr_rx.cr = EXYNOS_MCU_IPC_INTCR0 + offset[0];
+ irq_data->sfr_rx.mr = EXYNOS_MCU_IPC_INTMR0 + offset[0];
+ irq_data->sfr_rx.sr = EXYNOS_MCU_IPC_INTSR0 + offset[0];
+ irq_data->sfr_rx.msr = EXYNOS_MCU_IPC_INTMSR0 + offset[0];
+ irq_data->sfr_rx.shift = offset[1];
+ irq_data->sfr_rx.mask = 0xFFFF << offset[1];
+
+ irq_data->sfr_tx.gr = EXYNOS_MCU_IPC_INTGR0 + offset[2];
+ irq_data->sfr_tx.cr = EXYNOS_MCU_IPC_INTCR0 + offset[2];
+ irq_data->sfr_tx.mr = EXYNOS_MCU_IPC_INTMR0 + offset[2];
+ irq_data->sfr_tx.sr = EXYNOS_MCU_IPC_INTSR0 + offset[2];
+ irq_data->sfr_tx.msr = EXYNOS_MCU_IPC_INTMSR0 + offset[2];
+ irq_data->sfr_tx.shift = offset[3];
+ irq_data->sfr_tx.mask = 0xFFFF << offset[3];
+
+ /* Request IRQ */
+ irq = platform_get_irq(pdev, irq_data->idx);
+ err = devm_request_irq(&pdev->dev, irq, cp_mbox_irq_handler,
+ IRQF_ONESHOT, irq_data->name, irq_data);
+ if (err) {
+ mif_err("devm_request_irq() error:%d\n", err);
+ goto fail;
+ }
+ /* mailbox IRQs must be able to wake the AP from suspend */
+ err = enable_irq_wake(irq);
+ if (err) {
+ mif_err("enable_irq_wake() error:%d\n", err);
+ goto fail;
+ }
+ irq_data->irq = irq;
+
+ /* IRQ affinity */
+ mif_dt_read_u32(irq_child_np, "cp_irq,affinity", irq_data->affinity);
+ err = cp_mbox_set_affinity(irq_data->idx, irq_data->affinity);
+ if (err)
+ mif_err("cp_mbox_set_affinity() error:%d\n", err);
+
+ /* Init CP2AP interrupt */
+ mcu_ipc_write(irq_data->sfr_rx.mask, irq_data->sfr_rx.mr);
+ mcu_ipc_write(irq_data->sfr_rx.mask, irq_data->sfr_rx.cr);
+
+ mif_info("count:%d idx:%d name:%s rx.gr:0x%02x rx.shift:%d tx.gr:0x%02x tx.shift:%d affinity:%d mr:0x%08x\n",
+ count, irq_data->idx, irq_data->name,
+ irq_data->sfr_rx.gr, irq_data->sfr_rx.shift,
+ irq_data->sfr_tx.gr, irq_data->sfr_tx.shift,
+ irq_data->affinity, mcu_ipc_read(irq_data->sfr_rx.mr));
+
+ count++;
+ }
+
+ dev_set_drvdata(dev, &mbox_data);
+
+ mif_info("---\n");
+
+ return 0;
+
+fail:
+ panic("CP mbox probe failed\n");
+ return err;
+}
+
+/* Remove callback: nothing to undo — all resources are devm-managed. */
+static int cp_mbox_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+/* PM suspend callback: no mailbox state needs saving. */
+static int cp_mbox_suspend(struct device *dev)
+{
+ return 0;
+}
+
+/*
+ * PM resume callback: re-apply the saved IRQ affinity for every enabled
+ * mailbox line (affinity hints may not survive suspend/resume).
+ */
+static int cp_mbox_resume(struct device *dev)
+{
+ struct cp_mbox_drv_data *data = dev->driver_data;
+ int i;
+
+ if (!data) {
+ mif_err_limited("data is null\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_CP_MBOX_IRQ_IDX; i++) {
+ struct cp_mbox_irq_data *irq_data = NULL;
+
+ irq_data = &data->irq_data[i];
+ /* NOTE(review): this null check can never fire (array element address) */
+ if (!irq_data) {
+ mif_err_limited("irq_data %d is null\n", i);
+ return -EINVAL;
+ }
+
+ if (!irq_data->enable)
+ continue;
+
+ cp_mbox_set_affinity(irq_data->idx, irq_data->affinity);
+ }
+
+ return 0;
+}
+
+/* PM callbacks: only resume does real work (re-applies IRQ affinity). */
+static const struct dev_pm_ops cp_mbox_pm_ops = {
+ .suspend = cp_mbox_suspend,
+ .resume = cp_mbox_resume,
+};
+
+/* DT match table: binds to the "samsung,exynos-cp-mailbox" node. */
+static const struct of_device_id cp_mbox_dt_match[] = {
+ { .compatible = "samsung,exynos-cp-mailbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cp_mbox_dt_match);
+
+static struct platform_driver cp_mbox_driver = {
+ .probe = cp_mbox_probe,
+ .remove = cp_mbox_remove,
+ .driver = {
+ .name = "cp_mailbox",
+ /* NOTE(review): .owner is redundant — platform_driver_register() sets it */
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cp_mbox_dt_match),
+ .pm = &cp_mbox_pm_ops,
+ /* the modem stack cannot survive a manual unbind via sysfs */
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(cp_mbox_driver);
+
+MODULE_DESCRIPTION("Exynos CP mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/mcu_ipc.h b/mcu_ipc.h
new file mode 100644
index 0000000..6933278
--- /dev/null
+++ b/mcu_ipc.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014-2020, Samsung Electronics.
+ *
+ * Public API of the CP mailbox driver (mcu_ipc.c).
+ * When CONFIG_MCU_IPC is off, every call degrades to a no-op stub so
+ * callers need no conditional compilation of their own.
+ */
+
+#ifndef __MCU_IPC_H__
+#define __MCU_IPC_H__
+
+#if IS_ENABLED(CONFIG_MCU_IPC)
+extern int cp_mbox_register_handler(u32 idx, u32 int_num, irq_handler_t handler, void *data);
+extern int cp_mbox_unregister_handler(u32 idx, u32 int_num, irq_handler_t handler);
+extern int cp_mbox_enable_handler(u32 idx, u32 int_num);
+extern int cp_mbox_disable_handler(u32 idx, u32 int_num);
+extern int cp_mbox_check_handler(u32 idx, u32 int_num);
+
+extern void cp_mbox_set_interrupt(u32 idx, u32 int_num);
+
+/* Shared-register accessors (indices validated against DT num_shared_reg) */
+extern u32 cp_mbox_get_sr(u32 sr_num);
+extern u32 cp_mbox_extract_sr(u32 sr_num, u32 mask, u32 pos);
+extern void cp_mbox_set_sr(u32 sr_num, u32 msg);
+extern void cp_mbox_update_sr(u32 sr_num, u32 msg, u32 mask, u32 pos);
+extern void cp_mbox_dump_sr(void);
+
+extern void cp_mbox_reset(void);
+extern int cp_mbox_set_affinity(u32 idx, int affinity);
+#else /* CONFIG_MCU_IPC */
+static inline int cp_mbox_register_handler(u32 idx, u32 int_num, irq_handler_t handler, void *data)
+{ return 0; }
+static inline int cp_mbox_unregister_handler(u32 idx, u32 int_num, irq_handler_t handler)
+{ return 0; }
+static inline int cp_mbox_enable_handler(u32 idx, u32 int_num) { return 0; }
+static inline int cp_mbox_disable_handler(u32 idx, u32 int_num) { return 0; }
+static inline int cp_mbox_check_handler(u32 idx, u32 int_num) { return 0; }
+
+static inline void cp_mbox_set_interrupt(u32 idx, u32 int_num) { return; }
+
+static inline u32 cp_mbox_get_sr(u32 sr_num) { return 0; }
+static inline u32 cp_mbox_extract_sr(u32 sr_num, u32 mask, u32 pos) { return 0; }
+static inline void cp_mbox_set_sr(u32 sr_num, u32 msg) { return; }
+static inline void cp_mbox_update_sr(u32 sr_num, u32 msg, u32 mask, u32 pos) { return; }
+static inline void cp_mbox_dump_sr(void) { return; }
+
+static inline void cp_mbox_reset(void) { return; }
+static inline int cp_mbox_set_affinity(u32 idx, int affinity) { return 0; }
+#endif /* CONFIG_MCU_IPC */
+#endif /* __MCU_IPC_H__ */
diff --git a/mcu_ipc_priv.h b/mcu_ipc_priv.h
new file mode 100644
index 0000000..b0f50c7
--- /dev/null
+++ b/mcu_ipc_priv.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014-2020, Samsung Electronics.
+ *
+ * Private state and SFR layout for the CP mailbox driver.
+ */
+
+#ifndef __MCU_IPC_PRIV_H__
+#define __MCU_IPC_PRIV_H__
+
+#include <dt-bindings/soc/google/exynos-cpif.h>
+
+/* Registers */
+#define EXYNOS_MCU_IPC_MCUCTLR 0x0
+#define EXYNOS_MCU_IPC_INTGR0 0x8
+#define EXYNOS_MCU_IPC_INTCR0 0xc
+#define EXYNOS_MCU_IPC_INTMR0 0x10
+#define EXYNOS_MCU_IPC_INTSR0 0x14
+#define EXYNOS_MCU_IPC_INTMSR0 0x18
+
+/* Bits definition */
+#define MCU_IPC_MCUCTLR_MSWRST (0)
+
+/* Up to 16 interrupt sources per mailbox line */
+#define MAX_CP_MBOX_HANDLER 16
+/* One registered sub-handler (callback + cookie) for a single source */
+struct cp_mbox_handler {
+ void *data;
+ irq_handler_t handler;
+};
+
+/* Per-direction SFR offsets for one mailbox line (gr=generate, cr=clear,
+ * mr=mask, sr=status, msr=masked status), plus bit window (shift/mask).
+ */
+struct cp_mbox_irq_sfr {
+ u32 gr;
+ u32 cr;
+ u32 mr;
+ u32 sr;
+ u32 msr;
+ u32 mask;
+ u32 shift;
+};
+
+/* State of one mailbox IRQ line, populated from DT at probe time */
+struct cp_mbox_irq_data {
+ char *name;
+ u32 idx;
+ bool enable;
+
+ struct cp_mbox_irq_sfr sfr_rx;
+ struct cp_mbox_irq_sfr sfr_tx;
+
+ int irq;
+ int affinity;
+ /* bitmap in SFR positions (shift applied) of registered sources */
+ u32 registered_irq;
+ /* bitmap in logical positions (no shift) of unmasked sources */
+ unsigned long unmasked_irq;
+
+ struct cp_mbox_handler hd[MAX_CP_MBOX_HANDLER];
+};
+
+/* Driver-wide state (SFR mapping, shared registers, per-line IRQ data) */
+struct cp_mbox_drv_data {
+ void __iomem *ioaddr;
+
+ struct device *dev;
+
+ u32 num_shared_reg;
+ u32 shared_reg_offset;
+ bool use_sw_reset_reg;
+
+ struct cp_mbox_irq_data irq_data[MAX_CP_MBOX_IRQ_IDX];
+
+ /* serializes all mailbox SFR read-modify-write sequences */
+ spinlock_t reg_lock;
+
+ int irq;
+};
+
+/*
+ * NOTE(review): this instance is defined 'static' in a header, so every
+ * .c file including it gets its own private copy — safe only while
+ * mcu_ipc.c is the sole includer; confirm before reusing this header.
+ */
+static struct cp_mbox_drv_data mbox_data;
+
+/* Write @val to the mailbox SFR at byte offset @reg */
+static inline void mcu_ipc_write(u32 val, u32 reg)
+{
+ writel(val, mbox_data.ioaddr + reg);
+}
+
+/* Read the mailbox SFR at byte offset @reg */
+static inline u32 mcu_ipc_read(u32 reg)
+{
+ return readl(mbox_data.ioaddr + reg);
+}
+
+#endif /* __MCU_IPC_PRIV_H__ */
diff --git a/modem_ctrl.c b/modem_ctrl.c
new file mode 100644
index 0000000..819022f
--- /dev/null
+++ b/modem_ctrl.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Samsung Electronics.
+ *
+ */
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+
+/*
+ * Publish the AP kernel time (seconds-within-hour plus microseconds)
+ * to CP, either via dedicated DRAM_V2 control messages or packed into
+ * the shared ap2cp_kerneltime word.
+ */
+void modem_ctrl_set_kerneltime(struct modem_ctl *mc)
+{
+ struct modem_data *modem = mc->mdm_data;
+ struct mem_link_device *mld = modem->mld;
+ struct utc_time t;
+ int sec;
+
+ get_utc_time(&t);
+ sec = t.sec + (t.min * 60);
+ mif_info("time = %d.%06d\n", sec, t.us);
+
+ if (mld->ap2cp_kerneltime_sec.type == DRAM_V2) {
+ /* dedicated sec/usec control messages */
+ set_ctrl_msg(&mld->ap2cp_kerneltime_sec, sec);
+ set_ctrl_msg(&mld->ap2cp_kerneltime_usec, t.us);
+ return;
+ }
+
+ /* legacy layout: sec and usec share one control word */
+ update_ctrl_msg(&mld->ap2cp_kerneltime, sec,
+ modem->sbi_ap2cp_kerneltime_sec_mask,
+ modem->sbi_ap2cp_kerneltime_sec_pos);
+ update_ctrl_msg(&mld->ap2cp_kerneltime, t.us,
+ modem->sbi_ap2cp_kerneltime_usec_mask,
+ modem->sbi_ap2cp_kerneltime_usec_pos);
+}
+
+/* Compare one shmem offset field against the expected value; logs and
+ * returns -EFAULT on mismatch, 0 on match. (Log output is identical to
+ * the previous per-field messages.)
+ */
+static int check_mem_offset(const char *name, void __iomem *addr, u32 expected)
+{
+ u32 value = ioread32(addr);
+
+ if (value != expected) {
+ mif_err("ERR: %s was damaged: 0x%X (expected: 0x%X)\n",
+ name, value, expected);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * Verify the offset words the AP wrote into the CP shared memory region
+ * have not been corrupted. Each offset is only checked when its
+ * corresponding offset_* enable flag is set (capability additionally
+ * requires ld->capability_check). Panics on any mismatch.
+ * Returns 0 when all configured offsets match.
+ */
+int modem_ctrl_check_offset_data(struct modem_ctl *mc)
+{
+ struct modem_data *modem = mc->mdm_data;
+ struct mem_link_device *mld = modem->mld;
+ struct link_device *ld = get_current_link(mc->iod);
+
+ if (modem->offset_cmsg_offset &&
+ check_mem_offset("cmsg_offset", mld->cmsg_offset, modem->cmsg_offset))
+ goto data_error;
+
+ if (modem->offset_srinfo_offset &&
+ check_mem_offset("srinfo_offset", mld->srinfo_offset, modem->srinfo_offset))
+ goto data_error;
+
+ if (modem->offset_clk_table_offset &&
+ check_mem_offset("clk_table_offset", mld->clk_table_offset,
+ modem->clk_table_offset))
+ goto data_error;
+
+ if (modem->offset_buff_desc_offset &&
+ check_mem_offset("buff_desc_offset", mld->buff_desc_offset,
+ modem->buff_desc_offset))
+ goto data_error;
+
+ if (ld->capability_check && modem->offset_capability_offset &&
+ check_mem_offset("capability_offset", mld->capability_offset,
+ modem->capability_offset))
+ goto data_error;
+
+ mif_info("offset data is ok\n");
+ return 0;
+
+data_error:
+ mif_err("offset data is damaged\n");
+ panic("CPIF shmem offset data is damaged\n");
+ return -EFAULT;
+}
+
+/* change the modem state & notify io devices about this change */
+void change_modem_state(struct modem_ctl *mc, enum modem_state state)
+{
+ struct io_device *iod;
+ enum modem_state prev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mc->lock, flags);
+ prev = mc->phone_state;
+ if (prev == state) {
+ /* unchanged state: nobody needs to be woken */
+ spin_unlock_irqrestore(&mc->lock, flags);
+ return;
+ }
+ mc->phone_state = state;
+ spin_unlock_irqrestore(&mc->lock, flags);
+
+ mif_info("%s->state changed (%s -> %s)\n", mc->name,
+ cp_state_str(prev), cp_state_str(state));
+
+ /* wake every opened io device waiting on a state transition */
+ list_for_each_entry(iod, &mc->modem_state_notify_list, list) {
+ if (atomic_read(&iod->opened) > 0)
+ wake_up(&iod->wq);
+ }
+}
diff --git a/modem_ctrl.h b/modem_ctrl.h
new file mode 100644
index 0000000..4d1a736
--- /dev/null
+++ b/modem_ctrl.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Samsung Electronics.
+ *
+ * Shared modem-control helpers, plus S51xx boot-stage definitions used
+ * by the PCIe (S5100-family) modem control path.
+ */
+
+#ifndef __MODEM_CTRL_H__
+#define __MODEM_CTRL_H__
+
+#define MIF_INIT_TIMEOUT (15 * HZ)
+
+#if IS_ENABLED(CONFIG_SEC_MODEM_S5100)
+/*
+ * Layout of the MSI handshake region shared with the CP over PCIe.
+ * NOTE(review): field meanings are inferred from names — confirm
+ * against the CP boot protocol specification.
+ */
+struct msi_reg_type {
+ u32 msi_data;
+ u32 msi_check;
+ u32 err_report;
+ u32 reserved;
+ u32 boot_stage;
+ u32 img_addr_lo;
+ u32 img_addr_hi;
+ u32 img_size;
+ u32 otp_version;
+ u32 unused[3];
+ u32 flag_cafe;
+ u32 sub_boot_stage;
+ u32 db_loop_cnt;
+ u32 db_received;
+ u32 boot_size;
+};
+
+/* Bit positions reported in boot_stage by an S5400 modem */
+enum boot_stage_bit_s5400 {
+ BOOT_STAGE_5400_ROM_BIT,
+ BOOT_STAGE_5400_PCI_LINKUP_START_BIT,
+ BOOT_STAGE_5400_PCI_LTSSM_DISABLE_BIT,
+ BOOT_STAGE_5400_PCI_PHY_INIT_DONE_BIT,
+ BOOT_STAGE_5400_PCI_DBI_DONE_BIT,
+ BOOT_STAGE_5400_PCI_MSI_START_BIT,
+ BOOT_STAGE_5400_PCI_WAIT_DOORBELL_BIT,
+ BOOT_STAGE_5400_DOWNLOAD_PBL_BIT,
+ BOOT_STAGE_5400_DOWNLOAD_PSP_BL1_DONE_BIT,
+ BOOT_STAGE_5400_DOWNLOAD_HOST_BL1_DONE_BIT,
+ BOOT_STAGE_5400_DOWNLOAD_HOST_BL1B_DONE_BIT,
+ BOOT_STAGE_5400_DOWNLOAD_PBL_DONE_BIT,
+ BOOT_STAGE_5400_BL1_WAIT_DOORBELL_BIT,
+ BOOT_STAGE_5400_BL1_DOWNLOAD_DONE_BIT,
+ BOOT_STAGE_5400_RESERVED_BIT,
+ /* Not documented, but this is the last stage */
+ BOOT_STAGE_5400_DONE_BIT,
+};
+/* Bit positions reported in boot_stage by an S5300 modem */
+enum boot_stage_bit_s5300 {
+ BOOT_STAGE_5300_ROM_BIT,
+ BOOT_STAGE_5300_PCI_LINKUP_START_BIT,
+ BOOT_STAGE_5300_PCI_PHY_INIT_DONE_BIT,
+ BOOT_STAGE_5300_PCI_DBI_DONE_BIT,
+ BOOT_STAGE_5300_PCI_LTSSM_DISABLE_BIT,
+ BOOT_STAGE_5300_PCI_LTSSM_ENABLE_BIT,
+ BOOT_STAGE_5300_PCI_MSI_START_BIT,
+ BOOT_STAGE_5300_PCI_WAIT_DOORBELL_BIT,
+ BOOT_STAGE_5300_DOWNLOAD_PBL_BIT,
+ BOOT_STAGE_5300_DOWNLOAD_PBL_DONE_BIT,
+ BOOT_STAGE_5300_SECURITY_START_BIT,
+ BOOT_STAGE_5300_CHECK_BL1_ID_BIT,
+ BOOT_STAGE_5300_JUMP_BL1_BIT,
+ /* Not documented, but this is the last stage */
+ BOOT_STAGE_5300_DONE_BIT,
+};
+
+/* Every bits of boot_stage_bit are filled */
+#define BOOT_STAGE_5400_DONE_MASK (BIT(BOOT_STAGE_5400_DONE_BIT + 1) - 1)
+#define BOOT_STAGE_5300_DONE_MASK (BIT(BOOT_STAGE_5300_DONE_BIT + 1) - 1)
+#define BOOT_STAGE_BL1_DOWNLOAD_DONE_MASK (BIT(BOOT_STAGE_5400_BL1_DOWNLOAD_DONE_BIT + 1) - 1)
+
+/* Zero one field of the MSI handshake region (multi-statement-safe) */
+#define CLEAR_MSI_REG_FIELD(mld, field) \
+ do { \
+ iowrite32(0, (mld)->msi_reg_base + offsetof(struct msi_reg_type, field)); \
+ } while (0)
+
+#endif
+
+void modem_ctrl_set_kerneltime(struct modem_ctl *mc);
+int modem_ctrl_check_offset_data(struct modem_ctl *mc);
+void change_modem_state(struct modem_ctl *mc, enum modem_state state);
+
+#if IS_ENABLED(CONFIG_SEC_MODEM_S5100)
+int s5100_force_crash_exit_ext(enum crash_type type);
+int s5100_poweron_pcie(struct modem_ctl *mc, enum link_mode mode);
+int s5100_try_gpio_cp_wakeup(struct modem_ctl *mc);
+void s5100_set_pcie_irq_affinity(struct modem_ctl *mc);
+int s5100_set_outbound_atu(struct modem_ctl *mc, struct cp_btl *btl,
+ loff_t *pos, u32 map_size);
+#endif
+
+#endif /* __MODEM_CTRL_H__ */
diff --git a/modem_ctrl_s5000ap.c b/modem_ctrl_s5000ap.c
new file mode 100644
index 0000000..9ab471c
--- /dev/null
+++ b/modem_ctrl_s5000ap.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Samsung Electronics.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/shm_ipc.h>
+#include "mcu_ipc.h"
+#include <soc/google/cal-if.h>
+#if IS_ENABLED(CONFIG_EXYNOS_PMU_IF)
+#include <soc/google/exynos-pmu-if.h>
+#else
+#include <soc/google/exynos-pmu.h>
+#endif
+#include <soc/google/modem_notifier.h>
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "modem_ctrl.h"
+#include "link_device_memory.h"
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+#include "s51xx_pcie.h"
+#endif
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+#include "../../../video/fbdev/exynos/dpu30/decon.h"
+static int s5000ap_lcd_notifier(struct notifier_block *notifier,
+ unsigned long event, void *v);
+#endif /* CONFIG_CP_LCD_NOTIFIER */
+
+/*
+ * CP_WDT interrupt handler
+ */
+#if IS_ENABLED(CONFIG_SOC_S5E9925) && IS_ENABLED(CONFIG_SOC_S5E9925_EVT0)
+#define PMU_CP_INT_IN 0x3940
+#define CP_SCANDUMP_MASK (0x1 << 7)
+#elif IS_ENABLED(CONFIG_SOC_S5E9925)
+#define PMU_CP_INT_IN 0x3930
+#define CP_SCANDUMP_MASK (0x1 << 7)
+#elif IS_ENABLED(CONFIG_SOC_S5E8825)
+#define PMU_CP_INT_IN 0x3540
+#define CP_SCANDUMP_MASK (0x1 << 7)
+#endif
+static irqreturn_t cp_wdt_handler(int irq, void *arg)
+{
+ struct modem_ctl *mc = (struct modem_ctl *)arg;
+ enum modem_state new_state;
+ struct link_device *ld = get_current_link(mc->bootd);
+#if IS_ENABLED(CONFIG_SOC_S5E9925) || IS_ENABLED(CONFIG_SOC_S5E8825)
+ u32 val;
+#endif
+
+ mif_info("%s: CP_WDT occurred\n", mc->name);
+ mif_disable_irq(&mc->irq_cp_wdt);
+
+#if IS_ENABLED(CONFIG_SOC_S5E9925) || IS_ENABLED(CONFIG_SOC_S5E8825)
+ mif_info("enable CP scandump request\n");
+ exynos_pmu_read(PMU_CP_INT_IN, &val);
+ if (val & CP_SCANDUMP_MASK) {
+ mif_info("cp scandump request detected\n");
+ dbg_snapshot_expire_watchdog();
+ return IRQ_HANDLED;
+ }
+#endif
+
+ if (mc->phone_state == STATE_ONLINE)
+ modem_notify_event(MODEM_EVENT_WATCHDOG, mc);
+
+ mif_stop_logging();
+
+ new_state = STATE_CRASH_WATCHDOG;
+ ld->crash_reason.type = CRASH_REASON_CP_WDOG_CRASH;
+
+ mif_info("new_state:%s\n", cp_state_str(new_state));
+
+ change_modem_state(mc, new_state);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ACTIVE mailbox interrupt handler
+ */
+static irqreturn_t cp_active_handler(int irq, void *arg)
+{
+ struct modem_ctl *mc = (struct modem_ctl *)arg;
+ int cp_on = cal_cp_status();
+ int cp_active = 0;
+ struct modem_data *modem = mc->mdm_data;
+ struct mem_link_device *mld = modem->mld;
+ enum modem_state old_state = mc->phone_state;
+ enum modem_state new_state = mc->phone_state;
+
+ cp_active = extract_ctrl_msg(&mld->cp2ap_united_status, mc->sbi_lte_active_mask,
+ mc->sbi_lte_active_pos);
+
+ mif_info("old_state:%s cp_on:%d cp_active:%d\n",
+ cp_state_str(old_state), cp_on, cp_active);
+
+ if (!cp_active) {
+ if (cp_on > 0) {
+ new_state = STATE_OFFLINE;
+ complete_all(&mc->off_cmpl);
+ } else {
+ mif_info("don't care!!!\n");
+ }
+ }
+
+ if (old_state != new_state) {
+ mif_info("new_state = %s\n", cp_state_str(new_state));
+
+ if (old_state == STATE_ONLINE)
+ modem_notify_event(MODEM_EVENT_RESET, mc);
+
+ change_modem_state(mc, new_state);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Board hardware revision; parsed from the kernel command line here, or
+ * read from revision GPIOs in get_system_rev() when CONFIG_HW_REV_DETECT
+ * is disabled.
+ */
+static int hw_rev;
+#if IS_ENABLED(CONFIG_HW_REV_DETECT)
+#if defined(MODULE)
+/* GKI TODO */
+#else /* MODULE */
+/* Parse "androidboot.revision=<n>" from the kernel command line */
+static int __init console_setup(char *str)
+{
+	get_option(&str, &hw_rev);
+	mif_info("hw_rev:0x%x\n", hw_rev);
+
+	return 0;
+}
+__setup("androidboot.revision=", console_setup);
+
+/* Parse "revision=<n>" from the kernel command line (alternate key) */
+static int __init set_hw_revision(char *str)
+{
+	get_option(&str, &hw_rev);
+	mif_info("Hardware revision:0x%x\n", hw_rev);
+
+	return 0;
+}
+__setup("revision=", set_hw_revision);
+#endif /* MODULE */
+#else /* CONFIG_HW_REV_DETECT */
+/*
+ * Read the board revision from the revision GPIOs listed in the DT node;
+ * GPIO index N contributes bit N of the revision.
+ *
+ * Returns the revision (>= 0), 0 when no GPIOs are described, or
+ * -EINVAL on an invalid GPIO.
+ */
+static int get_system_rev(struct device_node *np)
+{
+	int value, cnt, gpio_cnt;
+	/* must be signed: of_get_gpio() returns a negative errno on failure
+	 * (was unsigned, which defeated the gpio_is_valid() check)
+	 */
+	int gpio_hw_rev;
+	/* renamed from hw_rev: the old local shadowed the file-scope hw_rev */
+	unsigned int rev = 0;
+
+	gpio_cnt = of_gpio_count(np);
+	if (gpio_cnt < 0) {
+		mif_err("failed to get gpio_count from DT(%d)\n", gpio_cnt);
+		return 0;
+	}
+
+	for (cnt = 0; cnt < gpio_cnt; cnt++) {
+		gpio_hw_rev = of_get_gpio(np, cnt);
+		if (!gpio_is_valid(gpio_hw_rev)) {
+			mif_err("gpio_hw_rev%d: Invalid gpio\n", cnt);
+			return -EINVAL;
+		}
+
+		value = gpio_get_value(gpio_hw_rev);
+		rev |= (value & 0x1) << cnt;
+	}
+
+	return rev;
+}
+#endif /* CONFIG_HW_REV_DETECT */
+
+#if IS_ENABLED(CONFIG_GPIO_DS_DETECT)
+/*
+ * Read the dual-SIM detect level from the "mif,gpio_ds_det" GPIO.
+ * Returns the GPIO value, or 0 when the GPIO is missing/invalid.
+ */
+static int get_ds_detect(struct device_node *np)
+{
+	/* must be signed: of_get_named_gpio() returns a negative errno on
+	 * failure (was unsigned, defeating the gpio_is_valid() check)
+	 */
+	int gpio_ds_det;
+
+	gpio_ds_det = of_get_named_gpio(np, "mif,gpio_ds_det", 0);
+	if (!gpio_is_valid(gpio_ds_det)) {
+		mif_err("gpio_ds_det: Invalid gpio\n");
+		return 0;
+	}
+
+	return gpio_get_value(gpio_ds_det);
+}
+#else
+/* Dual-SIM detect selector; get_ds_detect() clamps it to 1 or 2 (default 2) */
+static int ds_detect = 2;
+module_param(ds_detect, int, 0664);
+MODULE_PARM_DESC(ds_detect, "Dual SIM detect");
+
+/* sysfs: report the current ds_detect value */
+static ssize_t ds_detect_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%d\n", ds_detect);
+}
+
+/*
+ * sysfs: set ds_detect from user input.
+ *
+ * Rejects non-numeric input with -EINVAL; range clamping happens later in
+ * get_ds_detect() when the value is consumed.
+ */
+static ssize_t ds_detect_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	int ret;
+	int value;
+
+	ret = kstrtoint(buf, 0, &value);
+	if (ret != 0) {
+		/* do not print 'value': it is uninitialized when kstrtoint fails */
+		mif_err("invalid input with %d\n", ret);
+		return -EINVAL;
+	}
+
+	ds_detect = value;
+	mif_info("set ds_detect: %d\n", ds_detect);
+
+	return count;
+}
+static DEVICE_ATTR_RW(ds_detect);
+
+/* sysfs "sim" group exposing ds_detect under the device node */
+static struct attribute *sim_attrs[] = {
+	&dev_attr_ds_detect.attr,
+	NULL,
+};
+
+static const struct attribute_group sim_group = {
+	.attrs = sim_attrs,
+	.name = "sim",
+};
+
+/*
+ * Non-GPIO variant: derive dual-SIM detect from the module parameter.
+ * Out-of-range values are clamped to 2; returns the 0-based value (0 or 1).
+ */
+static int get_ds_detect(struct device_node *np)
+{
+	if (ds_detect > 2 || ds_detect < 1)
+		ds_detect = 2;
+
+	mif_info("Dual SIM detect = %d\n", ds_detect);
+	return ds_detect - 1;
+}
+
+#endif
+
+/*
+ * Initialize the shared-memory control messages before (re)booting the CP.
+ *
+ * Publishes the configured region offsets to CP, zeroes both united-status
+ * words, the ap2cp/cp2ap message words and the capability words, then
+ * writes the BTL, dual-SIM and hardware-revision settings (read from DT)
+ * into ap2cp_united_status.
+ *
+ * Returns 0 on success, -1 when there is no DT node, or -EINVAL when the
+ * dual-SIM detect value is invalid.
+ */
+static int init_control_messages(struct modem_ctl *mc)
+{
+	struct platform_device *pdev = to_platform_device(mc->dev);
+	struct device_node *np = pdev->dev.of_node;
+	struct modem_data *modem = mc->mdm_data;
+	struct mem_link_device *mld = modem->mld;
+	struct link_device *ld = get_current_link(mc->iod);
+	unsigned int sbi_sys_rev_mask, sbi_sys_rev_pos;
+	int ds_det;
+#if IS_ENABLED(CONFIG_CP_BTL)
+	unsigned int sbi_ext_backtrace_mask, sbi_ext_backtrace_pos;
+	unsigned int sbi_ext_backtrace_ext_mask, sbi_ext_backtrace_ext_pos;
+#endif
+
+	/* advertise shared-memory layout; only offsets that are configured */
+	if (modem->offset_cmsg_offset)
+		iowrite32(modem->cmsg_offset, mld->cmsg_offset);
+	if (modem->offset_srinfo_offset)
+		iowrite32(modem->srinfo_offset, mld->srinfo_offset);
+	if (modem->offset_clk_table_offset)
+		iowrite32(modem->clk_table_offset, mld->clk_table_offset);
+	if (modem->offset_buff_desc_offset)
+		iowrite32(modem->buff_desc_offset, mld->buff_desc_offset);
+	if (ld->capability_check && modem->offset_capability_offset)
+		iowrite32(modem->capability_offset, mld->capability_offset);
+
+	/* start every boot from a clean control-message slate */
+	set_ctrl_msg(&mld->ap2cp_united_status, 0);
+	set_ctrl_msg(&mld->cp2ap_united_status, 0);
+	set_ctrl_msg(&mld->ap2cp_msg, 0);
+	set_ctrl_msg(&mld->cp2ap_msg, 0);
+
+	if (ld->capability_check) {
+		int part;
+
+		for (part = 0; part < AP_CP_CAP_PARTS; part++) {
+			iowrite32(0, mld->ap_capability_offset[part]);
+			iowrite32(0, mld->cp_capability_offset[part]);
+		}
+	}
+
+	if (!np) {
+		mif_err("non-DT project, can't set mailbox regs\n");
+		return -1;
+	}
+
+#if IS_ENABLED(CONFIG_CP_BTL)
+	/* publish backtrace-log (BTL) enablement bits to CP */
+	mif_info("btl enable:%d\n", mc->mdm_data->btl.enabled);
+	mif_dt_read_u32(np, "sbi_ext_backtrace_mask", sbi_ext_backtrace_mask);
+	mif_dt_read_u32(np, "sbi_ext_backtrace_pos", sbi_ext_backtrace_pos);
+	update_ctrl_msg(&mld->ap2cp_united_status, mc->mdm_data->btl.enabled,
+			sbi_ext_backtrace_mask, sbi_ext_backtrace_pos);
+
+	if (mc->mdm_data->btl.support_extension) {
+		mif_info("btl extension enable:%d\n", mc->mdm_data->btl.extension_enabled);
+		mif_dt_read_u32(np, "sbi_ext_backtrace_ext_mask", sbi_ext_backtrace_ext_mask);
+		mif_dt_read_u32(np, "sbi_ext_backtrace_ext_pos", sbi_ext_backtrace_ext_pos);
+		update_ctrl_msg(&mld->ap2cp_united_status, mc->mdm_data->btl.extension_enabled,
+				sbi_ext_backtrace_ext_mask, sbi_ext_backtrace_ext_pos);
+	}
+#endif
+
+	mif_dt_read_u32(np, "sbi_sys_rev_mask", sbi_sys_rev_mask);
+	mif_dt_read_u32(np, "sbi_sys_rev_pos", sbi_sys_rev_pos);
+
+	ds_det = get_ds_detect(np);
+	if (ds_det < 0) {
+		mif_err("ds_det error:%d\n", ds_det);
+		return -EINVAL;
+	}
+
+	update_ctrl_msg(&mld->ap2cp_united_status, ds_det, mc->sbi_ds_det_mask,
+			mc->sbi_ds_det_pos);
+	mif_info("ds_det:%d\n", ds_det);
+
+#if !IS_ENABLED(CONFIG_HW_REV_DETECT)
+	hw_rev = get_system_rev(np);
+#endif
+	/* clamp out-of-range revisions so they fit in the status field */
+	if ((hw_rev < 0) || (hw_rev > sbi_sys_rev_mask)) {
+		mif_err("hw_rev error:0x%x. set to 0\n", hw_rev);
+		hw_rev = 0;
+	}
+	update_ctrl_msg(&mld->ap2cp_united_status, hw_rev, sbi_sys_rev_mask,
+			sbi_sys_rev_pos);
+	mif_info("hw_rev:0x%x\n", hw_rev);
+
+	return 0;
+}
+
+/* True until the first cal_cp_init(); guards one-time CAL initialization */
+static bool _is_first_boot = true;
+
+/*
+ * Power on the CP.  Runs CAL initialization only on the very first boot
+ * while the CP power domain reports off; later power-ons reuse the live
+ * state.  Always returns 0.
+ */
+static int power_on_cp(struct modem_ctl *mc)
+{
+	mif_info("+++\n");
+
+	change_modem_state(mc, STATE_OFFLINE);
+
+	if (cal_cp_status() == 0) {
+		if (_is_first_boot) {
+			mif_info("First init\n");
+			cal_cp_disable_dump_pc_no_pg();
+			cal_cp_init();
+			_is_first_boot = false;
+		} else {
+			mif_err("Not first time, but power is down\n");
+		}
+	}
+
+	mif_info("---\n");
+	return 0;
+}
+
+/*
+ * Power off the CP: kick the CP-wakeup mailbox interrupt (presumably so
+ * the CP is awake when reset is asserted — confirm against CP firmware),
+ * then assert reset with dump-on-powergate disabled.  Always returns 0.
+ */
+static int power_off_cp(struct modem_ctl *mc)
+{
+	mif_info("+++\n");
+
+	cp_mbox_set_interrupt(CP_MBOX_IRQ_IDX_0, mc->int_cp_wakeup);
+	usleep_range(5000, 10000);
+
+	cal_cp_disable_dump_pc_no_pg();
+	cal_cp_reset_assert();
+
+	mif_info("---\n");
+	return 0;
+}
+
+/*
+ * Shut down the CP gracefully: wait up to 1s for the ACTIVE handler to
+ * complete off_cmpl (forcing STATE_OFFLINE on timeout), then wake the CP
+ * via mailbox and assert reset.  Always returns 0.
+ */
+static int power_shutdown_cp(struct modem_ctl *mc)
+{
+	unsigned long timeout = msecs_to_jiffies(1000);
+	unsigned long remain;
+
+	mif_info("+++\n");
+
+	/* nothing to wait for when already offline or powered down */
+	if (mc->phone_state == STATE_OFFLINE || cal_cp_status() == 0)
+		goto exit;
+
+	reinit_completion(&mc->off_cmpl);
+	remain = wait_for_completion_timeout(&mc->off_cmpl, timeout);
+	if (remain == 0)
+		change_modem_state(mc, STATE_OFFLINE);
+
+exit:
+	cp_mbox_set_interrupt(CP_MBOX_IRQ_IDX_0, mc->int_cp_wakeup);
+	usleep_range(5000, 10000);
+
+	cal_cp_disable_dump_pc_no_pg();
+	cal_cp_reset_assert();
+
+	mif_info("---\n");
+	return 0;
+}
+
+/*
+ * Warm-reset the CP without collecting a crash dump.
+ *
+ * Cancels any pending crash-ack/SBD-print timers, walks the state machine
+ * STATE_RESET -> STATE_OFFLINE, then toggles CP reset through CAL with
+ * dump-on-powergate disabled.  Always returns 0.
+ */
+static int power_reset_cp(struct modem_ctl *mc)
+{
+	struct link_device *ld = get_current_link(mc->iod);
+	struct mem_link_device *mld = to_mem_link_device(ld);
+
+	mif_info("+++\n");
+
+	/* 2cp dump WA */
+	if (timer_pending(&mld->crash_ack_timer))
+		del_timer(&mld->crash_ack_timer);
+	atomic_set(&mld->forced_cp_crash, 0);
+
+	if (ld->sbd_ipc && hrtimer_active(&mld->sbd_print_timer))
+		hrtimer_cancel(&mld->sbd_print_timer);
+
+	if (mc->phone_state == STATE_OFFLINE) {
+		mif_info("already offline\n");
+		return 0;
+	}
+
+	if (mc->phone_state == STATE_ONLINE)
+		modem_notify_event(MODEM_EVENT_RESET, mc);
+
+	change_modem_state(mc, STATE_RESET);
+	msleep(STATE_RESET_INTERVAL_MS);
+	change_modem_state(mc, STATE_OFFLINE);
+
+	if (cal_cp_status()) {
+		mif_info("CP aleady Init, try reset\n");
+		/* wake the CP via mailbox before toggling reset */
+		cp_mbox_set_interrupt(CP_MBOX_IRQ_IDX_0, mc->int_cp_wakeup);
+		usleep_range(5000, 10000);
+
+		cal_cp_disable_dump_pc_no_pg();
+		cal_cp_reset_assert();
+		usleep_range(5000, 10000);
+		cal_cp_reset_release();
+
+		cp_mbox_reset();
+	}
+
+	mif_info("---\n");
+	return 0;
+}
+
+/*
+ * Reset the CP for crash-dump collection.
+ *
+ * Like power_reset_cp(), but moves the state machine to STATE_CRASH_EXIT
+ * and toggles reset with dump-on-powergate ENABLED so the dump can be
+ * retrieved afterwards.  Always returns 0.
+ */
+static int power_reset_dump_cp(struct modem_ctl *mc)
+{
+	struct link_device *ld = get_current_link(mc->iod);
+	struct mem_link_device *mld = to_mem_link_device(ld);
+
+	mif_info("+++\n");
+
+	/* 2cp dump WA */
+	if (timer_pending(&mld->crash_ack_timer))
+		del_timer(&mld->crash_ack_timer);
+	atomic_set(&mld->forced_cp_crash, 0);
+
+	if (ld->sbd_ipc && hrtimer_active(&mld->sbd_print_timer))
+		hrtimer_cancel(&mld->sbd_print_timer);
+
+	/* mc->phone_state = STATE_OFFLINE; */
+	if (mc->phone_state == STATE_OFFLINE) {
+		mif_info("already offline\n");
+		return 0;
+	}
+
+	if (mc->phone_state == STATE_ONLINE)
+		modem_notify_event(MODEM_EVENT_RESET, mc);
+
+	/* Change phone state to STATE_CRASH_EXIT */
+	change_modem_state(mc, STATE_CRASH_EXIT);
+
+	if (cal_cp_status()) {
+		mif_info("CP aleady Init, try reset\n");
+		cp_mbox_set_interrupt(CP_MBOX_IRQ_IDX_0, mc->int_cp_wakeup);
+		usleep_range(5000, 10000);
+
+		/* keep program-counter dump alive across the reset */
+		cal_cp_enable_dump_pc_no_pg();
+		cal_cp_reset_assert();
+		usleep_range(5000, 10000);
+		cal_cp_reset_release();
+
+		cp_mbox_reset();
+	}
+
+	mif_info("---\n");
+	return 0;
+}
+
+/*
+ * First phase of a normal CP boot.
+ *
+ * Re-initializes the control messages, lets the link layer prepare and
+ * start the boot, kicks the CP via the AP2CP_CFG register when one is
+ * mapped, and polls (200 * 10-20ms) for the CP bootloader to raise
+ * cp_status.  CP_WDT stays disabled until complete_normal_boot().
+ *
+ * Returns 0 on success or a negative error.
+ */
+static int start_normal_boot(struct modem_ctl *mc)
+{
+	struct link_device *ld = get_current_link(mc->iod);
+	struct modem_data *modem = mc->mdm_data;
+	struct mem_link_device *mld = modem->mld;
+	int cnt = 200;
+	int ret = 0;
+	int cp_status = 0;
+
+	mif_info("+++\n");
+
+	if (init_control_messages(mc))
+		mif_err("Failed to initialize mbox regs\n");
+
+	if (ld->link_prepare_normal_boot)
+		ld->link_prepare_normal_boot(ld, mc->bootd);
+
+	change_modem_state(mc, STATE_BOOTING);
+
+	if (ld->link_start_normal_boot) {
+		mif_info("link_start_normal_boot\n");
+		ld->link_start_normal_boot(ld, mc->iod);
+	}
+
+	mif_info("cp_united_status:0x%08x\n", get_ctrl_msg(&mld->cp2ap_united_status));
+	mif_info("ap_united_status:0x%08x\n", get_ctrl_msg(&mld->ap2cp_united_status));
+
+	ret = modem_ctrl_check_offset_data(mc);
+	if (ret) {
+		mif_err("modem_ctrl_check_offset_data() error:%d\n", ret);
+		return ret;
+	}
+
+	/* write 1 to AP2CP_CFG and read it back to confirm the CP was kicked */
+	if (mc->ap2cp_cfg_ioaddr) {
+		mif_info("Before setting AP2CP_CFG:0x%08x\n",
+			 __raw_readl(mc->ap2cp_cfg_ioaddr));
+		__raw_writel(1, mc->ap2cp_cfg_ioaddr);
+		ret = __raw_readl(mc->ap2cp_cfg_ioaddr);
+		if (ret != 1) {
+			mif_err("AP2CP_CFG setting is not correct:%d\n", ret);
+			return -1;
+		}
+		mif_info("AP2CP_CFG is ok:0x%08x\n", ret);
+	}
+
+	/* wait for the CP bootloader to publish a non-zero cp_status */
+	while (extract_ctrl_msg(&mld->cp2ap_united_status, mld->sbi_cp_status_mask,
+				mld->sbi_cp_status_pos) == 0) {
+		if (--cnt > 0) {
+			usleep_range(10000, 20000);
+		} else {
+			mif_err("cp_status is not set by CP bootloader:0x%08x\n",
+				get_ctrl_msg(&mld->cp2ap_united_status));
+			return -EACCES;
+		}
+	}
+
+	mif_disable_irq(&mc->irq_cp_wdt);
+
+	/* re-validate the offset data after the CP has come up */
+	ret = modem_ctrl_check_offset_data(mc);
+	if (ret) {
+		mif_err("modem_ctrl_check_offset_data() error:%d\n", ret);
+		return ret;
+	}
+
+	cp_status = extract_ctrl_msg(&mld->cp2ap_united_status,
+				     mld->sbi_cp_status_mask, mld->sbi_cp_status_pos);
+	mif_info("cp_status=%u\n", cp_status);
+
+	/* publish ap_status and pda_active so the CP sees the AP as ready */
+	update_ctrl_msg(&mld->ap2cp_united_status, 1, mc->sbi_ap_status_mask,
+			mc->sbi_ap_status_pos);
+	mif_info("ap_status=%u\n", extract_ctrl_msg(&mld->ap2cp_united_status,
+		 mc->sbi_ap_status_mask, mc->sbi_ap_status_pos));
+
+	update_ctrl_msg(&mld->ap2cp_united_status, 1, mc->sbi_pda_active_mask,
+			mc->sbi_pda_active_pos);
+	mif_info("ap_united_status:0x%08x\n", get_ctrl_msg(&mld->ap2cp_united_status));
+
+	mif_info("---\n");
+	return 0;
+}
+
+/*
+ * Second phase of a normal CP boot: wait (up to MIF_INIT_TIMEOUT) for the
+ * CP to complete init_cmpl, then re-enable CP_WDT and go STATE_ONLINE.
+ * When CONFIG_CP_LCD_NOTIFIER is set, also registers the LCD notifier
+ * once and publishes the LCD_ON state to the CP.
+ *
+ * Returns 0 on success or a negative error.
+ */
+static int complete_normal_boot(struct modem_ctl *mc)
+{
+	unsigned long remain;
+	int err = 0;
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+	int ret;
+	struct modem_data *modem = mc->mdm_data;
+	struct mem_link_device *mld = modem->mld;
+#endif
+
+	mif_info("+++\n");
+
+	err = modem_ctrl_check_offset_data(mc);
+	if (err) {
+		mif_err("modem_ctrl_check_offset_data() error:%d\n", err);
+		goto exit;
+	}
+
+	/* init_cmpl is completed elsewhere when the CP reports init done */
+	reinit_completion(&mc->init_cmpl);
+	remain = wait_for_completion_timeout(&mc->init_cmpl, MIF_INIT_TIMEOUT);
+	if (remain == 0) {
+		mif_err("T-I-M-E-O-U-T\n");
+		err = -EAGAIN;
+		goto exit;
+	}
+
+	err = modem_ctrl_check_offset_data(mc);
+	if (err) {
+		mif_err("modem_ctrl_check_offset_data() error:%d\n", err);
+		goto exit;
+	}
+
+	/* CP is up: watchdog interrupts are meaningful again */
+	mif_enable_irq(&mc->irq_cp_wdt);
+
+	change_modem_state(mc, STATE_ONLINE);
+
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+	/* register only once; notifier_call stays set across reboots */
+	if (mc->lcd_notifier.notifier_call == NULL) {
+		mif_info("Register lcd notifier\n");
+		mc->lcd_notifier.notifier_call = s5000ap_lcd_notifier;
+		ret = register_lcd_status_notifier(&mc->lcd_notifier);
+		if (ret) {
+			mif_err("failed to register LCD notifier\n");
+			return ret;
+		}
+	}
+
+	mif_info("Set LCD_ON status\n");
+	update_ctrl_msg(&mld->ap2cp_united_status, 1, mc->sbi_lcd_status_mask,
+			mc->sbi_lcd_status_pos);
+#endif /* CONFIG_CP_LCD_NOTIFIER */
+
+	mif_info("---\n");
+
+exit:
+	/* success path falls through here with err == 0 */
+	return err;
+}
+
+/*
+ * Force a CP crash dump, using the crash reason already recorded on the
+ * current link device.  Always returns 0.
+ */
+static int trigger_cp_crash(struct modem_ctl *mc)
+{
+	struct link_device *ld = get_current_link(mc->bootd);
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	u32 reason = ld->crash_reason.type;
+
+	mif_info("+++\n");
+	ld->link_trigger_cp_crash(mld, reason, "Forced crash is called");
+	mif_info("---\n");
+
+	return 0;
+}
+
+/*
+ * Notify AP crash status to CP
+ */
+static struct modem_ctl *g_mc;
+int modem_force_crash_exit_ext(void)
+{
+ struct link_device *ld = get_current_link(g_mc->bootd);
+
+ ld->crash_reason.type = CRASH_REASON_MIF_FORCED;
+
+ if (!g_mc) {
+ mif_debug("g_mc is null\n");
+ return -1;
+ }
+
+ mif_info("Make forced crash exit\n");
+ g_mc->ops.trigger_cp_crash(g_mc);
+
+ return 0;
+}
+EXPORT_SYMBOL(modem_force_crash_exit_ext);
+
+#if IS_ENABLED(CONFIG_CP_UART_NOTI)
+#if IS_ENABLED(CONFIG_PMU_UART_SWITCH)
+/*
+ * Route the debug UART between AP and CP and notify the CP via mailbox.
+ *
+ * @val: MODEM_CTRL_UART_CP or MODEM_CTRL_UART_AP; anything else is rejected.
+ *
+ * Note: the redundant inner #if CONFIG_PMU_UART_SWITCH was removed — the
+ * whole function is already compiled under that exact condition.
+ */
+void send_uart_noti_to_modem(int val)
+{
+	struct modem_data *modem;
+	struct mem_link_device *mld;
+
+	if (!g_mc) {
+		mif_err("g_mc is NULL!\n");
+		return;
+	}
+
+	modem = g_mc->mdm_data;
+	mld = modem->mld;
+
+	switch (val) {
+	case MODEM_CTRL_UART_CP:
+		change_to_cp_uart();
+		break;
+	case MODEM_CTRL_UART_AP:
+		change_to_ap_uart();
+		break;
+	default:
+		mif_err("Invalid val:%d\n", val);
+		return;
+	}
+
+	update_ctrl_msg(&mld->ap2cp_united_status, val, g_mc->sbi_uart_noti_mask,
+			g_mc->sbi_uart_noti_pos);
+	mif_info("val:%d ap_united_status:0x%08x\n", val, get_ctrl_msg(&mld->ap2cp_united_status));
+	cp_mbox_set_interrupt(CP_MBOX_IRQ_IDX_0, g_mc->int_uart_noti);
+}
+EXPORT_SYMBOL(send_uart_noti_to_modem);
+#endif /* CONFIG_PMU_UART_SWITCH */
+#endif /* CONFIG_CP_UART_NOTI */
+
+/*
+ * Start a CP crash-dump boot (mirror of start_normal_boot()).
+ *
+ * Moves the state machine to STATE_CRASH_EXIT, re-initializes the control
+ * messages, starts the dump bootloader over the link, kicks the CP via
+ * AP2CP_CFG (or releases CP reset when no such register is mapped), then
+ * polls for the CP bootloader to raise cp_status.
+ *
+ * Returns 0 on success or a negative error / -1.
+ */
+static int start_dump_boot(struct modem_ctl *mc)
+{
+	struct link_device *ld = get_current_link(mc->bootd);
+	struct modem_data *modem = mc->mdm_data;
+	struct mem_link_device *mld = modem->mld;
+	int cnt = 200;
+	int ret = 0;
+	int cp_status = 0;
+
+	mif_info("+++\n");
+
+	/* Change phone state to STATE_CRASH_EXIT */
+	change_modem_state(mc, STATE_CRASH_EXIT);
+
+	if (init_control_messages(mc))
+		mif_err("Failed to initialize mbox regs\n");
+
+	if (!ld->link_start_dump_boot) {
+		mif_err("%s: link_start_dump_boot is null\n", ld->name);
+		return -EFAULT;
+	}
+	ret = ld->link_start_dump_boot(ld, mc->bootd);
+	if (ret) {
+		mif_err("link_start_dump_boot() error:%d\n", ret);
+		return ret;
+	}
+
+	mif_info("cp_united_status:0x%08x\n", get_ctrl_msg(&mld->cp2ap_united_status));
+	mif_info("ap_united_status:0x%08x\n", get_ctrl_msg(&mld->ap2cp_united_status));
+
+	ret = modem_ctrl_check_offset_data(mc);
+	if (ret) {
+		mif_err("modem_ctrl_check_offset_data() error:%d\n", ret);
+		return ret;
+	}
+
+	if (mc->ap2cp_cfg_ioaddr) {
+		mif_info("Before setting AP2CP_CFG:0x%08x\n",
+			 __raw_readl(mc->ap2cp_cfg_ioaddr));
+		__raw_writel(1, mc->ap2cp_cfg_ioaddr);
+		ret = __raw_readl(mc->ap2cp_cfg_ioaddr);
+		if (ret != 1) {
+			mif_err("AP2CP_CFG setting is not correct:%d\n", ret);
+			return -1;
+		}
+		mif_info("AP2CP_CFG is ok:0x%08x\n", ret);
+	} else {
+		cal_cp_reset_release();
+	}
+
+	while (extract_ctrl_msg(&mld->cp2ap_united_status, mld->sbi_cp_status_mask,
+				mld->sbi_cp_status_pos) == 0) {
+		if (--cnt > 0) {
+			usleep_range(10000, 20000);
+		} else {
+			mif_err("cp_status error:%d\n", extract_ctrl_msg(&mld->cp2ap_united_status,
+				mld->sbi_cp_status_mask, mld->sbi_cp_status_pos));
+			return -EACCES;
+		}
+	}
+
+	cp_status = extract_ctrl_msg(&mld->cp2ap_united_status,
+				     mld->sbi_cp_status_mask, mld->sbi_cp_status_pos);
+	mif_info("cp_status=%u\n", cp_status);
+
+	update_ctrl_msg(&mld->ap2cp_united_status, 1, mc->sbi_ap_status_mask,
+			mc->sbi_ap_status_pos);
+	/* fix: the log extracted with sbi_cp_status_pos (copy-paste error);
+	 * use sbi_ap_status_pos as in start_normal_boot()
+	 */
+	mif_info("ap_status=%u\n", extract_ctrl_msg(&mld->ap2cp_united_status,
+		 mc->sbi_ap_status_mask, mc->sbi_ap_status_pos));
+
+	update_ctrl_msg(&mld->ap2cp_united_status, 1, mc->sbi_pda_active_mask,
+			mc->sbi_pda_active_pos);
+	mif_info("ap_united_status:0x%08x\n", get_ctrl_msg(&mld->ap2cp_united_status));
+
+	ret = modem_ctrl_check_offset_data(mc);
+	if (ret) {
+		mif_err("modem_ctrl_check_offset_data() error:%d\n", ret);
+		return ret;
+	}
+
+	mif_info("---\n");
+	return 0;
+}
+
+/*
+ * PM suspend hook: stop the link timers, stamp kernel time, clear the
+ * pda_active bit and notify the CP.  Always returns 0.
+ */
+static int suspend_cp(struct modem_ctl *mc)
+{
+	struct mem_link_device *mld = mc->mdm_data->mld;
+
+	mld->link_dev.stop_timers(mld);
+	modem_ctrl_set_kerneltime(mc);
+
+	mif_info("%s: pda_active:0\n", mc->name);
+	update_ctrl_msg(&mld->ap2cp_united_status, 0, mc->sbi_pda_active_mask,
+			mc->sbi_pda_active_pos);
+	cp_mbox_set_interrupt(CP_MBOX_IRQ_IDX_0, mc->int_pda_active);
+
+	return 0;
+}
+
+/*
+ * PM resume hook: stamp kernel time, set the pda_active bit, notify the
+ * CP, then restart the link timers.  Always returns 0.
+ */
+static int resume_cp(struct modem_ctl *mc)
+{
+	struct mem_link_device *mld = mc->mdm_data->mld;
+
+	modem_ctrl_set_kerneltime(mc);
+
+	mif_info("%s: pda_active:1\n", mc->name);
+	update_ctrl_msg(&mld->ap2cp_united_status, 1, mc->sbi_pda_active_mask,
+			mc->sbi_pda_active_pos);
+	cp_mbox_set_interrupt(CP_MBOX_IRQ_IDX_0, mc->int_pda_active);
+
+	mld->link_dev.start_timers(mld);
+
+	return 0;
+}
+
+/* Install the S5000AP implementations of the modem_ctl operations */
+static void s5000ap_get_ops(struct modem_ctl *mc)
+{
+	mc->ops.power_on = power_on_cp;
+	mc->ops.power_off = power_off_cp;
+	mc->ops.power_shutdown = power_shutdown_cp;
+	mc->ops.power_reset = power_reset_cp;
+	mc->ops.power_reset_dump = power_reset_dump_cp;
+
+	mc->ops.start_normal_boot = start_normal_boot;
+	mc->ops.complete_normal_boot = complete_normal_boot;
+
+	mc->ops.trigger_cp_crash = trigger_cp_crash;
+	mc->ops.start_dump_boot = start_dump_boot;
+
+	mc->ops.suspend = suspend_cp;
+	mc->ops.resume = resume_cp;
+}
+
+/* Copy mailbox interrupt numbers and status-bit mask/position layouts from
+ * platform data into the modem_ctl for fast access at interrupt time.
+ */
+static void s5000ap_get_pdata(struct modem_ctl *mc, struct modem_data *modem)
+{
+	struct modem_mbox *mbx = modem->mbx;
+
+	mc->int_pda_active = mbx->int_ap2cp_active;
+	mc->int_cp_wakeup = mbx->int_ap2cp_wakeup;
+	mc->int_uart_noti = mbx->int_ap2cp_uart_noti;
+	mc->irq_phone_active = mbx->irq_cp2ap_active;
+
+	mc->sbi_lte_active_mask = modem->sbi_lte_active_mask;
+	mc->sbi_lte_active_pos = modem->sbi_lte_active_pos;
+	mc->sbi_cp_status_mask = modem->sbi_cp_status_mask;
+	mc->sbi_cp_status_pos = modem->sbi_cp_status_pos;
+
+	mc->sbi_pda_active_mask = modem->sbi_pda_active_mask;
+	mc->sbi_pda_active_pos = modem->sbi_pda_active_pos;
+	mc->sbi_ap_status_mask = modem->sbi_ap_status_mask;
+	mc->sbi_ap_status_pos = modem->sbi_ap_status_pos;
+
+	mc->sbi_uart_noti_mask = modem->sbi_uart_noti_mask;
+	mc->sbi_uart_noti_pos = modem->sbi_uart_noti_pos;
+
+	mc->sbi_crash_type_mask = modem->sbi_crash_type_mask;
+	mc->sbi_crash_type_pos = modem->sbi_crash_type_pos;
+
+	mc->sbi_ds_det_mask = modem->sbi_ds_det_mask;
+	mc->sbi_ds_det_pos = modem->sbi_ds_det_pos;
+
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+	mc->sbi_lcd_status_mask = modem->sbi_lcd_status_mask;
+	mc->sbi_lcd_status_pos = modem->sbi_lcd_status_pos;
+	mc->int_lcd_status = mbx->int_ap2cp_lcd_status;
+#endif
+}
+
+/*
+ * Kernel panic notifier: forward CMD_KERNEL_PANIC to the CP over IPC.
+ *
+ * Always returns a NOTIFY_* value.  The previous error returns of -1 had
+ * NOTIFY_STOP_MASK set, which would have aborted the remaining panic
+ * notifiers in the chain.
+ */
+static int send_panic_to_cp_notifier(struct notifier_block *nb,
+				     unsigned long action, void *nb_data)
+{
+	struct modem_data *modem;
+
+	if (!g_mc) {
+		mif_err("g_mc is null\n");
+		return NOTIFY_DONE;
+	}
+
+	modem = g_mc->mdm_data;
+	if (!modem->mld) {
+		mif_err("modem->mld is null\n");
+		return NOTIFY_DONE;
+	}
+
+	mif_info("Send CMD_KERNEL_PANIC message to CP\n");
+	send_ipc_irq(modem->mld, cmd2int(CMD_KERNEL_PANIC));
+
+	return NOTIFY_DONE;
+}
+
+#if IS_ENABLED(CONFIG_EXYNOS_ITMON)
+/*
+ * ITMON (bus monitor) notifier: force a CP crash dump when a bus decode
+ * error (errcode == 1) is attributed to a CP/MODEM master port.
+ */
+static int cp_itmon_notifier(struct notifier_block *nb,
+			     unsigned long action, void *nb_data)
+{
+	struct modem_ctl *modemctl;
+	struct itmon_notifier *itmon_data = nb_data;
+
+	modemctl = container_of(nb, struct modem_ctl, itmon_nb);
+
+	if (IS_ERR_OR_NULL(itmon_data))
+		return NOTIFY_DONE;
+
+	/* match ports whose name starts with "CP_" or "MODEM" */
+	if (itmon_data->port &&
+	    (strncmp("CP_", itmon_data->port, sizeof("CP_") - 1) == 0 ||
+	     strncmp("MODEM", itmon_data->port, sizeof("MODEM") - 1) == 0) &&
+	    itmon_data->errcode == 1) { /* force cp crash when decode error */
+		mif_info("CP itmon notifier: cp crash request complete\n");
+		modem_force_crash_exit_ext();
+	}
+
+	return NOTIFY_DONE;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+/*
+ * LCD state notifier: mirror the panel on/off state into the LCD-status
+ * bit of ap2cp_united_status, stamp kernel time, and signal the CP via
+ * the LCD-status mailbox interrupt.
+ */
+static int s5000ap_lcd_notifier(struct notifier_block *notifier,
+				unsigned long event, void *v)
+{
+	struct modem_ctl *mc =
+		container_of(notifier, struct modem_ctl, lcd_notifier);
+	struct modem_data *modem = mc->mdm_data;
+	struct mem_link_device *mld = modem->mld;
+
+	switch (event) {
+	case LCD_OFF:
+		mif_info("LCD_OFF Notification\n");
+		modem_ctrl_set_kerneltime(mc);
+		update_ctrl_msg(&mld->ap2cp_united_status, 0,
+				mc->sbi_lcd_status_mask,
+				mc->sbi_lcd_status_pos);
+		cp_mbox_set_interrupt(CP_MBOX_IRQ_IDX_0, mc->int_lcd_status);
+		break;
+
+	case LCD_ON:
+		mif_info("LCD_ON Notification\n");
+		modem_ctrl_set_kerneltime(mc);
+		update_ctrl_msg(&mld->ap2cp_united_status, 1,
+				mc->sbi_lcd_status_mask,
+				mc->sbi_lcd_status_pos);
+		cp_mbox_set_interrupt(CP_MBOX_IRQ_IDX_0, mc->int_lcd_status);
+		break;
+
+	default:
+		/* other events are ignored */
+		mif_info("lcd_event %ld\n", event);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+#endif /* CONFIG_CP_LCD_NOTIFIER */
+
+/*
+ * Probe-time initialization of the S5000AP modem control device.
+ *
+ * Installs the control ops and platform data, registers the CP_WDT IRQ
+ * (kept disabled until CP boot completes) and the ACTIVE mailbox handler,
+ * maps the optional AP2CP_CFG register, and hooks the panic/ITMON
+ * notifiers and the SIM sysfs group.
+ *
+ * Returns 0 on success or a negative errno; resources acquired so far are
+ * released in reverse order on failure.
+ */
+int s5000ap_init_modemctl_device(struct modem_ctl *mc, struct modem_data *pdata)
+{
+	struct platform_device *pdev = to_platform_device(mc->dev);
+	struct device_node *np = pdev->dev.of_node;
+	int ret = 0;
+	/* must be signed: platform_get_irq() returns a negative errno, and
+	 * the old 'unsigned int' made the < 0 check always false
+	 */
+	int irq_num;
+	unsigned long flags = IRQF_NO_SUSPEND | IRQF_NO_THREAD | IRQF_ONESHOT;
+
+	mif_info("+++\n");
+
+	/* To notify AP crash status to CP */
+	g_mc = mc;
+
+	s5000ap_get_ops(mc);
+	s5000ap_get_pdata(mc, pdata);
+	dev_set_drvdata(mc->dev, mc);
+
+	/* Register CP_WDT */
+	irq_num = platform_get_irq(pdev, 0);
+	if (irq_num < 0) {
+		mif_err("platform_get_irq failed with(%d)", irq_num);
+		ret = irq_num;
+		goto err_irq;
+	}
+	mif_init_irq(&mc->irq_cp_wdt, irq_num, "cp_wdt", flags);
+	ret = mif_request_irq(&mc->irq_cp_wdt, cp_wdt_handler, mc);
+	if (ret) {
+		mif_err("Failed to request_irq with(%d)", ret);
+		goto err_irq;
+	}
+	/* CP_WDT interrupt must be enabled only after CP booting */
+	mif_disable_irq(&mc->irq_cp_wdt);
+
+	/* Register LTE_ACTIVE mailbox interrupt */
+	ret = cp_mbox_register_handler(CP_MBOX_IRQ_IDX_0, mc->irq_phone_active,
+				       cp_active_handler, mc);
+	if (ret) {
+		mif_err("Failed to cp_mbox_register_handler %u with(%d)",
+			mc->irq_phone_active, ret);
+		goto err_mbox_register;
+	}
+
+	init_completion(&mc->init_cmpl);
+	init_completion(&mc->off_cmpl);
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	init_completion(&mc->clatinfo_ack);
+#endif
+
+	/* AP2CP_CFG */
+	mif_dt_read_u32_noerr(np, "ap2cp_cfg_addr", mc->ap2cp_cfg_addr);
+	if (mc->ap2cp_cfg_addr) {
+		mif_info("AP2CP_CFG:0x%08x\n", mc->ap2cp_cfg_addr);
+		mc->ap2cp_cfg_ioaddr = devm_ioremap(mc->dev, mc->ap2cp_cfg_addr, SZ_64);
+		if (mc->ap2cp_cfg_ioaddr == NULL) {
+			mif_err("%s: AP2CP_CFG ioremap failed.\n", __func__);
+			ret = -EACCES;
+			goto err_cfg_ioaddr;
+		}
+	}
+
+	/* Register panic notifier_call*/
+	mc->send_panic_nb.notifier_call = send_panic_to_cp_notifier;
+	ret = atomic_notifier_chain_register(&panic_notifier_list, &mc->send_panic_nb);
+	if (ret < 0) {
+		mif_err("failed to register panic notifier_call\n");
+		goto err_cfg_ioaddr;
+	}
+
+#if !IS_ENABLED(CONFIG_GPIO_DS_DETECT)
+	if (sysfs_create_group(&pdev->dev.kobj, &sim_group))
+		mif_err("failed to create sysfs node related sim\n");
+#endif
+
+#if IS_ENABLED(CONFIG_EXYNOS_ITMON)
+	mc->itmon_nb.notifier_call = cp_itmon_notifier;
+	itmon_notifier_chain_register(&mc->itmon_nb);
+#endif
+	mif_info("---\n");
+	return 0;
+
+err_cfg_ioaddr:
+	cp_mbox_unregister_handler(CP_MBOX_IRQ_IDX_0, mc->irq_phone_active, cp_active_handler);
+err_mbox_register:
+	mif_free_irq(&mc->irq_cp_wdt, mc);
+err_irq:
+	g_mc = NULL;
+	return ret;
+}
+
+/* Tear down everything acquired by s5000ap_init_modemctl_device() */
+void s5000ap_uninit_modemctl_device(struct modem_ctl *mc, struct modem_data *pdata)
+{
+#if IS_ENABLED(CONFIG_EXYNOS_ITMON)
+	itmon_notifier_chain_unregister(&mc->itmon_nb);
+#endif
+/* fix: init creates the group under !CONFIG_GPIO_DS_DETECT; the old guard
+ * tested CONFIG_GPIO_DS_DIRECT, which does not exist, so the group leaked
+ * whenever GPIO DS detect was enabled
+ */
+#if !IS_ENABLED(CONFIG_GPIO_DS_DETECT)
+	/* fix: mc->dev is a pointer (see to_platform_device(mc->dev) in init),
+	 * so mc->dev.kobj would not compile
+	 */
+	sysfs_remove_group(&mc->dev->kobj, &sim_group);
+#endif
+	atomic_notifier_chain_unregister(&panic_notifier_list, &mc->send_panic_nb);
+	cp_mbox_unregister_handler(CP_MBOX_IRQ_IDX_0, mc->irq_phone_active, cp_active_handler);
+	mif_free_irq(&mc->irq_cp_wdt, mc);
+	g_mc = NULL;
+}
diff --git a/modem_ctrl_s5100.c b/modem_ctrl_s5100.c
new file mode 100644
index 0000000..0dc6bdb
--- /dev/null
+++ b/modem_ctrl_s5100.c
@@ -0,0 +1,3156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+#include <linux/pci.h>
+#include <linux/regulator/consumer.h>
+#include <soc/google/acpm_mfd.h>
+#include <soc/google/modem_notifier.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/panic_notifier.h>
+#include <linux/s5910.h>
+
+#include <linux/exynos-pci-ctrl.h>
+#include <linux/shm_ipc.h>
+#include <dt-bindings/pci/pci.h>
+#include <misc/sbbm.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "modem_ctrl.h"
+#include "link_device.h"
+#include "link_device_memory.h"
+#include "s51xx_pcie.h"
+#if IS_ENABLED(CONFIG_CP_PMIC)
+#include "cp_pmic.h"
+#endif
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+#include "link_device_pcie_iommu.h"
+#endif
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+#include "../../../video/fbdev/exynos/dpu30/decon.h"
+static int s5100_lcd_notifier(struct notifier_block *notifier,
+ unsigned long event, void *v);
+#endif /* CONFIG_CP_LCD_NOTIFIER */
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+#define RUNTIME_PM_AFFINITY_CORE 2
+
+#define DEFAULT_TP_THRESHOLD 500 /* Mbps */
+#define DEFAULT_TP_HYSTERESIS 100 /* Mbps */
+#define CRASH_WAKELOCK_TIMEOUT_MS 10000
+
+static struct modem_ctl *g_mc;
+
+static int s5100_poweroff_pcie(struct modem_ctl *mc, bool force_off);
+
+/*
+ * Reboot notifier: mark that the system is going down so the PCIe
+ * power-management paths stop touching the link from here on.
+ */
+static int s5100_reboot_handler(struct notifier_block *nb,
+				unsigned long action, void *data)
+{
+	struct modem_ctl *mc = container_of(nb, struct modem_ctl, reboot_nb);
+
+	mif_info("Now is device rebooting..\n");
+
+	mutex_lock(&mc->pcie_check_lock);
+	mc->device_reboot = true;
+	mutex_unlock(&mc->pcie_check_lock);
+
+	return 0;
+}
+
+/*
+ * Snapshot every CP control/status GPIO level and record them in the
+ * logbuffer as a single line. Reads are non-verbose and side-effect free.
+ */
+static void print_mc_state(struct modem_ctl *mc)
+{
+	/* power / reset handshake pins */
+	int pwr = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_PWR], false);
+	int reset = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_AP2CP_NRESET], false);
+	int pshold = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_PS_HOLD], false);
+
+	/* runtime-PM wakeup handshake pins (both directions) */
+	int ap_wakeup = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_WAKEUP], false);
+	int cp_wakeup = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_AP2CP_WAKEUP], false);
+
+	/* liveness / crash / reset signalling pins */
+	int dump = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI], false);
+	int ap_status = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], false);
+	int phone_active = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_ACTIVE], false);
+	int wrst = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_WRST_N], false);
+	int partial_reset = mif_gpio_get_value(
+		&mc->cp_gpio[CP_GPIO_AP2CP_PARTIAL_RST_N],false);
+
+	logbuffer_log(mc->log,
+		"%s: %ps:GPIO pwr:%d rst:%d phd:%d c2aw:%d a2cw:%d dmp:%d ap_act:%d cp_act:%d wrst:%d prst:%d",
+		mc->name, CALLER, pwr, reset, pshold, ap_wakeup, cp_wakeup, dump,
+		ap_status, phone_active, wrst, partial_reset);
+}
+
+/*
+ * Dump the 0x30..0x4F window of the MSI shared region to the logbuffer,
+ * 16 bytes (four dwords) per line.
+ */
+static void print_msi_space(struct modem_ctl *mc)
+{
+	struct link_device *ld = get_current_link(mc->bootd);
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	void __iomem *base = mld->msi_reg_base;
+	int off = 0x30;
+
+	logbuffer_log(mc->log, "<%ps> offset: 0 4 8 C\n", CALLER);
+	while (off < 0x50) {
+		logbuffer_log(mc->log,
+			      "MSI %04x: %08x %08x %08x %08x\n",
+			      off,
+			      ioread32(base + off),
+			      ioread32(base + off + 0x4),
+			      ioread32(base + off + 0x8),
+			      ioread32(base + off + 0xC));
+		off += 0x10;
+	}
+}
+
+/*
+ * Dump PCIe endpoint config space 0x100..0x11F to the logbuffer,
+ * four dwords per line.
+ */
+void print_ep_config_space(struct modem_ctl *mc)
+{
+	u32 val[4];
+	int off, j;
+
+	logbuffer_log(mc->log, "<%ps> offset: 0 4 8 C\n", CALLER);
+	for (off = 0x100; off < 0x120; off += 0x10) {
+		for (j = 0; j < 4; j++)
+			pci_read_config_dword(mc->s51xx_pdev, off + 4 * j, &val[j]);
+		logbuffer_log(mc->log, "EP_CFG %04x: %08x %08x %08x %08x\n",
+			      off, val[0], val[1], val[2], val[3]);
+	}
+}
+
+/*
+ * Dump the 0x9200..0x927F window of the mapped doorbell region to the
+ * logbuffer, four dwords per line, preceded by the mapping addresses.
+ */
+void print_doorbell_region(struct modem_ctl *mc)
+{
+	struct s51xx_pcie *spcie = pci_get_drvdata(mc->s51xx_pdev);
+	u32 off;
+
+	logbuffer_log(mc->log, "doorbell_addr = %#lx (PHYSICAL %#lx)\n",
+		      (unsigned long)spcie->doorbell_addr,
+		      (unsigned long)spcie->dbaddr_base);
+
+	logbuffer_log(mc->log, "<%ps> offset: 0 4 8 C\n", CALLER);
+	for (off = 0x9200; off < 0x9280; off += 0x10)
+		logbuffer_log(mc->log,
+			      "DB %04x: %08x %08x %08x %08x\n",
+			      off,
+			      ioread32(spcie->doorbell_addr + off),
+			      ioread32(spcie->doorbell_addr + off + 0x4),
+			      ioread32(spcie->doorbell_addr + off + 0x8),
+			      ioread32(spcie->doorbell_addr + off + 0xC));
+}
+
+/*
+ * Bring the PCIe link to a known disconnected state (after CP crash or
+ * reset): notify listeners, force the link off if still powered, and log
+ * once the link is confirmed down.
+ */
+static void pcie_clean_dislink(struct modem_ctl *mc)
+{
+#if IS_ENABLED(CONFIG_CPIF_AP_SUSPEND_DURING_VOICE_CALL)
+	if (mc->pcie_voice_call_on) {
+		/* a dislink during a voice call is reported as a reset */
+		modem_notify_event(MODEM_EVENT_RESET, mc);
+		mc->pcie_voice_call_on = false;
+	} else {
+		modem_notify_event(MODEM_EVENT_OFFLINE, mc);
+	}
+#endif
+
+	/* force_off=true: unconditional power-down of the link */
+	if (mc->pcie_powered_on)
+		s5100_poweroff_pcie(mc, true);
+
+	if (!mc->pcie_powered_on)
+		mif_err("Link is disconnected!!!\n");
+}
+
+/*
+ * Work item run when CP drives cp2ap_wakeup high: close the current CP
+ * sleep-statistics interval and bring the PCIe link back up.
+ *
+ * The timestamp is an ordinary local now; it was needlessly `static`
+ * (rewritten on every invocation, so static storage only created a
+ * latent shared-state hazard).
+ */
+static void cp2ap_wakeup_work(struct work_struct *work)
+{
+	struct modem_ctl *mc = container_of(work, struct modem_ctl, wakeup_work);
+	ktime_t cp2ap_wakeup_time;
+	unsigned long flags;
+
+	/* a crashed CP is handled by the crash path, not runtime PM */
+	if (mc->phone_state == STATE_CRASH_EXIT)
+		return;
+
+	cp2ap_wakeup_time = ktime_get_boottime();
+
+	spin_lock_irqsave(&mc->power_stats_lock, flags);
+	if (mc->cp_power_stats.suspended) {
+		/* close the sleep interval opened by cp2ap_suspend_work() */
+		mc->cp_power_stats.last_exit_timestamp_usec = ktime_to_us(cp2ap_wakeup_time);
+		mc->cp_power_stats.duration_usec += (mc->cp_power_stats.last_exit_timestamp_usec -
+			mc->cp_power_stats.last_entry_timestamp_usec);
+	}
+	mc->cp_power_stats.suspended = false;
+	spin_unlock_irqrestore(&mc->power_stats_lock, flags);
+
+	s5100_poweron_pcie(mc, LINK_MODE_ADAPTIVE_SPEED_BOOTED);
+}
+
+/*
+ * Work item run when CP drives cp2ap_wakeup low: open a new CP
+ * sleep-statistics interval and power the PCIe link down.
+ *
+ * The timestamp is an ordinary local now; it was needlessly `static`
+ * (rewritten on every invocation).
+ */
+static void cp2ap_suspend_work(struct work_struct *work)
+{
+	struct modem_ctl *mc = container_of(work, struct modem_ctl, suspend_work);
+	ktime_t cp2ap_suspend_time;
+	unsigned long flags;
+
+	/* a crashed CP is handled by the crash path, not runtime PM */
+	if (mc->phone_state == STATE_CRASH_EXIT)
+		return;
+
+	cp2ap_suspend_time = ktime_get_boottime();
+
+	spin_lock_irqsave(&mc->power_stats_lock, flags);
+	if (!mc->cp_power_stats.suspended) {
+		mc->cp_power_stats.last_entry_timestamp_usec = ktime_to_us(cp2ap_suspend_time);
+		mc->cp_power_stats.count++;
+	}
+	mc->cp_power_stats.suspended = true;
+	spin_unlock_irqrestore(&mc->power_stats_lock, flags);
+
+	/* force_off=false: normal runtime power-down */
+	s5100_poweroff_pcie(mc, false);
+}
+
+/*
+ * sysfs read: cumulative CP sleep statistics.
+ *
+ * If the CP is currently asleep the in-progress interval is added to the
+ * reported duration. duration_usec is now sampled *inside*
+ * power_stats_lock (it was read before the lock was taken, racing with
+ * cp2ap_wakeup_work()/cp2ap_suspend_work() and yielding an inconsistent
+ * snapshot).
+ */
+static ssize_t power_stats_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+	ssize_t count = 0;
+	unsigned long flags;
+	u64 adjusted_duration_usec;
+
+	spin_lock_irqsave(&mc->power_stats_lock, flags);
+	adjusted_duration_usec = mc->cp_power_stats.duration_usec;
+	if (mc->cp_power_stats.suspended) {
+		u64 now_usec = ktime_to_us(ktime_get_boottime());
+
+		adjusted_duration_usec += now_usec -
+			mc->cp_power_stats.last_entry_timestamp_usec;
+	}
+
+	count += scnprintf(&buf[count], PAGE_SIZE - count, "SLEEP:\n");
+	count += scnprintf(&buf[count], PAGE_SIZE - count, " count: 0x%llx\n",
+			   mc->cp_power_stats.count);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, " duration_usec: 0x%llx\n",
+			   adjusted_duration_usec);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, " last_entry_timestamp_usec: 0x%llx\n",
+			   mc->cp_power_stats.last_entry_timestamp_usec);
+	count += scnprintf(&buf[count], PAGE_SIZE - count, " last_exit_timestamp_usec: 0x%llx\n",
+			   mc->cp_power_stats.last_exit_timestamp_usec);
+	spin_unlock_irqrestore(&mc->power_stats_lock, flags);
+
+	return count;
+}
+
+#define MAX_PCIE_EVENT_HISTORY 10
+/* Ring-style records of recent PCIe link-down / completion-timeout retry
+ * counts, plus running totals (file-global; written elsewhere).
+ */
+static int pcie_linkdown_history[MAX_PCIE_EVENT_HISTORY];
+static int pcie_linkdown_count;
+static int pcie_cto_history[MAX_PCIE_EVENT_HISTORY];
+static int pcie_cto_count;
+/*
+ * sysfs read: dump PCIe link-down and completion-timeout retry counters,
+ * both the per-modem_ctl totals and the file-global history arrays.
+ */
+static ssize_t pcie_event_stats_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+	ssize_t count = 0, i = 0;
+
+	count += scnprintf(&buf[count], PAGE_SIZE - count,
+			   "Total linkdown retries: %d\n",
+			   mc->pcie_linkdown_retry_cnt_all);
+	count += scnprintf(&buf[count], PAGE_SIZE - count,
+			   "Total CPL timeout retries: %d\n",
+			   mc->pcie_cto_retry_cnt_all);
+	count += scnprintf(&buf[count], PAGE_SIZE - count,
+			   "Previous linkdown retries: %d\n",
+			   mc->pcie_linkdown_retry_cnt);
+	count += scnprintf(&buf[count], PAGE_SIZE - count,
+			   "Previous CPL timeout retries: %d\n",
+			   mc->pcie_cto_retry_cnt);
+	count += scnprintf(&buf[count], PAGE_SIZE - count,
+			   "Previous %d linkdown retries:",
+			   MAX_PCIE_EVENT_HISTORY);
+	for (i = 0; i < MAX_PCIE_EVENT_HISTORY; i++) {
+		count += scnprintf(&buf[count], PAGE_SIZE - count, " %d",
+				   pcie_linkdown_history[i]);
+	}
+	count += scnprintf(&buf[count], PAGE_SIZE - count,
+			   "\nTotal linkdown retries recorded: %d\n",
+			   pcie_linkdown_count);
+	count += scnprintf(&buf[count], PAGE_SIZE - count,
+			   "Previous %d CPL timeout retries:", MAX_PCIE_EVENT_HISTORY);
+	for (i = 0; i < MAX_PCIE_EVENT_HISTORY; i++) {
+		count += scnprintf(&buf[count], PAGE_SIZE - count, " %d",
+				   pcie_cto_history[i]);
+	}
+	count += scnprintf(&buf[count], PAGE_SIZE - count,
+			   "\nTotal CPL timeout retries recorded: %d\n",
+			   pcie_cto_count);
+
+	return count;
+}
+
+/* sysfs read: current SBB signal-debug toggle (0 or 1). */
+static ssize_t sbb_debug_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	const struct modem_ctl *mc = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d\n", mc->sbb_debug);
+}
+
+/* sysfs write: accept exactly 0 or 1 to toggle SBB signal debugging. */
+static ssize_t sbb_debug_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+	int value;
+
+	if (kstrtoint(buf, 0, &value) != 0)
+		return -EINVAL;
+
+	/* only the boolean values are accepted */
+	if (value != 0 && value != 1)
+		return -EINVAL;
+
+	mc->sbb_debug = value;
+
+	return count;
+}
+
+static DEVICE_ATTR_RO(power_stats);
+static DEVICE_ATTR_RO(pcie_event_stats);
+static DEVICE_ATTR_RW(sbb_debug);
+
+/* Attribute set published under the "modem" sysfs group. */
+static struct attribute *modem_attrs[] = {
+	&dev_attr_power_stats.attr,
+	&dev_attr_pcie_event_stats.attr,
+	&dev_attr_sbb_debug.attr,
+	NULL,
+};
+
+static const struct attribute_group modem_group = {
+	.attrs = modem_attrs,
+	.name = "modem",
+};
+
+#if IS_ENABLED(CONFIG_CPIF_AP_SUSPEND_DURING_VOICE_CALL)
+/*
+ * Voice call started: if the PCIe link is up, release the main wakesource
+ * so the AP may suspend during the call (CP handles audio independently).
+ */
+static void voice_call_on_work(struct work_struct *work)
+{
+	struct modem_ctl *mc = container_of(work, struct modem_ctl, call_on_work);
+
+	mutex_lock(&mc->pcie_check_lock);
+	/* state may have flipped before the work ran */
+	if (!mc->pcie_voice_call_on)
+		goto exit;
+
+	if (mc->pcie_powered_on &&
+	    (s51xx_check_pcie_link_status(mc->pcie_ch_num) != 0)) {
+		if (cpif_wake_lock_active(mc->ws)) {
+			mif_info("voice call on release wakelock\n");
+			cpif_wake_unlock(mc->ws);
+		}
+	}
+
+exit:
+	mif_info("wakelock active = %d, voice status = %d\n",
+		 cpif_wake_lock_active(mc->ws), mc->pcie_voice_call_on);
+	mutex_unlock(&mc->pcie_check_lock);
+}
+
+/*
+ * Voice call ended: if the PCIe link is up, re-acquire the main
+ * wakesource so normal runtime-PM handling resumes.
+ */
+static void voice_call_off_work(struct work_struct *work)
+{
+	struct modem_ctl *mc = container_of(work, struct modem_ctl, call_off_work);
+
+	mutex_lock(&mc->pcie_check_lock);
+	/* state may have flipped before the work ran */
+	if (mc->pcie_voice_call_on)
+		goto exit;
+
+	if (mc->pcie_powered_on &&
+	    (s51xx_check_pcie_link_status(mc->pcie_ch_num) != 0)) {
+		if (!cpif_wake_lock_active(mc->ws)) {
+			mif_info("voice call off acquire wakelock\n");
+			cpif_wake_lock(mc->ws);
+		}
+	}
+
+exit:
+	mif_info("wakelock active = %d, voice status = %d\n",
+		 cpif_wake_lock_active(mc->ws), mc->pcie_voice_call_on);
+	mutex_unlock(&mc->pcie_check_lock);
+}
+#endif
+
+/* Last observed cp2ap_wakeup level; 1 is the initial GPIO level. */
+static int check_link_order = 1;
+/*
+ * CP2AP_WAKEUP GPIO interrupt. The line is level-triggered, so the
+ * handler disables the IRQ, flips the trigger polarity to catch the next
+ * transition, re-enables it, and queues the matching runtime-PM work
+ * item (link up on high, link down on low). During system suspend the
+ * event is only recorded and replayed on resume.
+ */
+static irqreturn_t ap_wakeup_handler(int irq, void *data)
+{
+	struct modem_ctl *mc = data;
+	int gpio_val = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_WAKEUP], true);
+	unsigned long flags;
+
+	if (mc->sbb_debug)
+		SBBM_SIGNAL_UPDATE(SBB_SIG_MODEM_CP2AP_WAKE_ISR, 1);
+
+	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP]);
+
+	if (mc->mdm_data->mif_off_during_volte) {
+		int wrst_gpio_val = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_WRST_N], true);
+		/* To avoid holding on to the wakesource in case of a race condition */
+		if (wrst_gpio_val || !mc->pcie_voice_call_on)
+			cpif_wake_unlock(mc->ws_wrst);
+	}
+
+	if (mc->device_reboot) {
+		/* reboot notifier fired: leave the link alone */
+		mif_err("skip : device is rebooting..!!!\n");
+		goto irq_handled;
+	}
+
+	/* two interrupts at the same level indicate a missed transition */
+	if (gpio_val == check_link_order)
+		mif_err("cp2ap_wakeup val is the same with before : %d\n", gpio_val);
+	check_link_order = gpio_val;
+
+	spin_lock_irqsave(&mc->pcie_pm_lock, flags);
+	if (mc->pcie_pm_suspended) {
+		if (gpio_val == 1) {
+			/* try to block system suspend */
+			if (!cpif_wake_lock_active(mc->ws))
+				cpif_wake_lock(mc->ws);
+		}
+
+		/* defer: s5100 resume path replays this event */
+		mif_err("cp2ap_wakeup work pending. gpio_val : %d\n", gpio_val);
+		mc->pcie_pm_resume_wait = true;
+		mc->pcie_pm_resume_gpio_val = gpio_val;
+
+		spin_unlock_irqrestore(&mc->pcie_pm_lock, flags);
+		goto irq_handled;
+	}
+	spin_unlock_irqrestore(&mc->pcie_pm_lock, flags);
+
+	/* re-arm for the opposite level before re-enabling the IRQ */
+	mc->apwake_irq_chip->irq_set_type(
+		irq_get_irq_data(mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP].num),
+		(gpio_val == 1 ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH));
+	mif_enable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP]);
+
+	queue_work_on(RUNTIME_PM_AFFINITY_CORE, mc->wakeup_wq,
+		      (gpio_val == 1 ? &mc->wakeup_work : &mc->suspend_work));
+
+irq_handled:
+	if (mc->sbb_debug)
+		SBBM_SIGNAL_UPDATE(SBB_SIG_MODEM_CP2AP_WAKE_ISR, 0);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * PHONE_ACTIVE GPIO interrupt: the CP has deasserted cp2ap_cp_active,
+ * i.e. it crashed (or is resetting). Records the crash reason, stops MIF
+ * logging, moves the modem state machine to STATE_CRASH_EXIT, and dumps
+ * the legacy IPC queue pointers for post-mortem analysis. The IRQ is
+ * disabled on exit so the level-triggered line cannot storm.
+ */
+static irqreturn_t cp_active_handler(int irq, void *data)
+{
+	struct modem_ctl *mc = data;
+	struct link_device *ld;
+	struct mem_link_device *mld;
+	int cp_active;
+	enum modem_state old_state;
+	enum modem_state new_state;
+	struct legacy_link_device *bl;
+	struct legacy_ipc_device *ipc_dev;
+	int i;
+
+	/* guard against a spurious interrupt before/after driver setup */
+	if (mc == NULL) {
+		mif_err_limited("modem_ctl is NOT initialized - IGNORING interrupt\n");
+		goto irq_done;
+	}
+
+	ld = get_current_link(mc->iod);
+	mld = to_mem_link_device(ld);
+
+	if (mc->s51xx_pdev == NULL) {
+		mif_err_limited("S5100 is NOT initialized - IGNORING interrupt\n");
+		goto irq_done;
+	}
+
+	if (mc->phone_state != STATE_ONLINE) {
+		mif_err_limited("Phone_state is NOT ONLINE - IGNORING interrupt\n");
+		goto irq_done;
+	}
+
+	print_mc_state(mc);
+
+	cp_active = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_ACTIVE], true);
+	mif_err("[PHONE_ACTIVE Handler] state:%s cp_active:%d\n",
+		cp_state_str(mc->phone_state), cp_active);
+
+	/* the line is expected to be low when this handler fires */
+	if (cp_active == 1)
+		mif_err("ERROR - cp_active is not low, state:%s cp_active:%d\n",
+			cp_state_str(mc->phone_state), cp_active);
+
+	/* a pending forced-crash ack timer is now moot */
+	if (timer_pending(&mld->crash_ack_timer))
+		del_timer(&mld->crash_ack_timer);
+
+	mif_stop_logging();
+
+	old_state = mc->phone_state;
+	new_state = STATE_CRASH_EXIT;
+
+	/* keep an earlier, more specific crash reason if one was set */
+	if (ld->crash_reason.type == CRASH_REASON_NONE)
+		ld->crash_reason.type = CRASH_REASON_CP_ACT_CRASH;
+
+	mc->s5100_cp_reset_required = false;
+	mif_info("Set s5100_cp_reset_required to 0\n");
+
+	if (old_state != new_state) {
+		mif_err("new_state = %s\n", cp_state_str(new_state));
+
+		if (old_state == STATE_ONLINE)
+			modem_notify_event(MODEM_EVENT_EXIT, mc);
+
+		change_modem_state(mc, new_state);
+	}
+
+	/* dump per-device TX/RX queue pointers for crash analysis */
+	bl = &mld->legacy_link_dev;
+
+	for (i = 0; i < IPC_MAP_MAX; i++) {
+		ipc_dev = bl->dev[i];
+		mif_info("%s TX: head:%d tail:%d, RX: head: %d tail:%d\n",
+			 ipc_dev->name, get_txq_head(ipc_dev), get_txq_tail(ipc_dev),
+			 get_rxq_head(ipc_dev), get_rxq_tail(ipc_dev));
+	}
+
+	atomic_set(&mld->forced_cp_crash, 0);
+
+irq_done:
+	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE]);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * CP2AP_CP_WRST_N GPIO interrupt (level-triggered, re-armed for the
+ * opposite level each time): hold a dedicated wakesource while the CP
+ * warm-reset line is low, release it when the line goes high again.
+ */
+static irqreturn_t cp_wrst_handler(int irq, void *data)
+{
+	struct modem_ctl *mc = data;
+	int gpio_val = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_WRST_N], true);
+
+	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N]);
+
+	if (gpio_val == 0) {
+		logbuffer_log(mc->log, "acquire wrst lock\n");
+		cpif_wake_lock(mc->ws_wrst);
+	} else {
+		logbuffer_log(mc->log, "release wrst lock\n");
+		cpif_wake_unlock(mc->ws_wrst);
+	}
+
+	/* flip trigger polarity to catch the next transition */
+	mc->cp_wrst_irq_chip->irq_set_type(
+		irq_get_irq_data(mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N].num),
+		(gpio_val == 1 ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH));
+	mif_enable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N]);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Request the CP2AP warm-reset GPIO IRQ (initially level-low triggered).
+ * Idempotent: returns 0 immediately if already registered.
+ */
+static int register_cp_wrst_interrupt(struct modem_ctl *mc)
+{
+	int ret;
+
+	if (mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N].registered)
+		return 0;
+
+	mif_info("Register CP_WRST interrupt.\n");
+	mif_init_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N],
+		     mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N].num,
+		     "cp_wrst", IRQF_TRIGGER_LOW);
+
+	ret = mif_request_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N],
+			      cp_wrst_handler, mc);
+	if (ret)
+		mif_err("%s: ERR! request_irq(%s#%d) fail (%d)\n",
+			mc->name, mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N].name,
+			mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N].num, ret);
+
+	return ret;
+}
+
+/*
+ * Request the PHONE_ACTIVE (cp2ap_cp_active) GPIO IRQ, level-low
+ * triggered. Idempotent: returns 0 immediately if already registered.
+ * Returns -EINVAL on a NULL modem_ctl, otherwise the request result.
+ */
+static int register_phone_active_interrupt(struct modem_ctl *mc)
+{
+	int ret;
+
+	if (!mc)
+		return -EINVAL;
+
+	if (mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE].registered)
+		return 0;
+
+	mif_info("Register PHONE ACTIVE interrupt.\n");
+	mif_init_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE],
+		     mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE].num,
+		     "phone_active", IRQF_TRIGGER_LOW);
+
+	ret = mif_request_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE], cp_active_handler, mc);
+	if (ret)
+		mif_err("%s: ERR! request_irq(%s#%d) fail (%d)\n",
+			mc->name, mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE].name,
+			mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE].num, ret);
+
+	return ret;
+}
+
+/*
+ * Request the CP2AP_WAKEUP GPIO IRQ. If already registered (e.g. after a
+ * CP reset), only re-arm it for level-low and reset the expected-level
+ * tracker to the initial state; the handler re-arms polarity afterwards.
+ */
+static int register_cp2ap_wakeup_interrupt(struct modem_ctl *mc)
+{
+	int ret;
+
+	if (mc == NULL)
+		return -EINVAL;
+
+	if (mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP].registered) {
+		mif_info("Set IRQF_TRIGGER_LOW to cp2ap_wakeup gpio\n");
+		/* back to the initial GPIO level assumption */
+		check_link_order = 1;
+		ret = mc->apwake_irq_chip->irq_set_type(
+			irq_get_irq_data(mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP].num),
+			IRQF_TRIGGER_LOW);
+		return ret;
+	}
+
+	mif_info("Register CP2AP WAKEUP interrupt.\n");
+	mif_init_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP],
+		     mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP].num,
+		     "cp2ap_wakeup", IRQF_TRIGGER_LOW);
+
+	ret = mif_request_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP], ap_wakeup_handler, mc);
+	if (ret) {
+		mif_err("%s: ERR! request_irq(%s#%d) fail (%d)\n",
+			mc->name, mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP].name,
+			mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP].num, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+/* sysfs read: throughput threshold (Mbps) for PCIe link-speed switching. */
+static ssize_t tp_threshold_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	const struct modem_ctl *mc = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d\n", mc->tp_threshold);
+}
+
+/*
+ * sysfs write: set the throughput threshold (Mbps) above which the PCIe
+ * link speed is raised. The value is stored unvalidated.
+ */
+static ssize_t tp_threshold_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	u32 threshold;
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+
+	if (!buf || sscanf(buf, "%10u", &threshold) != 1)
+		return -EINVAL;
+
+	/* %u: threshold is u32 (was %d, flagged by -Wformat) */
+	dev_info(dev, "Dynamic PCIe link speed: Change threshold to %uMbps\n", threshold);
+	mc->tp_threshold = threshold;
+
+	return count;
+}
+
+/* sysfs read: hysteresis (Mbps) applied around the speed threshold. */
+static ssize_t tp_hysteresis_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const struct modem_ctl *mc = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d\n", mc->tp_hysteresis);
+}
+
+/*
+ * sysfs write: set the hysteresis (Mbps) applied around the throughput
+ * threshold to avoid oscillating link-speed changes.
+ */
+static ssize_t tp_hysteresis_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	u32 hysteresis;
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+
+	if (!buf || sscanf(buf, "%10u", &hysteresis) != 1)
+		return -EINVAL;
+
+	/* %u: hysteresis is u32 (was %d, flagged by -Wformat) */
+	dev_info(dev, "Dynamic PCIe link speed: Change hysteresis to %uMbps\n", hysteresis);
+	mc->tp_hysteresis = hysteresis;
+
+	return count;
+}
+
+/* sysfs read: whether dynamic PCIe link-speed switching is enabled. */
+static ssize_t dynamic_spd_enable_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	const struct modem_ctl *mc = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d\n", mc->pcie_dynamic_spd_enabled);
+}
+
+/*
+ * sysfs write: enable/disable dynamic PCIe link-speed switching
+ * (any non-zero value enables it).
+ */
+static ssize_t dynamic_spd_enable_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int enable;
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+
+	/* %d: enable is a signed int ("%u" with int* is a specifier/type
+	 * mismatch, undefined behavior per C and flagged by -Wformat)
+	 */
+	if (!buf || sscanf(buf, "%10d", &enable) != 1)
+		return -EINVAL;
+
+	dev_info(dev, "Dynamic PCIe link speed is %s\n", enable ? "enabled" : "not enabled");
+	mc->pcie_dynamic_spd_enabled = enable;
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(tp_threshold);
+static DEVICE_ATTR_RW(tp_hysteresis);
+static DEVICE_ATTR_RW(dynamic_spd_enable);
+
+/* Attribute set published under the "dynamic_pcie_spd" sysfs group. */
+static struct attribute *dynamic_pcie_spd_attrs[] = {
+	&dev_attr_tp_threshold.attr,
+	&dev_attr_tp_hysteresis.attr,
+	&dev_attr_dynamic_spd_enable.attr,
+	NULL,
+};
+
+static const struct attribute_group dynamic_pcie_spd_group = {
+	.attrs = dynamic_pcie_spd_attrs,
+	.name = "dynamic_pcie_spd",
+};
+
+/* Dual-SIM detect selection; valid values are 1 or 2 (see get_ds_detect()).
+ * Settable at module load and via the "sim/ds_detect" sysfs node.
+ */
+static int ds_detect = 2;
+module_param(ds_detect, int, 0664);
+MODULE_PARM_DESC(ds_detect, "Dual SIM detect");
+
+/* sysfs read: current ds_detect value. */
+static ssize_t ds_detect_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%d\n", ds_detect);
+}
+
+/*
+ * sysfs write: set the Dual-SIM detect mode. Stored verbatim;
+ * get_ds_detect() clamps out-of-range values on use.
+ */
+static ssize_t ds_detect_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	int ret;
+	int value = 0;	/* initialized: logged below even when parsing fails */
+
+	ret = kstrtoint(buf, 0, &value);
+	if (ret != 0) {
+		mif_err("invalid value:%d with %d\n", value, ret);
+		return -EINVAL;
+	}
+
+	ds_detect = value;
+	mif_info("set ds_detect: %d\n", ds_detect);
+
+	return count;
+}
+static DEVICE_ATTR_RW(ds_detect);
+
+/* Attribute set published under the "sim" sysfs group. */
+static struct attribute *sim_attrs[] = {
+	&dev_attr_ds_detect.attr,
+	NULL,
+};
+
+static const struct attribute_group sim_group = {
+	.attrs = sim_attrs,
+	.name = "sim",
+};
+
+/* sysfs read: whether the main CP wakesource is currently held. */
+static ssize_t s5100_wake_lock_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	const struct modem_ctl *mc = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d\n", cpif_wake_lock_active(mc->ws));
+}
+
+/* sysfs write: non-zero acquires the main CP wakesource, zero releases it. */
+static ssize_t s5100_wake_lock_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+	long req;
+
+	if (kstrtol(buf, 0, &req))
+		return -EINVAL;
+
+	if (req != 0)
+		cpif_wake_lock(mc->ws);
+	else
+		cpif_wake_unlock(mc->ws);
+
+	return count;
+}
+
+/* sysfs read: whether the warm-reset wakesource is currently held. */
+static ssize_t s5100_wrst_wake_lock_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	const struct modem_ctl *mc = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d\n", cpif_wake_lock_active(mc->ws_wrst));
+}
+
+/* sysfs write: non-zero acquires the warm-reset wakesource, zero releases it. */
+static ssize_t s5100_wrst_wake_lock_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct modem_ctl *mc = dev_get_drvdata(dev);
+	long req;
+
+	if (kstrtol(buf, 0, &req))
+		return -EINVAL;
+
+	if (req != 0)
+		cpif_wake_lock(mc->ws_wrst);
+	else
+		cpif_wake_unlock(mc->ws_wrst);
+
+	return count;
+}
+
+/* NOTE(review): intentionally not static — presumably referenced from
+ * another file when these attributes are added to a device; confirm
+ * before narrowing linkage.
+ */
+DEVICE_ATTR_RW(s5100_wake_lock);
+DEVICE_ATTR_RW(s5100_wrst_wake_lock);
+
+/*
+ * Map the ds_detect module parameter (1 or 2) to a zero-based value.
+ * Out-of-range settings are normalized back to the default of 2.
+ */
+static int get_ds_detect(void)
+{
+	if (ds_detect < 1 || ds_detect > 2)
+		ds_detect = 2;
+
+	mif_info("Dual SIM detect = %d\n", ds_detect);
+	return ds_detect - 1;
+}
+
+/*
+ * Write the initial AP->CP control information into shared memory before
+ * CP boot: optional offset words, zeroed status words, zeroed capability
+ * words (when capability checking is on), and the Dual-SIM detect bit in
+ * the AP status word. Returns 0 on success, -EINVAL on a bad DS value.
+ */
+static int init_control_messages(struct modem_ctl *mc)
+{
+	struct modem_data *modem = mc->mdm_data;
+	struct link_device *ld = get_current_link(mc->iod);
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	int ds_det;
+
+	/* publish region offsets only when the DT provided them */
+	if (modem->offset_cmsg_offset)
+		iowrite32(modem->cmsg_offset, mld->cmsg_offset);
+	if (modem->offset_srinfo_offset)
+		iowrite32(modem->srinfo_offset, mld->srinfo_offset);
+	if (ld->capability_check && modem->offset_capability_offset)
+		iowrite32(modem->capability_offset, mld->capability_offset);
+
+	set_ctrl_msg(&mld->ap2cp_united_status, 0);
+	set_ctrl_msg(&mld->cp2ap_united_status, 0);
+
+	if (ld->capability_check) {
+		int part;
+
+		for (part = 0; part < AP_CP_CAP_PARTS; part++) {
+			iowrite32(0, mld->ap_capability_offset[part]);
+			iowrite32(0, mld->cp_capability_offset[part]);
+		}
+	}
+
+	ds_det = get_ds_detect();
+	if (ds_det < 0) {
+		mif_err("ds_det error:%d\n", ds_det);
+		return -EINVAL;
+	}
+
+	/* tell CP which SIM slot to use via the united status word */
+	update_ctrl_msg(&mld->ap2cp_united_status, ds_det, mc->sbi_ds_det_mask,
+			mc->sbi_ds_det_pos);
+	mif_info("ds_det:%d\n", ds_det);
+
+	return 0;
+}
+
+/*
+ * Enable or disable all CP MSI vectors: the base vector first, then (with
+ * CONFIG_CP_PKTPROC) each per-queue exclusive vector. For each vector the
+ * IRQ itself and its wake capability are toggled together; *irq_wake
+ * tracks whether enable_irq_wake() actually succeeded so the disable path
+ * stays balanced. No-op when no MSI base has been assigned yet.
+ */
+static void set_pcie_msi_int(struct link_device *ld, bool enabled)
+{
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	int irq;
+	bool *irq_wake;
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	struct pktproc_adaptor *ppa = &mld->pktproc;
+	unsigned int q_idx = 0;
+#endif
+
+	if (!mld->msi_irq_base)
+		return;
+
+	irq = mld->msi_irq_base;
+	irq_wake = &mld->msi_irq_base_wake;
+
+	do {
+		if (enabled) {
+			int err;
+
+			/* avoid unbalanced enable_irq() calls */
+			if (!mld->msi_irq_enabled)
+				enable_irq(irq);
+
+			if (!*irq_wake) {
+				err = enable_irq_wake(irq);
+				*irq_wake = !err;
+			}
+		} else {
+			if (mld->msi_irq_enabled)
+				disable_irq(irq);
+
+			if (*irq_wake) {
+				disable_irq_wake(irq);
+				*irq_wake = false;
+			}
+		}
+
+		/* 0 terminates the loop unless a pktproc queue follows */
+		irq = 0;
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+		if (q_idx < ppa->num_queue) {
+			struct pktproc_queue *q = ppa->q[q_idx];
+
+			irq = q->irq;
+			irq_wake = &q->msi_irq_wake;
+			q_idx++;
+		}
+#endif
+	} while (irq);
+
+	mld->msi_irq_enabled = enabled;
+}
+
+/*
+ * Allocate and wire up the CP's PCIe interrupts:
+ *  - doorbell values for AP->CP signalling (bit 16 set via the mask),
+ *  - 4 MSI vectors, of which vector 0 is cp2ap_msg and vector 1 is
+ *    cp2ap_status (order matters: irq_offset tracks the assignment),
+ *  - optional exclusive per-queue pktproc vectors.
+ * Returns the MSI base IRQ (> 0) on success or a negative errno.
+ */
+static int request_pcie_int(struct link_device *ld, struct platform_device *pdev)
+{
+#define DOORBELL_INT_MASK(x) ((x) | 0x10000)
+
+	int ret, base_irq;
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	struct device *dev = &pdev->dev;
+	struct modem_ctl *mc = ld->mc;
+	struct modem_data *modem = mc->mdm_data;
+	int irq_offset = 0;
+
+	/* Doorbell */
+	mld->intval_ap2cp_msg = DOORBELL_INT_MASK(modem->mbx->int_ap2cp_msg);
+	mld->intval_ap2cp_pcie_link_ack = DOORBELL_INT_MASK(modem->mbx->int_ap2cp_pcie_link_ack);
+
+	/* MSI */
+	base_irq = s51xx_pcie_request_msi_int(mc->s51xx_pdev, 4);
+	if (base_irq <= 0) {
+		mif_err("Can't get MSI IRQ!!!\n");
+		return -EFAULT;
+	}
+	mif_info("MSI base_irq(%d)\n", base_irq);
+
+	/* vector 0: CP->AP IPC message */
+	ret = devm_request_irq(dev, base_irq + irq_offset, shmem_irq_handler,
+			       IRQF_SHARED, "mif_cp2ap_msg", mld);
+	if (ret) {
+		mif_err("Can't request cp2ap_msg interrupt!!!\n");
+		return -EIO;
+	}
+	irq_offset++;
+
+	/* vector 1: CP->AP TX state/flow-control */
+	ret = devm_request_irq(dev, base_irq + irq_offset, shmem_tx_state_handler,
+			       IRQF_SHARED, "mif_cp2ap_status", mld);
+	if (ret) {
+		mif_err("Can't request cp2ap_status interrupt!!!\n");
+		return -EIO;
+	}
+	irq_offset++;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+	if (mld->pktproc.use_exclusive_irq) {
+		struct pktproc_adaptor *ppa = &mld->pktproc;
+		unsigned int i;
+
+		for (i = 0; i < ppa->num_queue; i++) {
+			struct pktproc_queue *q = ppa->q[i];
+
+			ret = pcie_register_separated_msi_vector(mc->pcie_ch_num, q->irq_handler,
+								 q, &q->irq);
+			if (ret < 0) {
+				mif_err("register_separated_msi_vector for pktproc q[%u] err:%d\n",
+					i, ret);
+				q->irq = 0;
+				return -EIO;
+			}
+		}
+	}
+#endif
+
+	mld->msi_irq_base = base_irq;
+	mld->msi_irq_enabled = true;
+	set_pcie_msi_int(ld, true);
+
+	return base_irq;
+}
+
+/*
+ * Bring up the S51xx PCIe endpoint for the CP: open S2MPU windows for all
+ * shared-memory regions (and the AoC voice window), enable IOMMU regions,
+ * power the link on at boot speed, and — exactly once per boot — probe
+ * the EP driver and request its interrupts. On later calls after a CP
+ * crash-reset, only the MSI IRQ is re-enabled.
+ * Returns 0 on success or a negative errno.
+ */
+static int register_pcie(struct link_device *ld)
+{
+	struct modem_ctl *mc = ld->mc;
+	struct platform_device *pdev = to_platform_device(mc->dev);
+	static int is_registered;	/* one-shot latch across calls */
+	struct mem_link_device *mld = to_mem_link_device(ld);
+	u32 cp_num = ld->mdm_data->cp_num;
+
+#if IS_ENABLED(CONFIG_GS_S2MPU)
+	u32 shmem_idx;
+	int ret;
+	struct device_node *s2mpu_dn;
+#endif
+	mif_info("CP EP driver initialization start.\n");
+
+	/* boot-over-PCIe needs the MSI region mapped before link-up */
+	if (!mld->msi_reg_base && (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE))
+		mld->msi_reg_base = cp_shmem_get_region(cp_num, SHMEM_MSI);
+
+#if IS_ENABLED(CONFIG_GS_S2MPU)
+
+	s2mpu_dn = of_parse_phandle(mc->dev->of_node, "s2mpu", 0);
+	if (!s2mpu_dn) {
+		mif_err("Failed to find s2mpu from device tree\n");
+		return -EINVAL;
+	}
+
+	mc->s2mpu = s2mpu_fwnode_to_info(&s2mpu_dn->fwnode);
+	if (!mc->s2mpu) {
+		/* S2MPU driver not bound yet; retry probe later */
+		mif_err("Failed to get S2MPU\n");
+		return -EPROBE_DEFER;
+	}
+
+	for (shmem_idx = 0 ; shmem_idx < MAX_CP_SHMEM ; shmem_idx++) {
+		/* MSI window is only needed for PCIe boot download */
+		if (shmem_idx == SHMEM_MSI && !(mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE))
+			continue;
+
+		if (cp_shmem_get_base(cp_num, shmem_idx)) {
+			ret = s2mpu_open(mc->s2mpu,
+					 cp_shmem_get_base(cp_num, shmem_idx),
+					 cp_shmem_get_size(cp_num, shmem_idx),
+					 DMA_BIDIRECTIONAL);
+			if (ret) {
+				mif_err("S2MPU open failed error=%d\n", ret);
+				return -EINVAL;
+			}
+		}
+	}
+
+	/* Also setup AoC window for voice calls */
+	ret = s2mpu_open(mc->s2mpu,
+			 AOC_PCIE_WINDOW_START, AOC_PCIE_WINDOW_SIZE,
+			 DMA_BIDIRECTIONAL);
+
+	if (ret) {
+		mif_err("S2MPU AoC open failed error=%d\n", ret);
+		return -EINVAL;
+	}
+
+#endif
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+	if (exynos_pcie_is_sysmmu_enabled(mc->pcie_ch_num))
+		cpif_pcie_iommu_enable_regions(mld);
+#endif
+
+	/* give the CP time to settle before first link-up */
+	msleep(200);
+
+	s5100_poweron_pcie(mc, LINK_MODE_MIN_SPEED_BOOTING);
+
+	if (is_registered == 0) {
+		/* initialize the pci_dev for modem_ctl */
+		mif_info("s51xx_pcie_init start\n");
+		s51xx_pcie_init(mc);
+		if (!mc->s51xx_pdev) {
+			mif_err("s51xx_pdev is NULL. Check if CP wake up is done.\n");
+			return -EINVAL;
+		}
+
+		/* debug: check MSI 32bit or 64bit - should be set as 32bit before this point*/
+		// debug: pci_read_config_dword(s51xx_pcie.s51xx_pdev, 0x50, &msi_val);
+		// debug: mif_err("MSI Control Reg : 0x%x\n", msi_val);
+
+		request_pcie_int(ld, pdev);
+		first_save_s51xx_status(mc->s51xx_pdev);
+
+		is_registered = 1;
+	} else {
+		/* already probed: crash-reset path only re-enables the MSI IRQ */
+		if (mc->phone_state == STATE_CRASH_RESET) {
+			print_msi_register(mc->s51xx_pdev);
+			enable_irq(mld->msi_irq_base);
+		}
+	}
+
+	print_msi_register(mc->s51xx_pdev);
+	mc->pcie_registered = true;
+
+	mif_info("CP EP driver initialization end.\n");
+
+	return 0;
+}
+
+/*
+ * Drive the CP power-off GPIO sequence. Pin ordering and the per-step
+ * delays (ms, last argument of mif_gpio_set_value) follow the S5100
+ * power-down timing; do not reorder.
+ */
+static void gpio_power_off_cp(struct modem_ctl *mc)
+{
+#if IS_ENABLED(CONFIG_CP_WRESET_WA)
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_NRESET], 0, 50);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_PWR], 0, 0);
+#else
+	/* pulse ap2cp_wakeup, then drop reset, warm-reset, power, PMIC */
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_WAKEUP], 1, 10);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_WAKEUP], 0, 0);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_NRESET], 0, 0);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_WRST_N], 0, 0);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_PWR], 0, 30);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_PM_WRST_N], 0, 50);
+#endif
+}
+
+/*
+ * Same power-off sequence as gpio_power_off_cp(), but (re)enables the
+ * S5910 clock buffer between dropping NRESET and dropping the remaining
+ * pins, so the clock is already stable for a following power-on.
+ */
+static void gpio_power_off_cp_with_s5910_on(struct modem_ctl *mc)
+{
+#if IS_ENABLED(CONFIG_CP_WRESET_WA)
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_NRESET], 0, 50);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_PWR], 0, 0);
+#else
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_WAKEUP], 1, 10);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_WAKEUP], 0, 0);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_NRESET], 0, 0);
+
+	/* Turn on S5910 clock buffer after AP resets CP */
+	if (mc->cp_ever_powered_on && mc->s5910_dev) {
+		udelay(10);
+		s5910_check_lpm_mode(mc->s5910_dev);
+		s5910_turn_on_sequence(mc->s5910_dev);
+		udelay(200);
+		s5910_check_lpm_mode(mc->s5910_dev);
+	}
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_WRST_N], 0, 0);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_PWR], 0, 30);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_PM_WRST_N], 0, 50);
+#endif
+}
+
+/*
+ * Full CP power cycle: power off (keeping the S5910 clock buffer on),
+ * then drive the power-on GPIO sequence. Marks cp_ever_powered_on so
+ * later off sequences know the clock buffer needs handling.
+ */
+static void gpio_power_offon_cp(struct modem_ctl *mc)
+{
+	gpio_power_off_cp_with_s5910_on(mc);
+
+	mc->cp_ever_powered_on = true;
+
+#if IS_ENABLED(CONFIG_CP_WRESET_WA)
+	udelay(50);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_PWR], 1, 50);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_NRESET], 1, 50);
+#else
+	/* PMIC first, then power, then release reset and warm-reset */
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_PM_WRST_N], 1, 10);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_PWR], 1, 10);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_NRESET], 1, 10);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_WRST_N], 1, 0);
+#endif
+}
+
+/*
+ * CP warm-reset GPIO sequence (only without the WRESET workaround):
+ * pulse the PMIC warm-reset line, drop ap2cp_cp_wrst, and wait (up to
+ * ~20ms, polling) for the CP to acknowledge by pulling cp2ap_cp_wrst
+ * low before releasing the lines again.
+ */
+static void gpio_power_wreset_cp(struct modem_ctl *mc)
+{
+#if !IS_ENABLED(CONFIG_CP_WRESET_WA)
+	int i = 0, val;
+
+	mif_info("warm reset sequence start\n");
+	val = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_WRST_N],
+				 false);
+	if (!val)
+		mif_err("cp2ap_cp_wrst level is low before warm reset\n");
+
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_PM_WRST_N], 1, 5);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_WRST_N], 0, 0);
+	/* give the CP extra settle time if it was already low */
+	if (!val)
+		udelay(1000);
+
+	while (i++ < 20) {
+		if (!mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_WRST_N],
+					false))
+			break;
+		mif_info("Wait for cp2ap_cp_wrst pulled to low\n");
+		usleep_range(1000, 1100);
+	}
+
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_PM_WRST_N], 0, 5);
+	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_CP_WRST_N], 1, 45);
+
+	mif_info("warm reset sequence end\n");
+#endif
+}
+
/*
 * Pulse AP2CP_PARTIAL_RST_N low for ~50ms to issue a partial reset,
 * logging the GPIO state before, during, and after the pulse.
 */
static void gpio_power_preset_cp(struct modem_ctl *mc)
{
	print_mc_state(mc);
	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_PARTIAL_RST_N], 0, 50);
	print_mc_state(mc);
	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_PARTIAL_RST_N], 1, 50);
	print_mc_state(mc);
}
+
/*
 * Clear the boot-progress fields in the MSI shared register region so a
 * subsequent boot starts from a known state.  Only applies when the link
 * downloads the bootloader over PCIe (LINK_ATTR_XMIT_BTDLR_PCIE); lazily
 * maps the MSI region on first use.
 */
static void clear_boot_stage(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->iod);
	struct mem_link_device *mld = to_mem_link_device(ld);
	u32 boot_stage, sub_boot_stage;

	if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE) {
		if (!mld->msi_reg_base) {
			u32 cp_num = ld->mdm_data->cp_num;
			mld->msi_reg_base = cp_shmem_get_region(cp_num, SHMEM_MSI);
			if (!mld->msi_reg_base) {
				mif_err("Failed to get valid msi reg base.\n");
				return;
			}
		}

		/* Clear the boot_stage and sub_boot_stage fields */
		CLEAR_MSI_REG_FIELD(mld, boot_stage);
		/* read back for the log line below */
		boot_stage = ioread32(mld->msi_reg_base +
				offsetof(struct msi_reg_type, boot_stage));
		CLEAR_MSI_REG_FIELD(mld, sub_boot_stage);
		sub_boot_stage = ioread32(mld->msi_reg_base +
				offsetof(struct msi_reg_type, sub_boot_stage));

		/* Clear other boot stage related fields */
		CLEAR_MSI_REG_FIELD(mld, flag_cafe);
		CLEAR_MSI_REG_FIELD(mld, otp_version);
		CLEAR_MSI_REG_FIELD(mld, db_loop_cnt);
		CLEAR_MSI_REG_FIELD(mld, db_received);
		CLEAR_MSI_REG_FIELD(mld, boot_size);

		mif_info("Clear boot_stage, sub_boot_stage to %X, %X\n", boot_stage,
			 sub_boot_stage);
	}
}
+
/*
 * Power the CP on from scratch.
 *
 * Quiesces CP IRQs and the wakeup workqueue, transitions the modem state
 * machine (via a RESET intermediate if it was not OFFLINE), tears down any
 * PCIe link state, drives the handshake GPIOs to their idle-on levels,
 * clears the shared control-message area, and finally runs the GPIO power
 * off/on cycle.  Holds a wakelock across the sequence.  Always returns 0.
 */
static int power_on_cp(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->iod);
	struct modem_data __maybe_unused *modem = mc->mdm_data;
	struct mem_link_device *mld = to_mem_link_device(ld);

	mif_info("%s: +++\n", mc->name);

	clear_boot_stage(mc);

	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE]);
	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP]);

	/* let in-flight wakeup work finish before changing power state */
	drain_workqueue(mc->wakeup_wq);

	print_mc_state(mc);

	if (!cpif_wake_lock_active(mc->ws))
		cpif_wake_lock(mc->ws);

	/* pass through RESET so listeners observe the transition */
	if (mc->phone_state != STATE_OFFLINE) {
		change_modem_state(mc, STATE_RESET);
		msleep(STATE_RESET_INTERVAL_MS);
	}
	change_modem_state(mc, STATE_OFFLINE);

	pcie_clean_dislink(mc);

	mc->pcie_registered = false;

	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 1, 0);
	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI], 0, 0);
	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_PARTIAL_RST_N], 1, 0);

	/* Clear shared memory */
	init_ctrl_msg(&mld->ap2cp_msg);
	init_ctrl_msg(&mld->cp2ap_msg);

	print_mc_state(mc);
	gpio_power_offon_cp(mc);
	mif_info("GPIO status after S5100 Power on\n");
	print_mc_state(mc);

	mif_info("---\n");

	return 0;
}
+
+static int power_off_cp(struct modem_ctl *mc)
+{
+ mif_info("%s: +++\n", mc->name);
+
+ if (mc->phone_state == STATE_OFFLINE)
+ goto exit;
+
+ change_modem_state(mc, STATE_OFFLINE);
+
+ pcie_clean_dislink(mc);
+
+ gpio_power_off_cp(mc);
+ print_mc_state(mc);
+
+exit:
+ mif_info("---\n");
+
+ return 0;
+}
+
/*
 * Shut the CP down for system power-off / reboot.
 *
 * Quiesces IRQs and the wakeup workqueue, waits up to ~3s for CP to report
 * active (so it is in a known state before being cut), runs the S5910
 * shutdown sequence if present, and powers the GPIOs off.  The PCIe
 * teardown order differs per modem variant (5400 powers off the RC first).
 * Always returns 0.
 */
static int power_shutdown_cp(struct modem_ctl *mc)
{
	int i;

	mif_err("%s: +++\n", mc->name);

	if (mc->phone_state == STATE_OFFLINE)
		goto exit;

	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE]);
	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP]);

	drain_workqueue(mc->wakeup_wq);

	/* wait for cp_active for 3 seconds */
	for (i = 0; i < 150; i++) {
		if (mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_ACTIVE], false) == 1) {
			mif_err("PHONE_ACTIVE pin is HIGH...\n");
			break;
		}
		msleep(20);
	}

	if (mc->s5910_dev) {
		/* hold CP in reset before shutting the clock buffer down */
		mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_NRESET], 0, 50);
		s5910_shutdown_sequence(mc->s5910_dev);
	}

	if (mc->variant == MODEM_SEC_5400)
		pcie_poweroff(mc->pcie_ch_num);

	gpio_power_off_cp(mc);
	print_mc_state(mc);

	if (mc->variant != MODEM_SEC_5400)
		pcie_clean_dislink(mc);

exit:
	mif_err("---\n");
	return 0;
}
+
/*
 * Partially reset the CP after a crash (CRASH_EXIT path).
 *
 * Saves PCIe state if the link is still up, then either runs a full GPIO
 * power cycle (when s5100_cp_reset_required) or only pulses the partial
 * reset line.  Always returns 0.
 */
static int power_reset_partial_cp(struct modem_ctl *mc)
{
	struct s51xx_pcie *s51xx_pcie = NULL;
#if IS_ENABLED(CONFIG_LINK_DEVICE_WITH_SBD_ARCH)
	struct link_device *ld = get_current_link(mc->iod);
	struct mem_link_device *mld = to_mem_link_device(ld);

	/* stop periodic SBD state dumps during the reset */
	if (ld->sbd_ipc && hrtimer_active(&mld->sbd_print_timer))
		hrtimer_cancel(&mld->sbd_print_timer);
#endif
	mif_info("%s: +++\n", mc->name);

	mc->phone_state = STATE_CRASH_EXIT;
	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE]);
	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP]);
	drain_workqueue(mc->wakeup_wq);
	pcie_clean_dislink(mc);

	if (mc->s51xx_pdev != NULL)
		s51xx_pcie = pci_get_drvdata(mc->s51xx_pdev);

	/* link still up: save EP config before the second teardown */
	if (s51xx_pcie && s51xx_pcie->link_status == 1) {
		mif_info("link_satus:%d\n", s51xx_pcie->link_status);
		s51xx_pcie_save_state(mc->s51xx_pdev);
		pcie_clean_dislink(mc);
	}

	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI], 0, 0);

	mif_info("s5100_cp_reset_required:%d\n", mc->s5100_cp_reset_required);
	if (mc->s5100_cp_reset_required)
		gpio_power_offon_cp(mc);
	else
		gpio_power_preset_cp(mc);

	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 1, 0);
	print_mc_state(mc);

	mif_info("---\n");

	return 0;
}
+
+static int power_reset_dump_cp(struct modem_ctl *mc, bool silent)
+{
+ struct s51xx_pcie *s51xx_pcie = NULL;
+ struct link_device *ld = get_current_link(mc->iod);
+ struct mem_link_device *mld = to_mem_link_device(ld);
+ u32 otp_version;
+
+ clear_boot_stage(mc);
+
+ if (ld->sbd_ipc && hrtimer_active(&mld->sbd_print_timer))
+ hrtimer_cancel(&mld->sbd_print_timer);
+
+ mc->phone_state = STATE_CRASH_EXIT;
+ mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE]);
+ mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP]);
+
+ /* Prevent AP from suspending during crashdump */
+ if (!cpif_wake_lock_active(mc->ws)) {
+ mif_info("Acquiring wakelock during modem crash, timeout: %dms!\n",
+ CRASH_WAKELOCK_TIMEOUT_MS);
+ cpif_wake_lock_timeout(mc->ws, msecs_to_jiffies(CRASH_WAKELOCK_TIMEOUT_MS));
+ }
+
+ drain_workqueue(mc->wakeup_wq);
+ pcie_clean_dislink(mc);
+
+ if (mc->s51xx_pdev != NULL)
+ s51xx_pcie = pci_get_drvdata(mc->s51xx_pdev);
+
+ if (s51xx_pcie && s51xx_pcie->link_status == 1) {
+ mif_info("link_satus:%d\n", s51xx_pcie->link_status);
+ s51xx_pcie_save_state(mc->s51xx_pdev);
+ pcie_clean_dislink(mc);
+ }
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_GPIO_WA)
+ if (mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI], 1, 10))
+ mif_gpio_toggle_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 50);
+#else
+ if (silent)
+ mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI], 0, 0);
+ else
+ mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI], 1, 0);
+#endif
+
+ mif_info("s5100_cp_reset_required:%d\n", mc->s5100_cp_reset_required);
+ if (mc->s5100_cp_reset_required)
+ gpio_power_offon_cp(mc);
+ else
+ gpio_power_wreset_cp(mc);
+
+#if IS_ENABLED(CONFIG_CP_PMIC)
+ if (mc->pmic_dev) {
+ otp_version = pmic_get_otp(mc->pmic_dev);
+ if (otp_version < 0)
+ mif_info("PMIC OTP Version read fail\n");
+ else
+ mif_info("PMIC OTP Version %#x\n", otp_version);
+
+ /* Execute PMIC wreset sequence after toggling CP_PMIC_WRST */
+ pmic_warm_reset_sequence(mc->pmic_dev);
+ }
+#endif
+
+ mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 1, 0);
+ print_mc_state(mc);
+
+ if (cpif_wake_lock_active(mc->ws)) {
+ mif_info("Release wakelock after modem crash\n");
+ cpif_wake_unlock(mc->ws);
+ }
+
+ return 0;
+}
+
+static int power_reset_warm_cp(struct modem_ctl *mc)
+{
+ struct s51xx_pcie *s51xx_pcie = NULL;
+ u32 otp_version;
+#if IS_ENABLED(CONFIG_LINK_DEVICE_WITH_SBD_ARCH)
+ struct link_device *ld = get_current_link(mc->iod);
+ struct mem_link_device *mld = to_mem_link_device(ld);
+
+ if (ld->sbd_ipc && hrtimer_active(&mld->sbd_print_timer))
+ hrtimer_cancel(&mld->sbd_print_timer);
+#endif
+ mif_info("%s: +++\n", mc->name);
+
+ mc->phone_state = STATE_CRASH_EXIT;
+ mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE]);
+ mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP]);
+
+ /* Prevent AP from suspending during crashdump */
+ if (!cpif_wake_lock_active(mc->ws)) {
+ mif_info("Acquiring wakelock during modem power reset, timeout: %dms!\n",
+ CRASH_WAKELOCK_TIMEOUT_MS);
+ cpif_wake_lock_timeout(mc->ws, msecs_to_jiffies(CRASH_WAKELOCK_TIMEOUT_MS));
+ }
+
+ drain_workqueue(mc->wakeup_wq);
+
+ if (mc->phone_state != STATE_OFFLINE) {
+ change_modem_state(mc, STATE_RESET);
+ msleep(STATE_RESET_INTERVAL_MS);
+ }
+ change_modem_state(mc, STATE_OFFLINE);
+
+ pcie_clean_dislink(mc);
+
+ if (mc->s51xx_pdev != NULL)
+ s51xx_pcie = pci_get_drvdata(mc->s51xx_pdev);
+
+ if (s51xx_pcie && s51xx_pcie->link_status == 1) {
+ mif_info("link_satus:%d\n", s51xx_pcie->link_status);
+ s51xx_pcie_save_state(mc->s51xx_pdev);
+ pcie_clean_dislink(mc);
+ }
+
+ mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI], 0, 0);
+
+ mif_info("s5100_cp_reset_required:%d\n", mc->s5100_cp_reset_required);
+ if (mc->s5100_cp_reset_required)
+ gpio_power_offon_cp(mc);
+ else
+ gpio_power_wreset_cp(mc);
+
+ otp_version = pmic_get_otp(mc->pmic_dev);
+ if (otp_version < 0)
+ mif_info("PMIC OTP Version read fail\n");
+ else
+ mif_info("PMIC OTP Version %#x\n", otp_version);
+
+#if IS_ENABLED(CONFIG_CP_PMIC)
+ /* Execute PMIC warm reset sequence after toggling CP_PMIC_WRST. */
+ if (mc->pmic_dev)
+ pmic_warm_reset_sequence(mc->pmic_dev);
+#endif
+
+ mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 1, 0);
+ print_mc_state(mc);
+
+ if (cpif_wake_lock_active(mc->ws)) {
+ mif_info("Release wakelock after modem power reset!\n");
+ cpif_wake_unlock(mc->ws);
+ }
+
+ mif_info("---\n");
+ return 0;
+}
+
/*
 * Hard power-cycle the CP without the crash-dump handshake.
 *
 * Moves phone_state straight to OFFLINE, tears down PCIe (twice if the
 * link was still up), and runs the GPIO off/on cycle.  Unlike
 * power_reset_warm_cp(), no EP state save is performed here (see the
 * commented-out save_s5100_status()).  Always returns 0.
 */
static int power_reset_cp(struct modem_ctl *mc)
{
	struct s51xx_pcie *s51xx_pcie = NULL;
	struct link_device *ld = get_current_link(mc->iod);
	struct mem_link_device *mld = to_mem_link_device(ld);

	mif_info("%s: +++\n", mc->name);

	/* stop periodic SBD state dumps during the reset */
	if (ld->sbd_ipc && hrtimer_active(&mld->sbd_print_timer))
		hrtimer_cancel(&mld->sbd_print_timer);

	mc->phone_state = STATE_OFFLINE;
	pcie_clean_dislink(mc);

	if (mc->s51xx_pdev != NULL)
		s51xx_pcie = pci_get_drvdata(mc->s51xx_pdev);

	if (s51xx_pcie && s51xx_pcie->link_status == 1) {
		/* save_s5100_status(); */
		mif_info("link_satus:%d\n", s51xx_pcie->link_status);
		pcie_clean_dislink(mc);
	}

	gpio_power_offon_cp(mc);
	print_mc_state(mc);

	mif_info("---\n");

	return 0;
}
+
/*
 * Restart the CP boot flow without a visible power cycle ("silent" reset).
 *
 * Re-initializes the shared control-message area, cancels any pending
 * crash-ack handling, and re-runs the normal boot preparation/start hooks,
 * moving the state machine to BOOTING.  Returns 0 on success or the error
 * from modem_ctrl_check_offset_data().  Note: the wakelock taken at entry
 * is not released here; presumably a later stage drops it -- TODO confirm.
 */
static int silent_reset_cp(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->iod);
	struct mem_link_device *mld = to_mem_link_device(ld);
	int ret = 0;

	mif_info("%s: +++\n", mc->name);

	if (!cpif_wake_lock_active(mc->ws))
		cpif_wake_lock(mc->ws);

	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI], 0, 0);

	/* Clear shared memory */
	init_ctrl_msg(&mld->ap2cp_msg);
	init_ctrl_msg(&mld->cp2ap_msg);

	if (init_control_messages(mc))
		mif_err("Failed to initialize control messages\n");

	/* 2cp dump WA */
	if (timer_pending(&mld->crash_ack_timer))
		del_timer(&mld->crash_ack_timer);
	atomic_set(&mld->forced_cp_crash, 0);

	mif_info("Set link mode to LINK_MODE_BOOT.\n");

	if (ld->link_prepare_normal_boot)
		ld->link_prepare_normal_boot(ld, mc->bootd);

	change_modem_state(mc, STATE_BOOTING);
	mc->phone_state = STATE_BOOTING;

	if (ld->link_start_normal_boot) {
		mif_info("link_start_normal_boot\n");
		ld->link_start_normal_boot(ld, mc->iod);
	}

	ret = modem_ctrl_check_offset_data(mc);
	if (ret) {
		mif_err("modem_ctrl_check_offset_data() error:%d\n", ret);
		return ret;
	}

	mif_info("---\n");

	return 0;
}
+
/*
 * Poll for CP readiness, up to @count iterations at ~20ms each.
 *
 * @check_msi: true  -> poll the MSI boot_stage register for the variant's
 *                      boot-done mask (5300 or 5400);
 *             false -> poll the CP2AP_WAKEUP GPIO for level 1.
 *
 * Returns 0 when the condition is met, -EFAULT on timeout.  If the
 * condition was already met on the very first poll (cnt == 0), an extra
 * 20ms sleep is taken -- presumably to let CP finish settling; confirm
 * against the boot protocol.
 *
 * NOTE: STATUS_NAME is a function-local convenience macro but is never
 * #undef'd, so it leaks into the rest of the translation unit.
 */
static int check_cp_status(struct modem_ctl *mc, unsigned int count, bool check_msi)
{
#define STATUS_NAME(msi) (msi ? "boot_stage" : "CP2AP_WAKEUP")

	struct link_device *ld = get_current_link(mc->bootd);
	struct mem_link_device *mld = to_mem_link_device(ld);
	bool check_done = false;
	int cnt = 0;
	int val;

	do {
		if (check_msi) {
			val = (int)ioread32(mld->msi_reg_base +
					offsetof(struct msi_reg_type, boot_stage));
			if (((mc->variant == MODEM_SEC_5400) && (val == BOOT_STAGE_5400_DONE_MASK)) ||
			    ((mc->variant == MODEM_SEC_5300) && (val == BOOT_STAGE_5300_DONE_MASK))) {
				check_done = true;
				break;
			}
		} else {
			val = mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_WAKEUP], false);
			if (val == 1) {
				check_done = true;
				break;
			}
		}

		mif_info_limited("%s == 0x%X (cnt %d)\n", STATUS_NAME(check_msi), val, cnt);
		msleep(20);
	} while (++cnt < count);

	if (!check_done) {
		mif_err("ERR! %s == 0x%X (cnt %d)\n", STATUS_NAME(check_msi), val, cnt);
		return -EFAULT;
	}

	mif_info("%s == 0x%X (cnt %d)\n", STATUS_NAME(check_msi), val, cnt);
	if (cnt == 0)
		msleep(20);

	return 0;
}
+
/*
 * Poll the MSI boot_stage register until the expected stage is reached,
 * up to @count iterations at 20ms each.
 *
 * @check_bl1: true  -> wait for BL1 download-done;
 *             false -> wait for the full 5400 boot-done mask.
 *
 * Also reads sub_boot_stage and otp_version for logging.  Returns 0 on
 * success; on timeout dumps MSI/EP/doorbell diagnostics and returns
 * -EFAULT.
 */
static int check_boot_status(struct modem_ctl *mc, unsigned int count, bool check_bl1)
{
	struct link_device *ld = get_current_link(mc->bootd);
	struct mem_link_device *mld = to_mem_link_device(ld);
	bool check_done = false;
	int cnt = 0;
	u32 val, otp_ver, sub_boot_stage;

	do {
		/* ensure that CP updates the value */
		msleep(20);

		val = ioread32(mld->msi_reg_base +
				offsetof(struct msi_reg_type, boot_stage));
		otp_ver = ioread32(mld->msi_reg_base +
				offsetof(struct msi_reg_type, otp_version));
		sub_boot_stage = ioread32(mld->msi_reg_base +
				offsetof(struct msi_reg_type, sub_boot_stage));
		mif_info_limited("boot_stage: %X sub_boot_stage: %X otp_version: %X (cnt %d)\n",
				 val, sub_boot_stage, otp_ver, cnt);
		if (val == (check_bl1 ? BOOT_STAGE_BL1_DOWNLOAD_DONE_MASK
				      : BOOT_STAGE_5400_DONE_MASK)) {
			check_done = true;
			break;
		}
	} while (++cnt < count);

	if (!check_done) {
		mif_err("ERR! boot_stage == 0x%X (cnt %d)\n", val, cnt);
		goto status_error;
	}

	mif_info("boot_stage == 0x%X (cnt %d)\n", val, cnt);
	/* success on the very first poll: give CP a little extra settle time */
	if (cnt == 0)
		msleep(10);

	return 0;

status_error:
	print_msi_space(mc);
	if (mc->pcie_powered_on && pcie_check_link_status(mc->pcie_ch_num)) {
		print_ep_config_space(mc);
		print_doorbell_region(mc);
	}
	return -EFAULT;
}
+
/*
 * Publish the boot image location/size to CP via the MSI register region
 * and ring the AP->CP doorbell so the CP ROM starts fetching it.
 *
 * Only valid for links that download the bootloader over PCIe.
 * Returns 0 on success, -EPERM for a non-PCIe-btdlr link, -EINVAL when
 * the MSI region has not been mapped yet.  The doorbell must be sent
 * only after all three image fields are written.
 */
static int set_cp_rom_boot_img(struct mem_link_device *mld)
{
	struct link_device *ld = &mld->link_dev;
	struct modem_ctl *mc = ld->mc;
	struct modem_data *modem = mc->mdm_data;
	unsigned long boot_img_addr;

	if (!(mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE)) {
		mif_err("Invalid attr:0x%lx\n", mld->attrs);
		return -EPERM;
	}

	if (!mld->msi_reg_base) {
		mif_err("MSI region is not assigned yet\n");
		return -EINVAL;
	}

	boot_img_addr = cp_shmem_get_base(modem->cp_num, SHMEM_IPC) + mld->boot_img_offset;

	/* split the physical address into lo/hi halves for the 32-bit regs */
	iowrite32(PADDR_LO(boot_img_addr),
		  mld->msi_reg_base + offsetof(struct msi_reg_type, img_addr_lo));
	iowrite32(PADDR_HI(boot_img_addr),
		  mld->msi_reg_base + offsetof(struct msi_reg_type, img_addr_hi));
	iowrite32(mld->boot_img_size,
		  mld->msi_reg_base + offsetof(struct msi_reg_type, img_size));

	mif_info("boot_img addr:0x%lX size:0x%X\n", boot_img_addr, mld->boot_img_size);

	s51xx_pcie_send_doorbell_int(mc->s51xx_pdev, mld->intval_ap2cp_msg);

	return 0;
}
+
/*
 * Dump boot-image diagnostics after a ROM boot failure: MSI boot stage,
 * error report, published image address/size, and a hex dump of the first
 * (up to) 64 bytes of the image in shared memory.
 *
 * NOTE(review): str is sized 64 * 3; whether dump2hex() needs an extra
 * byte for a NUL terminator at the 64-byte cap is not visible here --
 * confirm against dump2hex()'s contract.
 */
static void debug_cp_rom_boot_img(struct mem_link_device *mld)
{
	unsigned char str[64 * 3];
	u8 __iomem *img_base;
	u32 img_size;

	if (!(mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE)) {
		mif_err("Invalid attr:0x%lx\n", mld->attrs);
		return;
	}

	img_base = mld->base + mld->boot_img_offset;
	img_size = ioread32(mld->msi_reg_base + offsetof(struct msi_reg_type, img_size));

	mif_err("boot_stage:0x%X err_report:0x%X img_lo:0x%X img_hi:0x%X img_size:0x%X\n",
		ioread32(mld->msi_reg_base + offsetof(struct msi_reg_type, boot_stage)),
		ioread32(mld->msi_reg_base + offsetof(struct msi_reg_type, err_report)),
		ioread32(mld->msi_reg_base + offsetof(struct msi_reg_type, img_addr_lo)),
		ioread32(mld->msi_reg_base + offsetof(struct msi_reg_type, img_addr_hi)),
		img_size);

	/* cap the hex dump at 64 bytes */
	if (img_size > 64)
		img_size = 64;

	dump2hex(str, (img_size ? img_size * 3 : 1), img_base, img_size);
	mif_err("img_content:%s\n", str);
}
+
/*
 * Start a normal (non-dump) CP boot.
 *
 * Initializes control messages, cancels crash-ack state, runs the link's
 * boot preparation/start hooks, and moves the state machine to BOOTING.
 * For PCIe-bootloader links: register PCIe, publish the ROM image, wait
 * for boot-done via MSI, power-cycle the PCIe link, wait for
 * CP2AP_WAKEUP, then bring the link back at max boot speed.  For other
 * links: just wait for CP2AP_WAKEUP and register PCIe.
 *
 * Returns 0 on success or a negative error; on status failure dumps boot
 * image diagnostics and drops any held wakelock.  Note the success path
 * also flows through the status_error label (ret == 0 skips the error
 * branch).
 */
static int start_normal_boot(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->bootd);
	struct mem_link_device *mld = to_mem_link_device(ld);
	int ret = 0;

	mif_info("+++\n");

	if (init_control_messages(mc))
		mif_err("Failed to initialize control messages\n");

	/* 2cp dump WA */
	if (timer_pending(&mld->crash_ack_timer))
		del_timer(&mld->crash_ack_timer);
	atomic_set(&mld->forced_cp_crash, 0);

	mif_info("Set link mode to LINK_MODE_BOOT.\n");

	if (ld->link_prepare_normal_boot)
		ld->link_prepare_normal_boot(ld, mc->bootd);

	change_modem_state(mc, STATE_BOOTING);

	mif_info("Disable phone actvie interrupt.\n");
	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE]);

	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 1, 0);
	mc->phone_state = STATE_BOOTING;

	if (ld->link_start_normal_boot) {
		mif_info("link_start_normal_boot\n");
		ld->link_start_normal_boot(ld, mc->iod);
	}

	ret = modem_ctrl_check_offset_data(mc);
	if (ret) {
		mif_err("modem_ctrl_check_offset_data() error:%d\n", ret);
		return ret;
	}

	if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE) {
		register_pcie(ld);
		if (mc->s51xx_pdev && mc->pcie_registered)
			set_cp_rom_boot_img(mld);

		/* wait for ROM boot done (MSI boot_stage) */
		ret = check_cp_status(mc, 200, true);
		if (ret < 0)
			goto status_error;

		s5100_poweroff_pcie(mc, false);

		/* wait for CP2AP_WAKEUP before re-linking */
		ret = check_cp_status(mc, 200, false);
		if (ret < 0)
			goto status_error;

		s5100_poweron_pcie(mc, LINK_MODE_MAX_SPEED_BOOTING);
	} else {
		ret = check_cp_status(mc, 200, false);
		if (ret < 0)
			goto status_error;

		register_pcie(ld);
	}

status_error:
	if (ret < 0) {
		mif_err("ERR! check_cp_status fail (err %d)\n", ret);
		if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE)
			debug_cp_rom_boot_img(mld);
		if (cpif_wake_lock_active(mc->ws))
			cpif_wake_unlock(mc->ws);

		return ret;
	}

	mif_info("---\n");
	return 0;
}
+
/*
 * Finish a normal boot: wait for CP's init-done completion, enable PCIe
 * L1.2, (re)register and enable the CP-active and CP2AP-wakeup
 * interrupts, optionally hook the CP warm-reset IRQ (mif_off_during_volte)
 * and the LCD notifier, and move the state machine to ONLINE.
 *
 * Returns 0 on success, -EAGAIN if CP never signals init_cmpl within
 * MIF_INIT_TIMEOUT, or the LCD-notifier registration error.  Note the
 * success path falls through the exit label with err == 0.
 */
static int complete_normal_boot(struct modem_ctl *mc)
{
	int err = 0;
	unsigned long remain;
#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
	int __maybe_unused ret;
	struct modem_data __maybe_unused *modem = mc->mdm_data;
	struct mem_link_device __maybe_unused *mld = modem->mld;
#endif

	mif_info("+++\n");

	reinit_completion(&mc->init_cmpl);
	remain = wait_for_completion_timeout(&mc->init_cmpl, MIF_INIT_TIMEOUT);
	if (remain == 0) {
		mif_err("T-I-M-E-O-U-T\n");
		err = -EAGAIN;
		goto exit;
	}

	/* Enable L1.2 after CP boot */
	s51xx_pcie_l1ss_ctrl(1, mc->pcie_ch_num);

	/* Read cp_active before enabling irq */
	mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_ACTIVE], true);

	err = register_phone_active_interrupt(mc);
	if (err)
		mif_err("Err: register_phone_active_interrupt:%d\n", err);
	mif_enable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE]);

	err = register_cp2ap_wakeup_interrupt(mc);
	if (err)
		mif_err("Err: register_cp2ap_wakeup_interrupt:%d\n", err);
	mif_enable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP]);

	/* warm-reset IRQ is registered but left disabled until needed */
	if (mc->mdm_data->mif_off_during_volte) {
		register_cp_wrst_interrupt(mc);
		mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N]);
	}

	print_mc_state(mc);

	mc->device_reboot = false;

	change_modem_state(mc, STATE_ONLINE);

	print_mc_state(mc);

#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
	if (mc->lcd_notifier.notifier_call == NULL) {
		mif_info("Register lcd notifier\n");
		mc->lcd_notifier.notifier_call = s5100_lcd_notifier;
		ret = register_lcd_status_notifier(&mc->lcd_notifier);
		if (ret) {
			mif_err("failed to register LCD notifier\n");
			return ret;
		}
	}
#endif /* CONFIG_CP_LCD_NOTIFIER */

	mif_info("---\n");

exit:
	return err;
}
+
/*
 * First stage of a split normal boot: download/verify BL1 only.
 *
 * Same preparation as start_normal_boot(), but for PCIe-bootloader links
 * it stops after BL1 download-done (check_boot_status with check_bl1 =
 * true); the PCIe power cycle and final boot stage are handled by
 * start_normal_boot_bootloader().  Non-PCIe-btdlr links fall straight
 * through with ret == 0 (no status check is performed for them here).
 */
static int start_normal_boot_bl1(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->bootd);
	struct mem_link_device *mld = to_mem_link_device(ld);
	int ret = 0;

	mif_info("+++\n");

	if (init_control_messages(mc))
		mif_err("Failed to initialize control messages\n");

	/* 2cp dump WA */
	if (timer_pending(&mld->crash_ack_timer))
		del_timer(&mld->crash_ack_timer);
	atomic_set(&mld->forced_cp_crash, 0);

	mif_info("Set link mode to LINK_MODE_BOOT.\n");

	if (ld->link_prepare_normal_boot)
		ld->link_prepare_normal_boot(ld, mc->bootd);

	change_modem_state(mc, STATE_BOOTING);

	mif_info("Disable phone actvie interrupt.\n");
	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE]);

	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 1, 0);
	mc->phone_state = STATE_BOOTING;

	if (ld->link_start_normal_boot) {
		mif_info("link_start_normal_boot\n");
		ld->link_start_normal_boot(ld, mc->iod);
	}

	ret = modem_ctrl_check_offset_data(mc);
	if (ret) {
		mif_err("modem_ctrl_check_offset_data() error:%d\n", ret);
		return ret;
	}

	if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE) {
		register_pcie(ld);
		if (mc->s51xx_pdev && mc->pcie_registered)
			set_cp_rom_boot_img(mld);

		/* wait for BL1 download done */
		ret = check_boot_status(mc, 200, true);
		if (ret < 0)
			goto status_error;
	}

status_error:
	if (ret < 0) {
		mif_err("ERR! check_cp_status fail (err %d)\n", ret);
		if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE)
			debug_cp_rom_boot_img(mld);
		if (cpif_wake_lock_active(mc->ws))
			cpif_wake_unlock(mc->ws);

		return ret;
	}

	mif_info("---\n");
	return 0;
}
+
/*
 * Second stage of a split normal boot: full bootloader download.
 *
 * Publishes the (next) boot image, waits for the full boot-done stage,
 * clears boot_stage in the MSI region, power-cycles PCIe, waits for
 * CP2AP_WAKEUP, and brings the link back at max boot speed.  A no-op
 * (returns 0) for links without LINK_ATTR_XMIT_BTDLR_PCIE.
 */
static int start_normal_boot_bootloader(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->bootd);
	struct mem_link_device *mld = to_mem_link_device(ld);
	int ret = 0;
	int val;

	mif_info("+++\n");

	if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE) {
		if (mc->s51xx_pdev && mc->pcie_registered)
			set_cp_rom_boot_img(mld);

		ret = check_boot_status(mc, 200, false);
		if (ret < 0)
			goto status_error;

		/* reset boot_stage so the next stage starts clean */
		iowrite32(0, mld->msi_reg_base + offsetof(struct msi_reg_type, boot_stage));
		val = (int)ioread32(mld->msi_reg_base + offsetof(struct msi_reg_type, boot_stage));
		mif_info("Clear boot_stage == 0x%X\n", val);

		s5100_poweroff_pcie(mc, false);

		/* wait for CP2AP_WAKEUP before re-linking */
		ret = check_cp_status(mc, 200, false);
		if (ret < 0)
			goto status_error;

		s5100_poweron_pcie(mc, LINK_MODE_MAX_SPEED_BOOTING);
	}

status_error:
	if (ret < 0) {
		mif_err("ERR! check_cp_status fail (err %d)\n", ret);
		if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE)
			debug_cp_rom_boot_img(mld);
		if (cpif_wake_lock_active(mc->ws))
			cpif_wake_unlock(mc->ws);

		return ret;
	}

	mif_info("---\n");
	return 0;
}
+
/*
 * Force the CP into crash mode (workqueue context; see trigger_cp_crash).
 *
 * Builds a crash reason from ld->crash_reason, skips entirely if the
 * device is rebooting, asserts DUMP_NOTI when CP is still active (with the
 * GPIO-toggle workaround and a reentrancy guard via dump_toggle_issued
 * when CONFIG_LINK_DEVICE_PCIE_GPIO_WA), then hands off to
 * ld->link_trigger_cp_crash().  Always returns 0.
 */
static int trigger_cp_crash_internal(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->bootd);
	struct mem_link_device *mld = to_mem_link_device(ld);
	u32 crash_type;
	char reason[CP_CRASH_INFO_SIZE] = "Forced crash call";

	if (ld->crash_reason.type == CRASH_REASON_NONE)
		ld->crash_reason.type = CRASH_REASON_MIF_FORCED;
	crash_type = ld->crash_reason.type;

	/* append the caller-supplied reason string, if any */
	if (strlen(ld->crash_reason.string) > 0) {
		scnprintf(reason, CP_CRASH_INFO_SIZE, "Forced crash call by %s",
			  ld->crash_reason.string);
	}


	mif_err("+++\n");

	if (mc->device_reboot) {
		mif_err("skip cp crash : device is rebooting..!!!\n");
		goto exit;
	}

	print_mc_state(mc);
	pcie_print_rc_msi_register(mc->pcie_ch_num);

	if (mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_CP_ACTIVE], true) == 1) {
#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_GPIO_WA)
		/* only one dump toggle may be in flight at a time */
		if (atomic_inc_return(&mc->dump_toggle_issued) > 1) {
			atomic_dec(&mc->dump_toggle_issued);
			goto exit;
		}

		if (mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI], 1, 10))
			mif_gpio_toggle_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 50);

		atomic_dec(&mc->dump_toggle_issued);
#else
		mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI], 1, 0);
#endif
	} else {
		mif_err("do not need to set dump_noti\n");
	}

	ld->link_trigger_cp_crash(mld, crash_type, reason);

exit:
	mif_err("---\n");
	return 0;
}
+
/* Workqueue shim: recover the modem_ctl from the embedded work item and
 * run the crash trigger in process context.
 */
static void trigger_cp_crash_work(struct work_struct *ws)
{
	struct modem_ctl *mc = container_of(ws, struct modem_ctl, crash_work);

	trigger_cp_crash_internal(mc);
}
+
/* Queue the CP crash trigger onto the dedicated crash workqueue so it runs
 * asynchronously in process context.  Always returns 0.
 */
static int trigger_cp_crash(struct modem_ctl *mc)
{
	queue_work(mc->crash_wq, &mc->crash_work);
	return 0;
}
+
+int s5100_force_crash_exit_ext(enum crash_type type)
+{
+ struct link_device *ld = get_current_link(g_mc->bootd);
+
+ ld->crash_reason.type = type;
+
+ if (g_mc)
+ g_mc->ops.trigger_cp_crash(g_mc);
+
+ return 0;
+}
+
+static int s5100_force_crash_exit_ext_reason(const char *buf)
+{
+ struct link_device *ld = get_current_link(g_mc->bootd);
+
+ if (strlen(buf) > 0)
+ strncpy(ld->crash_reason.string, buf, CP_CRASH_INFO_SIZE);
+
+ return s5100_force_crash_exit_ext(CRASH_REASON_MIF_FORCED);
+}
+
/* Notifier-chain entry point: treats the notifier payload as a crash
 * reason string and forces a CP crash.  @action is unused; the return
 * value is 0 (NOTIFY_DONE).
 */
static int s5100_force_crash_notifier(struct notifier_block *nb,
		unsigned long action, void *nb_data)
{
	const char *buf = nb_data;

	return s5100_force_crash_exit_ext_reason(buf);
}
+
+int s5100_send_panic_noti_ext(void)
+{
+ struct modem_data *modem;
+
+ if (g_mc) {
+ modem = g_mc->mdm_data;
+ if (modem->mld) {
+ mif_err("Send CMD_KERNEL_PANIC message to CP\n");
+ send_ipc_irq(modem->mld, cmd2int(CMD_KERNEL_PANIC));
+ }
+ }
+
+ return 0;
+}
+
/*
 * Boot the CP into crash-dump mode.
 *
 * Sets CRASH_EXIT, holds a timed wakelock across the dump bring-up, runs
 * the link's dump-boot hook, then (for PCIe-bootloader links) follows the
 * same register/image/status/PCIe-power-cycle dance as start_normal_boot.
 * On both success and failure the held wakelock is released before
 * returning (the success path's mif_err("---") deliberately falls through
 * into the error: cleanup label).
 */
static int start_dump_boot(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->bootd);
	struct mem_link_device *mld = to_mem_link_device(ld);
	int err = 0;

	mif_err("+++\n");

	/* Change phone state to CRASH_EXIT */
	mc->phone_state = STATE_CRASH_EXIT;

	/* Prevent AP from suspending during crashdump */
	if (!cpif_wake_lock_active(mc->ws)) {
		mif_info("Acquiring wakelock during modem crash, timeout: %dms!\n",
			 CRASH_WAKELOCK_TIMEOUT_MS);
		cpif_wake_lock_timeout(mc->ws, msecs_to_jiffies(CRASH_WAKELOCK_TIMEOUT_MS));
	}

	if (!ld->link_start_dump_boot) {
		mif_err("%s: link_start_dump_boot is null\n", ld->name);
		err = -EFAULT;
		goto error;
	}

	err = ld->link_start_dump_boot(ld, mc->bootd);
	if (err)
		goto error;

	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 1, 0);
	/* do not handle cp2ap_wakeup irq during dump process */
	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP]);

	if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE) {
		register_pcie(ld);
		if (mc->s51xx_pdev && mc->pcie_registered)
			set_cp_rom_boot_img(mld);

		/* wait for ROM boot done (MSI boot_stage) */
		err = check_cp_status(mc, 200, true);
		if (err < 0)
			goto status_error;

		s5100_poweroff_pcie(mc, false);

		/* wait for CP2AP_WAKEUP before re-linking */
		err = check_cp_status(mc, 200, false);
		if (err < 0)
			goto status_error;

		s5100_poweron_pcie(mc, LINK_MODE_MAX_SPEED_BOOTING);
	} else {
		err = check_cp_status(mc, 200, false);
		if (err < 0)
			goto status_error;

		register_pcie(ld);
	}

status_error:
	if (err < 0) {
		mif_err("ERR! check_cp_status fail (err %d)\n", err);
		if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE)
			debug_cp_rom_boot_img(mld);
		return err;
	}

	mif_err("---\n");
error:
	if (cpif_wake_lock_active(mc->ws)) {
		mif_info("Release wakelock after modem crash!\n");
		cpif_wake_unlock(mc->ws);
	}

	return err;
}
+
/*
 * First stage of a split dump boot: BL1 download only.
 *
 * Same preparation as start_dump_boot() but stops after BL1
 * download-done; start_dump_boot_bootloader() completes the sequence.
 * The wakelock acquired here is released on all paths via the error:
 * label (the success path falls through it with ret == 0).
 */
static int start_dump_boot_bl1(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->bootd);
	struct mem_link_device *mld = to_mem_link_device(ld);
	int ret = 0;

	mif_err("+++\n");

	/* Change phone state to CRASH_EXIT */
	mc->phone_state = STATE_CRASH_EXIT;

	/* Prevent AP from suspending during crashdump */
	if (!cpif_wake_lock_active(mc->ws)) {
		mif_info("Acquiring wakelock during modem crash, timeout: %dms!\n",
			 CRASH_WAKELOCK_TIMEOUT_MS);
		cpif_wake_lock_timeout(mc->ws, msecs_to_jiffies(CRASH_WAKELOCK_TIMEOUT_MS));
	}

	if (!ld->link_start_dump_boot) {
		mif_err("%s: link_start_dump_boot is null\n", ld->name);
		ret = -EFAULT;
		goto error;
	}

	ret = ld->link_start_dump_boot(ld, mc->bootd);
	if (ret)
		goto error;

	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 1, 0);
	/* do not handle cp2ap_wakeup irq during dump process */
	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP]);

	if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE) {
		register_pcie(ld);
		if (mc->s51xx_pdev && mc->pcie_registered)
			set_cp_rom_boot_img(mld);

		/* wait for BL1 download done */
		ret = check_boot_status(mc, 200, true);
		if (ret < 0)
			goto status_error;
	}

status_error:
	if (ret < 0) {
		mif_err("ERR! check_cp_status fail (err %d)\n", ret);
		if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE)
			debug_cp_rom_boot_img(mld);
		return ret;
	}
error:
	if (cpif_wake_lock_active(mc->ws)) {
		mif_info("Release wakelock after modem crash!\n");
		cpif_wake_unlock(mc->ws);
	}

	mif_err("---\n");
	return ret;
}
+
/*
 * Second stage of a split dump boot: full bootloader download.
 *
 * Mirrors start_normal_boot_bootloader(): publish the image, wait for
 * boot-done, clear boot_stage in the MSI region, power-cycle PCIe, wait
 * for CP2AP_WAKEUP, and relink at max boot speed.  A no-op (returns 0)
 * for links without LINK_ATTR_XMIT_BTDLR_PCIE.
 */
static int start_dump_boot_bootloader(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->bootd);
	struct mem_link_device *mld = to_mem_link_device(ld);
	int ret = 0;
	int val;

	mif_err("+++\n");

	if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE) {
		if (mc->s51xx_pdev && mc->pcie_registered)
			set_cp_rom_boot_img(mld);

		ret = check_boot_status(mc, 200, false);
		if (ret < 0)
			goto status_error;

		/* reset boot_stage so the next stage starts clean */
		iowrite32(0, mld->msi_reg_base + offsetof(struct msi_reg_type, boot_stage));
		val = (int)ioread32(mld->msi_reg_base + offsetof(struct msi_reg_type, boot_stage));
		mif_info("Clear boot_stage == 0x%X\n", val);

		s5100_poweroff_pcie(mc, false);

		/* wait for CP2AP_WAKEUP before re-linking */
		ret = check_cp_status(mc, 200, false);
		if (ret < 0)
			goto status_error;

		s5100_poweron_pcie(mc, LINK_MODE_MAX_SPEED_BOOTING);
	}

status_error:
	if (ret < 0) {
		mif_err("ERR! check_cp_status fail (err %d)\n", ret);
		if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE)
			debug_cp_rom_boot_img(mld);
		return ret;
	}

	mif_err("---\n");
	return ret;
}
+
+
/*
 * Boot the CP after a partial reset.
 *
 * Runs the normal-boot preparation, moves to BOOTING, then invokes the
 * link's partial-boot hook.  Requires a PCIe-bootloader link: the CP is
 * expected to come up on its own, so only CP2AP_WAKEUP is awaited before
 * powering the PCIe link back on; non-PCIe-btdlr links are rejected with
 * -EFAULT.  The timed wakelock is released on all exit paths.
 */
static int start_dump_boot_partial(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->bootd);
	struct mem_link_device *mld = to_mem_link_device(ld);
	int err = 0;

	mif_err("+++\n");

	if (ld->link_prepare_normal_boot)
		ld->link_prepare_normal_boot(ld, mc->bootd);

	change_modem_state(mc, STATE_BOOTING);
	mif_info("Disable phone actvie interrupt.\n");
	mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_ACTIVE]);

	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 1, 0);

	mc->phone_state = STATE_BOOTING;

	/* Prevent AP from suspending during crashdump */
	if (!cpif_wake_lock_active(mc->ws)) {
		mif_info("Acquiring wakelock during partial reset, timeout: %dms!\n",
			 CRASH_WAKELOCK_TIMEOUT_MS);
		cpif_wake_lock_timeout(mc->ws, msecs_to_jiffies(CRASH_WAKELOCK_TIMEOUT_MS));
	}

	if (!ld->link_start_partial_boot) {
		mif_err("%s: link_start_partial_boot is null\n", ld->name);
		err = -EFAULT;
		goto error;
	}

	err = ld->link_start_partial_boot(ld, mc->bootd);
	if (err)
		goto error;

	if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE) {
		/* wait for CP2AP_WAKEUP before re-linking */
		err = check_cp_status(mc, 200, false);
		if (err < 0)
			goto status_error;

		s5100_poweron_pcie(mc, LINK_MODE_MAX_SPEED_BOOTING);
	} else {
		mif_err("ERR! LINK_ATTR_XMIT_BTDLR_PCIE is not set\n");
		err = -EFAULT;
		goto error;
	}

status_error:
	if (err < 0) {
		mif_err("ERR! check_cp_status fail (err %d)\n", err);
		if (mld->attrs & LINK_ATTR_XMIT_BTDLR_PCIE)
			debug_cp_rom_boot_img(mld);
	}

error:
	if (cpif_wake_lock_active(mc->ws)) {
		mif_info("Release wakelock after partial reset!\n");
		cpif_wake_unlock(mc->ws);
	}

	mif_err("---\n");
	return err;
}
+
/*
 * Power the S5100 PCIe link down.
 *
 * The power-off is skipped when: the link is already down, CP still drives
 * CP2AP_WAKEUP high or Tx is pending (unless @force_off), or the device is
 * rebooting.  While shutting down, PCIe recovery state (sudden link-down /
 * completion-timeout) and retry counters are recorded into ring-buffer
 * histories and cleared, the PCI state is saved for restore on the next
 * power-on, and AP2CP_WAKEUP is dropped.
 *
 * After releasing both mutexes, any reserved doorbell interrupt is flushed
 * under pcie_tx_lock: sent directly if the link came back up meanwhile,
 * otherwise a GPIO CP wakeup is attempted so pending Tx is not lost.  A
 * doorbell send failure forces a CP crash.
 *
 * Locking: takes pcie_onoff_lock then pcie_check_lock; pcie_tx_lock is
 * taken briefly to serialize against an in-flight Tx.  Always returns 0.
 */
static int s5100_poweroff_pcie(struct modem_ctl *mc, bool force_off)
{
	struct link_device *ld = get_current_link(mc->iod);
	struct mem_link_device *mld = to_mem_link_device(ld);
	bool force_crash = false;
	bool in_pcie_recovery = false;
	unsigned long flags;

	mutex_lock(&mc->pcie_onoff_lock);
	mutex_lock(&mc->pcie_check_lock);
	mif_debug("+++\n");

	if (!mc->pcie_powered_on &&
	    (s51xx_check_pcie_link_status(mc->pcie_ch_num) == 0)) {
		mif_info("Skip pci power off: already powered off\n");
		goto exit;
	}

	/* If power_off is called when PCIe is not active,
	 * setting force_off to true*/
	if (pcie_get_sudden_linkdown_state(mc->pcie_ch_num) || pcie_get_cpl_timeout_state(mc->pcie_ch_num)) {
		force_off = true;
	}
	/* CP reads Tx RP (or tail) after CP2AP_WAKEUP = 1.
	 * skip pci power off if CP2AP_WAKEUP = 1 or Tx pending.
	 */
	if (!force_off) {
		spin_lock_irqsave(&mc->pcie_tx_lock, flags);
		/* wait Tx done if it is running */
		spin_unlock_irqrestore(&mc->pcie_tx_lock, flags);
		msleep(30);
		if (check_mem_link_tx_pending(mld) ||
		    mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_WAKEUP], true) == 1) {
			mif_info("Skip pci power off: condition not met\n");
			goto exit;
		}
	}

	set_pcie_msi_int(ld, false);

	if (mc->device_reboot) {
		mif_info("Skip pci power off: device is rebooting\n");
		goto exit;
	}

	/* recovery status is not valid after PCI link down requests from CP */
	if (mc->pcie_linkdown_retry_cnt > 0) {
		mif_info("clear linkdown_retry_cnt(%d)..!!!\n", mc->pcie_linkdown_retry_cnt);
		pcie_linkdown_history[pcie_linkdown_count++ \
				      % MAX_PCIE_EVENT_HISTORY] = mc->pcie_linkdown_retry_cnt;
		mc->pcie_linkdown_retry_cnt = 0;
	}

	if (mc->pcie_cto_retry_cnt > 0) {
		mif_info("clear cto_retry_cnt(%d)..!!!\n", mc->pcie_cto_retry_cnt);
		pcie_cto_history[pcie_cto_count++ % MAX_PCIE_EVENT_HISTORY] \
				 = mc->pcie_cto_retry_cnt;
		mc->pcie_cto_retry_cnt = 0;
	}

	/* Clear recovery flags; remember we were recovering so a doorbell
	 * can be reserved for pending Tx after the link drops.
	 */
	if (pcie_get_sudden_linkdown_state(mc->pcie_ch_num)) {
		pcie_set_sudden_linkdown_state(mc->pcie_ch_num, false);
		in_pcie_recovery = true;
	}

	if (pcie_get_cpl_timeout_state(mc->pcie_ch_num)) {
		pcie_set_cpl_timeout_state(mc->pcie_ch_num, false);
		in_pcie_recovery = true;
	}

	mc->pcie_powered_on = false;

	/* Save PCI config state only while CP is in a sane state so it can
	 * be restored on the next power-on.
	 */
	if (mc->s51xx_pdev != NULL && (mc->phone_state == STATE_ONLINE ||
				       mc->phone_state == STATE_BOOTING)) {
		mif_debug("save s5100_status - phone_state:%d\n",
			  mc->phone_state);
		s51xx_pcie_save_state(mc->s51xx_pdev);
	} else
		mif_debug("ignore save_s5100_status - phone_state:%d\n",
			  mc->phone_state);

	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_WAKEUP], 0, 5);
	print_mc_state(mc);

	pcie_poweroff(mc->pcie_ch_num);

	if (cpif_wake_lock_active(mc->ws))
		cpif_wake_unlock(mc->ws);

exit:
	mif_debug("---\n");
	mutex_unlock(&mc->pcie_check_lock);
	mutex_unlock(&mc->pcie_onoff_lock);

	/* Flush (or re-arm) any reserved doorbell now that the mutexes are
	 * released; done under pcie_tx_lock to serialize with Tx submitters.
	 */
	spin_lock_irqsave(&mc->pcie_tx_lock, flags);
	if (in_pcie_recovery && !mc->reserve_doorbell_int && check_mem_link_tx_pending(mld))
		mc->reserve_doorbell_int = true;

	if ((mc->s51xx_pdev != NULL) && !mc->device_reboot && mc->reserve_doorbell_int) {
		mif_debug("DBG: doorbell_reserved = %d\n", mc->reserve_doorbell_int);
		if (mc->pcie_powered_on) {
			mc->reserve_doorbell_int = false;
			if (s51xx_pcie_send_doorbell_int(mc->s51xx_pdev,
							 mld->intval_ap2cp_msg) != 0)
				force_crash = true;
		} else
			s5100_try_gpio_cp_wakeup(mc);
	}
	spin_unlock_irqrestore(&mc->pcie_tx_lock, flags);

	if (unlikely(force_crash))
		s5100_force_crash_exit_ext(CRASH_REASON_PCIE_DOORBELL_FAILURE_POWEROFF);

	return 0;
}
+
/*
 * Power the S5100 PCIe link up.
 *
 * @mode selects the link-training profile:
 *   LINK_MODE_MIN_SPEED_BOOTING   - GEN1 x1, used for CP ROM boot
 *                                   (skips the CP2AP_WAKEUP precondition
 *                                   and AP2CP_WAKEUP assertion)
 *   LINK_MODE_ADAPTIVE_SPEED_BOOTED - GEN1 start when dynamic link speed
 *                                   is enabled, max width
 *   otherwise                     - max speed / max width
 *
 * Skipped when: mc is NULL, phone is OFFLINE, the link is already up,
 * CP2AP_WAKEUP is low (non-boot case), or the device is rebooting.
 * On success the saved PCI state is restored, MSI is re-enabled, and a
 * link-ack doorbell is sent to CP (a failure forces a CP crash).
 *
 * After unlocking, any doorbell reserved while the link was down is sent
 * under pcie_tx_lock; failure forces a CP crash.
 *
 * Locking: takes pcie_onoff_lock then pcie_check_lock.  Always returns 0.
 */
int s5100_poweron_pcie(struct modem_ctl *mc, enum link_mode mode)
{
	struct link_device *ld;
	struct mem_link_device *mld;
	bool force_crash = false;
	unsigned long flags;
	bool boot_on = (mode == LINK_MODE_MIN_SPEED_BOOTING);
	int speed, width;

	if (mc == NULL) {
		mif_err("Skip pci power on: mc is NULL\n");
		return 0;
	}

	ld = get_current_link(mc->iod);
	mld = to_mem_link_device(ld);

	if (mc->phone_state == STATE_OFFLINE) {
		mif_info("Skip pci power on: phone_state is OFFLINE\n");
		return 0;
	}

	mutex_lock(&mc->pcie_onoff_lock);
	mutex_lock(&mc->pcie_check_lock);
	mif_debug("+++ mode: %d\n", mode);
	if (mc->pcie_powered_on &&
	    (s51xx_check_pcie_link_status(mc->pcie_ch_num) != 0)) {
		mif_info("Skip pci power on: already powered on\n");
		goto exit;
	}

	if (!boot_on &&
	    mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_WAKEUP], true) == 0) {
		mif_info("Skip pci power on: condition not met\n");
		goto exit;
	}

	if (mc->device_reboot) {
		mif_info("Skip pci power on: device is rebooting\n");
		goto exit;
	}

	/* Hold a wakelock while the link is up; released on power-off */
	if (!cpif_wake_lock_active(mc->ws))
		cpif_wake_lock(mc->ws);

	if (!boot_on)
		mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_WAKEUP], 1, 5);

	print_mc_state(mc);

	spin_lock_irqsave(&mc->pcie_tx_lock, flags);
	/* wait Tx done if it is running */
	spin_unlock_irqrestore(&mc->pcie_tx_lock, flags);

	if (pcie_get_sudden_linkdown_state(mc->pcie_ch_num))
		pcie_set_ready_cto_recovery(mc->pcie_ch_num);

	if (pcie_get_cpl_timeout_state(mc->pcie_ch_num))
		pcie_set_ready_cto_recovery(mc->pcie_ch_num);

	/* Set dynamic lane number & speed according the link up mode */
	if (mode == LINK_MODE_MIN_SPEED_BOOTING) {
		speed = LINK_SPEED_GEN1;
		width = 1;
	} else {
		// Set default speed to GEN1 if dynamic speed adapation enabled
		if (mode == LINK_MODE_ADAPTIVE_SPEED_BOOTED
		    && mc->pcie_dynamic_spd_enabled)
			speed = LINK_SPEED_GEN1;
		else
			speed = pcie_get_max_link_speed(mc->pcie_ch_num);
		width = pcie_get_max_link_width(mc->pcie_ch_num);
	}

	pcie_set_msi_ctrl_addr(mc->pcie_ch_num, shm_get_msi_base());
	if (pcie_poweron(mc->pcie_ch_num, speed, width) != 0) {
		if (boot_on) {
			mif_err("PCIe gen1 linkup with CP ROM failed.\n");
			logbuffer_log(mc->log, "PCIe gen1 linkup with CP ROM failed.");
		}
		goto exit;
	}

	if (boot_on)
		mif_info("PCIe gen1 linkup with CP ROM succeed.\n");

	mc->pcie_powered_on = true;

	if (mc->s51xx_pdev != NULL) {
		s51xx_pcie_restore_state(mc->s51xx_pdev, boot_on, mc->variant);

		/* DBG: check MSI sfr setting values */
		print_msi_register(mc->s51xx_pdev);
	} else {
		mif_err("DBG: MSI sfr not set up, yet(s5100_pdev is NULL)");
	}

	set_pcie_msi_int(ld, true);

	/* Ack the link-up to CP via doorbell once MSI is live */
	if ((mc->s51xx_pdev != NULL) && mc->pcie_registered && (mc->phone_state != STATE_CRASH_EXIT)) {
		/* DBG */
		logbuffer_log(mc->log, "DBG: doorbell: pcie_registered = %d", mc->pcie_registered);
		if (s51xx_pcie_send_doorbell_int(mc->s51xx_pdev,
						 mld->intval_ap2cp_pcie_link_ack) != 0) {
			/* DBG */
			mif_err("DBG: s5100pcie_send_doorbell_int() func. is failed !!!\n");
			s5100_force_crash_exit_ext(CRASH_REASON_PCIE_DOORBELL_FAILURE_POWERON);
		}
	}
#if IS_ENABLED(CONFIG_CPIF_AP_SUSPEND_DURING_VOICE_CALL)
	/* During a voice call the AP may suspend: drop the wakelock again */
	if (mc->pcie_voice_call_on && (mc->phone_state != STATE_CRASH_EXIT)) {
		if (cpif_wake_lock_active(mc->ws))
			cpif_wake_unlock(mc->ws);

		mif_info("wakelock active = %d, voice status = %d\n",
			 cpif_wake_lock_active(mc->ws), mc->pcie_voice_call_on);
	}
#endif

exit:
	mif_debug("---\n");
	mutex_unlock(&mc->pcie_check_lock);
	mutex_unlock(&mc->pcie_onoff_lock);

	/* Send any doorbell reserved while the link was down */
	spin_lock_irqsave(&mc->pcie_tx_lock, flags);
	if ((mc->s51xx_pdev != NULL) && mc->pcie_powered_on && mc->reserve_doorbell_int) {
		logbuffer_log(mc->log, "DBG: doorbell: doorbell_reserved = %d\n", mc->reserve_doorbell_int);
		mc->reserve_doorbell_int = false;
		if (s51xx_pcie_send_doorbell_int(mc->s51xx_pdev, mld->intval_ap2cp_msg) != 0)
			force_crash = true;
	}
	spin_unlock_irqrestore(&mc->pcie_tx_lock, flags);

	if (unlikely(force_crash))
		s5100_force_crash_exit_ext(CRASH_REASON_PCIE_DOORBELL_FAILURE_POWERON);

	return 0;
}
+
/*
 * Pin the modem's PCIe MSI IRQs to their configured CPUs.
 *
 * With CONFIG_CP_PKTPROC, each pktproc queue IRQ (one queue unless
 * exclusive IRQs are in use) is pinned to mld->msi_irq_q_cpu[i]; the loop
 * stops at the first queue without an IRQ assigned.  The base MSI IRQ, if
 * set, is pinned to mld->msi_irq_base_cpu.
 */
void s5100_set_pcie_irq_affinity(struct modem_ctl *mc)
{
	struct link_device *ld = get_current_link(mc->iod);
	struct mem_link_device *mld = to_mem_link_device(ld);
#if IS_ENABLED(CONFIG_CP_PKTPROC)
	struct pktproc_adaptor *ppa = &mld->pktproc;
	unsigned int num_queue = 1;
	unsigned int i;

	if (ppa->use_exclusive_irq)
		num_queue = ppa->num_queue;

	for (i = 0; i < num_queue; i++) {
		/* queues are populated in order; first empty slot ends the scan */
		if (!ppa->q[i]->irq)
			break;

		irq_set_affinity_hint(ppa->q[i]->irq, cpumask_of(mld->msi_irq_q_cpu[i]));
	}
#endif

	if (mld->msi_irq_base)
		irq_set_affinity_hint(mld->msi_irq_base, cpumask_of(mld->msi_irq_base_cpu));
}
+
+int s5100_set_outbound_atu(struct modem_ctl *mc, struct cp_btl *btl, loff_t *pos, u32 map_size)
+{
+ int ret = 0;
+ u32 atu_grp = (*pos) / map_size;
+
+ if (atu_grp != btl->last_pcie_atu_grp) {
+ ret = pcie_set_outbound_atu(
+ mc->pcie_ch_num, btl->mem.cp_p_base, (atu_grp * map_size), map_size);
+ btl->last_pcie_atu_grp = atu_grp;
+ }
+
+ return ret;
+}
+
/*
 * Modem-ops suspend callback: decide whether the AP may enter suspend.
 *
 * Returns -EBUSY (aborting suspend) while CP still drives CP2AP_WAKEUP
 * high, or when CP has been powered before but is not ONLINE.  With
 * CONFIG_CPIF_AP_SUSPEND_DURING_VOICE_CALL, an active voice call skips
 * both checks so the AP can sleep while the call runs on CP.  Without the
 * LCD notifier, AP2CP_AP_ACTIVE is dropped here instead.
 */
static int suspend_cp(struct modem_ctl *mc)
{
	if (!mc)
		return 0;

	/* do { } while (0) exists only so 'break' can skip the abort checks */
	do {
#if IS_ENABLED(CONFIG_CPIF_AP_SUSPEND_DURING_VOICE_CALL)
		if (mc->pcie_voice_call_on)
			break;
#endif

		if (mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_WAKEUP], true) == 1) {
			mif_info("abort suspend\n");
			return -EBUSY;
		}

		if (mc->cp_ever_powered_on && (mc->phone_state != STATE_ONLINE)) {
			mif_info("Abort suspend since CP state (%s) is not ONLINE\n",
				 cp_state_str(mc->phone_state));
			return -EBUSY;
		}
	} while (0);

#if !IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
	modem_ctrl_set_kerneltime(mc);
	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 0, 0);
	mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], true);
#endif

	return 0;
}
+
/*
 * Modem-ops resume callback.
 *
 * Re-pins the PCIe MSI IRQ affinities (lost across suspend), restores the
 * S2MPU state when CONFIG_GS_S2MPU is enabled (returns -EINVAL if the
 * restore fails), and — without the LCD notifier — re-raises
 * AP2CP_AP_ACTIVE to tell CP the AP is awake.
 */
static int resume_cp(struct modem_ctl *mc)
{
#if IS_ENABLED(CONFIG_GS_S2MPU)
	int ret;
#endif
	if (!mc)
		return 0;

	s5100_set_pcie_irq_affinity(mc);

#if IS_ENABLED(CONFIG_GS_S2MPU)

	if (!mc->s2mpu)
		return 0;

	ret = s2mpu_restore(mc->s2mpu);
	if (ret) {
		mif_err("S2MPU restore failed error=%d\n", ret);
		return -EINVAL;
	}
#endif

#if !IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
	modem_ctrl_set_kerneltime(mc);
	mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 1, 0);
	mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], true);
#endif

	return 0;
}
+
/*
 * System PM notifier.
 *
 * PM_SUSPEND_PREPARE marks pcie_pm_suspended so the CP2AP_WAKEUP IRQ
 * handler defers its work while the system is suspending.  PM_POST_SUSPEND
 * clears the flag and, if a CP2AP_WAKEUP edge was deferred
 * (pcie_pm_resume_wait), replays it: the IRQ trigger polarity is flipped
 * to catch the next edge, the IRQ is re-enabled, and the wakeup or suspend
 * work is queued depending on the GPIO level latched at deferral time.
 *
 * All flag handling is done under pcie_pm_lock.
 */
static int s5100_pm_notifier(struct notifier_block *notifier,
			     unsigned long pm_event, void *v)
{
	struct modem_ctl *mc;
	unsigned long flags;
	int gpio_val;

	mc = container_of(notifier, struct modem_ctl, pm_notifier);

	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		mif_info("Suspend prepare\n");

		spin_lock_irqsave(&mc->pcie_pm_lock, flags);
		mc->pcie_pm_suspended = true;
		spin_unlock_irqrestore(&mc->pcie_pm_lock, flags);
		break;
	case PM_POST_SUSPEND:
		mif_info("Resume done\n");

		spin_lock_irqsave(&mc->pcie_pm_lock, flags);
		mc->pcie_pm_suspended = false;
		if (mc->pcie_pm_resume_wait) {
			mc->pcie_pm_resume_wait = false;
			gpio_val = mc->pcie_pm_resume_gpio_val;

			mif_err("cp2ap_wakeup work resume. gpio_val : %d\n", gpio_val);

			/* Re-arm the IRQ for the opposite edge of the latched level */
			mc->apwake_irq_chip->irq_set_type(
				irq_get_irq_data(mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP].num),
				(gpio_val == 1 ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH));
			mif_enable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP]);

			queue_work_on(RUNTIME_PM_AFFINITY_CORE, mc->wakeup_wq,
				      (gpio_val == 1 ? &mc->wakeup_work : &mc->suspend_work));
		}
		spin_unlock_irqrestore(&mc->pcie_pm_lock, flags);
		break;
	default:
		mif_info("pm_event %lu\n", pm_event);
		break;
	}

	return NOTIFY_OK;
}
+
+int s5100_try_gpio_cp_wakeup(struct modem_ctl *mc)
+{
+ if ((mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_AP2CP_WAKEUP], false) == 0) &&
+ (mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_CP2AP_WAKEUP], false) == 0) &&
+ (s51xx_check_pcie_link_status(mc->pcie_ch_num) == 0)) {
+ mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_WAKEUP], 1, 0);
+ print_mc_state(mc);
+ return 0;
+ }
+ return -EPERM;
+}
+
/*
 * Wire the S5100-specific implementations into the generic modem_ctl ops
 * table.  Called once from s5100_init_modemctl_device().
 */
static void s5100_get_ops(struct modem_ctl *mc)
{
	/* power control */
	mc->ops.power_on = power_on_cp;
	mc->ops.power_off = power_off_cp;
	mc->ops.power_shutdown = power_shutdown_cp;
	mc->ops.power_reset = power_reset_cp;
	mc->ops.power_reset_dump = power_reset_dump_cp;
	mc->ops.silent_reset = silent_reset_cp;
	mc->ops.power_reset_partial = power_reset_partial_cp;
	mc->ops.power_reset_warm = power_reset_warm_cp;

	/* boot / crash-dump sequencing */
	mc->ops.start_normal_boot = start_normal_boot;
	mc->ops.complete_normal_boot = complete_normal_boot;
	mc->ops.start_normal_boot_bl1 = start_normal_boot_bl1;
	mc->ops.start_normal_boot_bootloader = start_normal_boot_bootloader;
	mc->ops.start_dump_boot_bl1 = start_dump_boot_bl1;
	mc->ops.start_dump_boot_bootloader = start_dump_boot_bootloader;
	mc->ops.start_dump_boot_partial = start_dump_boot_partial;

	mc->ops.start_dump_boot = start_dump_boot;
	mc->ops.trigger_cp_crash = trigger_cp_crash;

	/* system PM hooks */
	mc->ops.suspend = suspend_cp;
	mc->ops.resume = resume_cp;
}
+
+static int s5100_get_pdata(struct modem_ctl *mc, struct modem_data *pdata)
+{
+ struct platform_device *pdev = to_platform_device(mc->dev);
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int i;
+
+ /* label */
+ mc->cp_gpio[CP_GPIO_AP2CP_CP_PWR].label = "AP2CP_CP_PWR";
+ mc->cp_gpio[CP_GPIO_AP2CP_NRESET].label = "AP2CP_NRESET";
+ mc->cp_gpio[CP_GPIO_AP2CP_WAKEUP].label = "AP2CP_WAKEUP";
+ mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI].label = "AP2CP_DUMP_NOTI";
+ mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE].label = "AP2CP_AP_ACTIVE";
+#if !IS_ENABLED(CONFIG_CP_WRESET_WA)
+ mc->cp_gpio[CP_GPIO_AP2CP_CP_WRST_N].label = "AP2CP_CP_WRST_N";
+ mc->cp_gpio[CP_GPIO_CP2AP_CP_WRST_N].label = "CP2AP_CP_WRST_N";
+ mc->cp_gpio[CP_GPIO_AP2CP_PM_WRST_N].label = "AP2CP_PM_WRST_N";
+#endif
+ mc->cp_gpio[CP_GPIO_CP2AP_PS_HOLD].label = "CP2AP_PS_HOLD";
+ mc->cp_gpio[CP_GPIO_CP2AP_WAKEUP].label = "CP2AP_WAKEUP";
+ mc->cp_gpio[CP_GPIO_CP2AP_CP_ACTIVE].label = "CP2AP_CP_ACTIVE";
+ mc->cp_gpio[CP_GPIO_AP2CP_PARTIAL_RST_N].label = "AP2CP_PARTIAL_RST_N";
+
+ /* node name */
+ mc->cp_gpio[CP_GPIO_AP2CP_CP_PWR].node_name = "gpio_ap2cp_cp_pwr_on";
+ mc->cp_gpio[CP_GPIO_AP2CP_NRESET].node_name = "gpio_ap2cp_nreset_n";
+ mc->cp_gpio[CP_GPIO_AP2CP_WAKEUP].node_name = "gpio_ap2cp_wake_up";
+ mc->cp_gpio[CP_GPIO_AP2CP_DUMP_NOTI].node_name = "gpio_ap2cp_dump_noti";
+ mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE].node_name = "gpio_ap2cp_pda_active";
+#if !IS_ENABLED(CONFIG_CP_WRESET_WA)
+ mc->cp_gpio[CP_GPIO_AP2CP_CP_WRST_N].node_name = "gpio_ap2cp_cp_wrst_n";
+ mc->cp_gpio[CP_GPIO_CP2AP_CP_WRST_N].node_name = "gpio_cp2ap_cp_wrst_n";
+ mc->cp_gpio[CP_GPIO_AP2CP_PM_WRST_N].node_name = "gpio_ap2cp_pm_wrst_n";
+#endif
+ mc->cp_gpio[CP_GPIO_CP2AP_PS_HOLD].node_name = "gpio_cp2ap_cp_ps_hold";
+ mc->cp_gpio[CP_GPIO_CP2AP_WAKEUP].node_name = "gpio_cp2ap_wake_up";
+ mc->cp_gpio[CP_GPIO_CP2AP_CP_ACTIVE].node_name = "gpio_cp2ap_phone_active";
+ mc->cp_gpio[CP_GPIO_AP2CP_PARTIAL_RST_N].node_name = "gpio_ap2cp_partial_rst_n";
+
+ /* irq */
+ mc->cp_gpio[CP_GPIO_CP2AP_WAKEUP].irq_type = CP_GPIO_IRQ_CP2AP_WAKEUP;
+ mc->cp_gpio[CP_GPIO_CP2AP_CP_ACTIVE].irq_type = CP_GPIO_IRQ_CP2AP_CP_ACTIVE;
+ if (pdata->mif_off_during_volte)
+ mc->cp_gpio[CP_GPIO_CP2AP_CP_WRST_N].irq_type = CP_GPIO_IRQ_CP2AP_CP_WRST_N;
+
+ /* gpio */
+ for (i = 0; i < CP_GPIO_MAX; i++) {
+ mc->cp_gpio[i].num =
+ of_get_named_gpio(np, mc->cp_gpio[i].node_name, 0);
+
+ if (!gpio_is_valid(mc->cp_gpio[i].num))
+ continue;
+
+ mc->cp_gpio[i].valid = true;
+
+ gpio_request(mc->cp_gpio[i].num, mc->cp_gpio[i].label);
+ if (!strncmp(mc->cp_gpio[i].label, "AP2CP", 5))
+ gpio_direction_output(mc->cp_gpio[i].num, 0);
+ else
+ gpio_direction_input(mc->cp_gpio[i].num);
+
+ if (mc->cp_gpio[i].irq_type != CP_GPIO_IRQ_NONE) {
+ mc->cp_gpio_irq[mc->cp_gpio[i].irq_type].num =
+ gpio_to_irq(mc->cp_gpio[i].num);
+
+ if (i == CP_GPIO_CP2AP_CP_ACTIVE) {
+ mc->cp_gpio_irq[mc->cp_gpio[i].irq_type].not_alive =
+ pdata->cp2ap_active_not_alive;
+ }
+ }
+ }
+
+ /* validate */
+ for (i = 0; i < CP_GPIO_MAX; i++) {
+ if (!mc->cp_gpio[i].valid) {
+ mif_err("Missing some of GPIOs %s\n", mc->cp_gpio[i].node_name);
+ return -EINVAL;
+ }
+ }
+
+ /* Get PCIe Channel Number */
+ mif_dt_read_u32(np, "pci_ch_num", mc->pcie_ch_num);
+ mif_info("PCIe Channel Number:%d\n", mc->pcie_ch_num);
+
+ mc->sbi_crash_type_mask = pdata->sbi_crash_type_mask;
+ mc->sbi_crash_type_pos = pdata->sbi_crash_type_pos;
+
+ mc->sbi_ds_det_mask = pdata->sbi_ds_det_mask;
+ mc->sbi_ds_det_pos = pdata->sbi_ds_det_pos;
+
+ /* PCIe dynamic speed change parameters */
+ mc->pcie_dynamic_spd_enabled = of_property_read_bool(np, "use-dynamic-link-spd");
+
+ if (of_property_read_u32(np, "tp-threshold", &mc->tp_threshold))
+ mc->tp_threshold = DEFAULT_TP_THRESHOLD;
+
+ if (of_property_read_u32(np, "tp-hysteresis", &mc->tp_hysteresis))
+ mc->tp_hysteresis = DEFAULT_TP_HYSTERESIS;
+
+ mif_info("PCIe dynamic link speed: enable=%d, threshold=%d, hysteresis=%d\n",
+ mc->pcie_dynamic_spd_enabled,
+ mc->tp_threshold,
+ mc->tp_hysteresis);
+
+ return 0;
+}
+
/*
 * Panic-notifier callback: forward an AP kernel panic notification to CP
 * so it can record/react before the AP goes down.
 */
static int send_panic_to_cp_notifier(struct notifier_block *nb,
				     unsigned long action, void *nb_data)
{
	s5100_send_panic_noti_ext();
	return NOTIFY_DONE;
}
+
+#if IS_ENABLED(CONFIG_CPIF_AP_SUSPEND_DURING_VOICE_CALL)
+static int s5100_call_state_notifier(struct notifier_block *nb,
+ unsigned long action, void *nb_data)
+{
+ struct modem_ctl *mc = container_of(nb, struct modem_ctl, call_state_nb);
+
+ mif_info("call event = %lu\n", action);
+ switch (action) {
+ case MODEM_VOICE_CALL_OFF:
+ mc->pcie_voice_call_on = false;
+ if (mc->mdm_data->mif_off_during_volte) {
+ mif_disable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N]);
+ synchronize_irq(mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N].num);
+ cpif_wake_unlock(mc->ws_wrst);
+ logbuffer_log(mc->log, "released wrst wakelock after voice call");
+ }
+ queue_work_on(RUNTIME_PM_AFFINITY_CORE, mc->wakeup_wq,
+ &mc->call_off_work);
+ break;
+ case MODEM_VOICE_CALL_ON:
+ mc->pcie_voice_call_on = true;
+ if (mc->mdm_data->mif_off_during_volte)
+ mif_enable_irq(&mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N]);
+ queue_work_on(RUNTIME_PM_AFFINITY_CORE, mc->wakeup_wq,
+ &mc->call_on_work);
+ break;
+ default:
+ mif_err("undefined call event = %lu\n", action);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+static int s5100_lcd_notifier(struct notifier_block *notifier,
+ unsigned long event, void *v)
+{
+ struct modem_ctl *mc =
+ container_of(notifier, struct modem_ctl, lcd_notifier);
+
+ switch (event) {
+ case LCD_OFF:
+ mif_info("LCD_OFF Notification\n");
+ modem_ctrl_set_kerneltime(mc);
+ mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 0, 0);
+ mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], true);
+ break;
+
+ case LCD_ON:
+ mif_info("LCD_ON Notification\n");
+ modem_ctrl_set_kerneltime(mc);
+ mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], 1, 0);
+ mif_gpio_get_value(&mc->cp_gpio[CP_GPIO_AP2CP_AP_ACTIVE], true);
+ break;
+
+ default:
+ mif_info("lcd_event %ld\n", event);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+#endif /* CONFIG_CP_LCD_NOTIFIER */
+
+int s5100_init_modemctl_device(struct modem_ctl *mc, struct modem_data *pdata)
+{
+ int ret = 0;
+ struct platform_device *pdev = to_platform_device(mc->dev);
+
+ g_mc = mc;
+
+ s5100_get_ops(mc);
+ if (s5100_get_pdata(mc, pdata)) {
+ mif_err("DT error: failed to parse\n");
+ return -EINVAL;
+ }
+ dev_set_drvdata(mc->dev, mc);
+
+ mc->ws = cpif_wake_lock_register(&pdev->dev, "s5100_wake_lock");
+ if (mc->ws == NULL) {
+ mif_err("s5100_wake_lock: wakeup_source_register fail\n");
+ ret = -EINVAL;
+ goto err_wake_lock_register;
+ }
+
+ if (pdata->mif_off_during_volte) {
+ mc->ws_wrst = cpif_wake_lock_register(&pdev->dev, "s5100_wrst_wake_lock");
+ if (mc->ws_wrst == NULL) {
+ mif_err("s5100_wake_lock: wakeup_source_register fail\n");
+ ret = -EINVAL;
+ goto err_wake_lock_register;
+ }
+ }
+
+ mutex_init(&mc->pcie_onoff_lock);
+ mutex_init(&mc->pcie_check_lock);
+ spin_lock_init(&mc->pcie_tx_lock);
+ spin_lock_init(&mc->pcie_pm_lock);
+ spin_lock_init(&mc->power_stats_lock);
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_GPIO_WA)
+ atomic_set(&mc->dump_toggle_issued, 0);
+#endif
+
+ mif_gpio_set_value(&mc->cp_gpio[CP_GPIO_AP2CP_NRESET], 0, 0);
+
+ mif_info("Register GPIO interrupts\n");
+ mc->apwake_irq_chip = irq_get_chip(mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_WAKEUP].num);
+ if (mc->apwake_irq_chip == NULL) {
+ mif_err("Can't get irq_chip structure!!!!\n");
+ ret = -EINVAL;
+ goto err_irq_get_chip;
+ }
+
+ if (pdata->mif_off_during_volte) {
+ mc->cp_wrst_irq_chip = irq_get_chip(
+ mc->cp_gpio_irq[CP_GPIO_IRQ_CP2AP_CP_WRST_N].num);
+ if (mc->cp_wrst_irq_chip == NULL) {
+ mif_err("Can't get irq_chip structure!!!!\n");
+ ret = -EINVAL;
+ goto err_irq_get_chip;
+ }
+ }
+
+ mc->wakeup_wq = create_singlethread_workqueue("cp2ap_wakeup_wq");
+ if (!mc->wakeup_wq) {
+ mif_err("%s: ERR! fail to create wakeup_wq\n", mc->name);
+ ret = -EINVAL;
+ goto err_irq_get_chip;
+ }
+ INIT_WORK(&mc->wakeup_work, cp2ap_wakeup_work);
+ INIT_WORK(&mc->suspend_work, cp2ap_suspend_work);
+
+ mc->crash_wq = create_singlethread_workqueue("trigger_cp_crash_wq");
+ if (!mc->crash_wq) {
+ mif_err("%s: ERR! fail to create crash_wq\n", mc->name);
+ ret = -EINVAL;
+ goto err_crash_wq;
+ }
+ INIT_WORK(&mc->crash_work, trigger_cp_crash_work);
+
+ mc->reboot_nb.notifier_call = s5100_reboot_handler;
+ register_reboot_notifier(&mc->reboot_nb);
+
+ /* Register PM notifier_call */
+ mc->pm_notifier.notifier_call = s5100_pm_notifier;
+ ret = register_pm_notifier(&mc->pm_notifier);
+ if (ret) {
+ mif_err("failed to register PM notifier_call\n");
+ goto err_pm_notifier;
+ }
+
+ /* Register panic notifier_call*/
+ mc->send_panic_nb.notifier_call = send_panic_to_cp_notifier;
+ ret = atomic_notifier_chain_register(&panic_notifier_list, &mc->send_panic_nb);
+ if (ret < 0) {
+ mif_err("failed to register panic notifier_call\n");
+ goto err_panic_notifier;
+ }
+
+#if IS_ENABLED(CONFIG_CPIF_AP_SUSPEND_DURING_VOICE_CALL)
+ INIT_WORK(&mc->call_on_work, voice_call_on_work);
+ INIT_WORK(&mc->call_off_work, voice_call_off_work);
+
+ mc->call_state_nb.notifier_call = s5100_call_state_notifier;
+ ret = register_modem_voice_call_event_notifier(&mc->call_state_nb);
+ if (ret < 0) {
+ mif_err("failed to register modem voice call event notifier_call\n");
+ goto err_modem_vce_notifier;
+ }
+#endif
+
+ mc->force_crash_nb.notifier_call = s5100_force_crash_notifier;
+ ret = register_modem_force_crash_handler(&mc->force_crash_nb);
+ if (ret < 0) {
+ mif_err("failed to register forced crash notifier: %d\n", ret);
+ goto err_force_crash_notifier;
+ }
+
+ if (sysfs_create_group(&pdev->dev.kobj, &dynamic_pcie_spd_group))
+ mif_err("failed to create sysfs node for dynamic_pcie_spd\n");
+
+ if (sysfs_create_group(&pdev->dev.kobj, &sim_group))
+ mif_err("failed to create sysfs node related sim\n");
+
+ if (sysfs_create_group(&pdev->dev.kobj, &modem_group))
+ mif_err("failed to create sysfs node related modem\n");
+
+ ret = device_create_file(&pdev->dev, &dev_attr_s5100_wake_lock);
+ if (ret) {
+ mif_err("%s: couldn't create s5100_wake_lock(%d)\n", __func__, ret);
+ goto err_dev_create_file;
+ }
+
+ ret = device_create_file(&pdev->dev, &dev_attr_s5100_wrst_wake_lock);
+ if (ret) {
+ mif_err("%s: couldn't create s5100_wrst_wake_lock(%d)\n", __func__, ret);
+ goto err_dev_create_file;
+ }
+
+ return 0;
+
+err_dev_create_file:
+ sysfs_remove_group(&pdev->dev.kobj, &modem_group);
+ sysfs_remove_group(&pdev->dev.kobj, &sim_group);
+ sysfs_remove_group(&pdev->dev.kobj, &dynamic_pcie_spd_group);
+ unregister_modem_voice_call_event_notifier(&mc->force_crash_nb);
+err_force_crash_notifier:
+#if IS_ENABLED(CONFIG_CPIF_AP_SUSPEND_DURING_VOICE_CALL)
+ unregister_modem_voice_call_event_notifier(&mc->call_state_nb);
+err_modem_vce_notifier:
+#endif
+ atomic_notifier_chain_unregister(&panic_notifier_list, &mc->send_panic_nb);
+err_panic_notifier:
+ unregister_pm_notifier(&mc->pm_notifier);
+err_pm_notifier:
+ unregister_reboot_notifier(&mc->reboot_nb);
+ destroy_workqueue(mc->crash_wq);
+err_crash_wq:
+ destroy_workqueue(mc->wakeup_wq);
+err_irq_get_chip:
+ cpif_wake_lock_unregister(mc->ws);
+ if (pdata->mif_off_during_volte)
+ cpif_wake_lock_unregister(mc->ws_wrst);
+err_wake_lock_register:
+ g_mc = NULL;
+ return ret;
+}
+
+void s5100_uninit_modemctl_device(struct modem_ctl *mc, struct modem_data *pdata)
+{
+ struct device *dev = mc->dev;
+
+ device_remove_file(dev, &dev_attr_s5100_wake_lock);
+ sysfs_remove_group(&dev->kobj, &modem_group);
+ sysfs_remove_group(&dev->kobj, &sim_group);
+ sysfs_remove_group(&dev->kobj, &dynamic_pcie_spd_group);
+ unregister_modem_voice_call_event_notifier(&mc->force_crash_nb);
+#if IS_ENABLED(CONFIG_CPIF_AP_SUSPEND_DURING_VOICE_CALL)
+ unregister_modem_voice_call_event_notifier(&mc->call_state_nb);
+#endif
+ atomic_notifier_chain_unregister(&panic_notifier_list, &mc->send_panic_nb);
+ unregister_pm_notifier(&mc->pm_notifier);
+ unregister_reboot_notifier(&mc->reboot_nb);
+ destroy_workqueue(mc->crash_wq);
+ destroy_workqueue(mc->wakeup_wq);
+ mutex_destroy(&mc->pcie_check_lock);
+ mutex_destroy(&mc->pcie_onoff_lock);
+ cpif_wake_lock_unregister(mc->ws);
+ if (pdata->mif_off_during_volte)
+ cpif_wake_lock_unregister(mc->ws_wrst);
+ g_mc = NULL;
+}
diff --git a/modem_dump.c b/modem_dump.c
new file mode 100644
index 0000000..02ac67a
--- /dev/null
+++ b/modem_dump.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Samsung Electronics.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/shm_ipc.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "link_device_memory.h"
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+#include "link_device_pcie_iommu.h"
+#endif
+
/*
 * Push a CP memory region to userspace through the io-device RX path.
 *
 * The region at @base (@size bytes) is chopped into sk_buffs of at most
 * 0xE00 bytes each and handed to iod->recv_skb_single(), so the reader of
 * the io-device receives the dump like ordinary RX data.
 *
 * NOTE(review): @base is declared __iomem but copied with plain memcpy();
 * callers also pass phys_to_virt() mappings — confirm whether
 * memcpy_fromio() is required for the genuinely iomem-backed regions.
 *
 * Returns 0 on success, -EINVAL for a NULL/empty region, -ENOMEM on skb
 * allocation failure (purging the iod RX queue), or the recv_skb error.
 */
static int save_log_dump(struct io_device *iod, struct link_device *ld, u8 __iomem *base,
			 size_t size)
{
	struct sk_buff *skb = NULL;
	size_t alloc_size = 0xE00;	/* per-chunk skb payload cap */
	size_t copied = 0;
	int ret = 0;

	if (!base) {
		mif_err("base is null\n");
		return -EINVAL;
	}
	if (!size) {
		mif_err("size is 0\n");
		return -EINVAL;
	}

	while (copied < size) {
		/* last chunk may be shorter than the cap */
		if (size - copied < alloc_size)
			alloc_size = size - copied;

		skb = alloc_skb(alloc_size, GFP_KERNEL);
		if (!skb) {
			skb_queue_purge(&iod->sk_rx_q);
			mif_err("alloc_skb() error\n");
			return -ENOMEM;
		}

		memcpy(skb_put(skb, alloc_size), base + copied, alloc_size);
		copied += alloc_size;

		skbpriv(skb)->iod = iod;
		skbpriv(skb)->ld = ld;
		skbpriv(skb)->lnk_hdr = false;
		skbpriv(skb)->sipc_ch = iod->ch;
		skbpriv(skb)->napi = NULL;

		ret = iod->recv_skb_single(iod, ld, skb);
		if (unlikely(ret < 0)) {
			struct modem_ctl *mc = ld->mc;

			mif_err_limited("%s: %s<-%s: %s->recv_skb fail (%d)\n",
					ld->name, iod->name, mc->name, iod->name, ret);
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	mif_info("Complete:%zu bytes\n", copied);

	return 0;
}
+
+int cp_get_log_dump(struct io_device *iod, struct link_device *ld, unsigned long arg)
+{
+ struct mem_link_device *mld = to_mem_link_device(ld);
+ struct modem_data *modem = ld->mdm_data;
+ void __user *uarg = (void __user *)arg;
+ struct cp_log_dump log_dump;
+ u8 __iomem *base = NULL;
+ u32 size = 0;
+ int ret = 0;
+ u32 cp_num;
+
+ ret = copy_from_user(&log_dump, uarg, sizeof(log_dump));
+ if (ret) {
+ mif_err("copy_from_user() error:%d\n", ret);
+ return ret;
+ }
+ log_dump.name[sizeof(log_dump.name) - 1] = '\0';
+ mif_info("%s log name:%s index:%d\n", iod->name, log_dump.name, log_dump.idx);
+
+ cp_num = ld->mdm_data->cp_num;
+ switch (log_dump.idx) {
+ case LOG_IDX_SHMEM:
+ if (modem->legacy_raw_rx_buffer_cached) {
+ base = phys_to_virt(cp_shmem_get_base(cp_num, SHMEM_IPC));
+ size = cp_shmem_get_size(cp_num, SHMEM_IPC);
+ } else {
+ base = mld->base;
+ size = mld->size;
+ }
+
+ break;
+
+ case LOG_IDX_VSS:
+ base = mld->vss_base;
+ size = cp_shmem_get_size(cp_num, SHMEM_VSS);
+ break;
+
+ case LOG_IDX_ACPM:
+ base = mld->acpm_base;
+ size = mld->acpm_size;
+ break;
+
+#if IS_ENABLED(CONFIG_CP_BTL)
+ case LOG_IDX_CP_BTL:
+ if (!ld->mdm_data->btl.enabled) {
+ mif_info("%s CP BTL is disabled\n", iod->name);
+ return -EPERM;
+ }
+ base = ld->mdm_data->btl.mem.v_base;
+ size = ld->mdm_data->btl.mem.size;
+ break;
+#endif
+
+ case LOG_IDX_DATABUF_DL:
+ base = phys_to_virt(cp_shmem_get_base(cp_num, SHMEM_PKTPROC));
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_IOMMU)
+ if (exynos_pcie_is_sysmmu_enabled(iod->mc->pcie_ch_num)) {
+ size = mld->pktproc.buff_rgn_offset;
+ } else {
+ size = cp_shmem_get_size(cp_num, SHMEM_PKTPROC);
+ }
+#else
+ size = cp_shmem_get_size(cp_num, SHMEM_PKTPROC);
+#endif
+ break;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+ case LOG_IDX_DATABUF_UL:
+ base = phys_to_virt(cp_shmem_get_base(cp_num, SHMEM_PKTPROC_UL));
+ size = cp_shmem_get_size(cp_num, SHMEM_PKTPROC_UL);
+ break;
+#endif
+
+ case LOG_IDX_L2B:
+ base = phys_to_virt(cp_shmem_get_base(cp_num, SHMEM_L2B));
+ size = cp_shmem_get_size(cp_num, SHMEM_L2B);
+ break;
+
+ case LOG_IDX_DDM:
+ base = phys_to_virt(cp_shmem_get_base(cp_num, SHMEM_DDM));
+ size = cp_shmem_get_size(cp_num, SHMEM_DDM);
+ break;
+
+ default:
+ mif_err("%s: invalid index:%d\n", iod->name, log_dump.idx);
+ return -EINVAL;
+ }
+
+ if (!base) {
+ mif_err("base is null for %s\n", log_dump.name);
+ return -EINVAL;
+ }
+ if (!size) {
+ mif_err("size is 0 for %s\n", log_dump.name);
+ return -EINVAL;
+ }
+
+ log_dump.size = size;
+ mif_info("%s log size:%d\n", iod->name, log_dump.size);
+ ret = copy_to_user(uarg, &log_dump, sizeof(log_dump));
+ if (ret) {
+ mif_err("copy_to_user() error:%d\n", ret);
+ return ret;
+ }
+
+ return save_log_dump(iod, ld, base, size);
+}
diff --git a/modem_dump.h b/modem_dump.h
new file mode 100644
index 0000000..3a0f92d
--- /dev/null
+++ b/modem_dump.h
@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2016 Samsung Electronics.
 *
 */

#ifndef __MODEM_DUMP_H__
#define __MODEM_DUMP_H__

#include "modem_prj.h"
#include "link_device_memory.h"

/* ioctl backend: copy one CP log/dump memory region (selected by the
 * userspace struct cp_log_dump at @arg) out through the io-device RX path.
 */
extern int cp_get_log_dump(struct io_device *iod, struct link_device *ld, unsigned long arg);

#endif
diff --git a/modem_io_device.c b/modem_io_device.c
new file mode 100644
index 0000000..239cc5c
--- /dev/null
+++ b/modem_io_device.c
@@ -0,0 +1,869 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <trace/events/napi.h>
+#include <net/ip.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <soc/google/exynos-modem-ctrl.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "modem_dump.h"
+#include "modem_toe_device.h"
+
+/* sysfs "waketime" read handler: report this iod's wake-lock timeout in ms. */
+static ssize_t waketime_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct io_device *iod = dev_get_drvdata(dev);
+	unsigned int timeout_ms = jiffies_to_msecs(iod->waketime);
+
+	return sysfs_emit(buf, "raw waketime : %ums\n", timeout_ms);
+}
+
+/*
+ * sysfs "waketime" write handler: set the wake-lock timeout (milliseconds)
+ * for this io_device.  Writing to the IPC_MULTI_RAW device also propagates
+ * the new timeout to every PDP-channel io_device.
+ *
+ * Invalid input is silently ignored and @count returned, preserving the
+ * original behaviour for user-space writers.
+ *
+ * Fixes vs. original: @msec is unsigned long, so log it with %lu (was %ld,
+ * a printf format mismatch); corrected the "valied" typo in that message.
+ */
+static ssize_t waketime_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned long msec;
+	int ret;
+	struct io_device *iod = dev_get_drvdata(dev);
+
+	if (!iod) {
+		mif_err("INVALID IO device\n");
+		return -EINVAL;
+	}
+
+	ret = kstrtoul(buf, 10, &msec);
+	if (ret)
+		return count;
+
+	if (!msec) {
+		mif_info("%s: (%lu) is not valid, use previous value(%d)\n",
+			iod->name, msec,
+			jiffies_to_msecs(iod->mc->iod->waketime));
+		return count;
+	}
+
+	iod->waketime = msecs_to_jiffies(msec);
+	mif_info("%s: waketime = %lu ms\n", iod->name, msec);
+
+	if (iod->format == IPC_MULTI_RAW) {
+		struct modem_shared *msd = iod->msd;
+		unsigned int i;
+
+#if IS_ENABLED(CONFIG_CH_EXTENSION)
+		for (i = SIPC_CH_EX_ID_PDP_0; i <= SIPC_CH_EX_ID_PDP_MAX; i++) {
+#else
+		for (i = SIPC_CH_ID_PDP_0; i < SIPC_CH_ID_BT_DUN; i++) {
+#endif
+			/* note: reuses @iod as the loop cursor from here on */
+			iod = get_iod_with_channel(msd, i);
+			if (iod) {
+				iod->waketime = msecs_to_jiffies(msec);
+				mif_err("%s: waketime = %lu ms\n",
+					iod->name, msec);
+			}
+		}
+	}
+
+	return count;
+}
+
+/* sysfs attribute handle for the read/write "waketime" file. */
+static struct device_attribute attr_waketime =
+	__ATTR_RW(waketime);
+
+/* sysfs "loopback" read handler: print the loopback IPv4 address. */
+static ssize_t loopback_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct io_device *iod = dev_get_drvdata(dev);
+	const unsigned char *octet =
+		(const unsigned char *)&iod->msd->loopback_ipaddr;
+
+	return sysfs_emit(buf, "%u.%u.%u.%u\n",
+			  octet[0], octet[1], octet[2], octet[3]);
+}
+
+/* sysfs "loopback" write handler: parse and store the loopback IPv4 address. */
+static ssize_t loopback_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct io_device *iod = dev_get_drvdata(dev);
+
+	iod->msd->loopback_ipaddr = ipv4str_to_be32(buf, count);
+
+	return count;
+}
+
+/* sysfs attribute handle for the read/write "loopback" file. */
+static struct device_attribute attr_loopback =
+	__ATTR_RW(loopback);
+
+/*
+ * iodevs_for_each() callback: append "<iod><-><link>\n" for each connected
+ * network-type io_device to the buffer cursor passed via @args, and
+ * advance the cursor.
+ *
+ * NOTE(review): the bound passed to scnprintf() is PAGE_SIZE minus the
+ * *local* count (always 0 on entry), not the space remaining across
+ * callbacks — with enough net devices this could write past the sysfs
+ * page.  Confirm the total output is known to fit in PAGE_SIZE.
+ */
+static void iodev_showtxlink(struct io_device *iod, void *args)
+{
+	char **p = args;
+	struct link_device *ld = get_current_link(iod);
+	ssize_t count = 0;
+
+	if (iod->io_typ == IODEV_NET && IS_CONNECTED(iod, ld))
+		count += scnprintf(*p + count, PAGE_SIZE - count,
+			"%s<->%s\n", iod->name, ld->name);
+
+	*p += count;
+}
+
+/*
+ * sysfs "txlink" read handler: list the iod<->link pairings of every
+ * connected net-type io_device (built up by iodev_showtxlink()).
+ */
+static ssize_t txlink_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct io_device *iod = dev_get_drvdata(dev);
+	struct modem_shared *msd = iod->msd;
+	char *p = buf;
+
+	iodevs_for_each(msd, iodev_showtxlink, &p);
+
+	/* p was advanced by each callback; the difference is bytes written */
+	return p - buf;
+}
+
+/* sysfs "txlink" write handler: TX-link switching is not supported. */
+static ssize_t txlink_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	/* don't change without gpio dynamic switching */
+	return -EINVAL;
+}
+
+/* sysfs attribute handle for the "txlink" file (writes always rejected). */
+static struct device_attribute attr_txlink =
+	__ATTR_RW(txlink);
+
+/* GRO policy, selected via the "gro_option" sysfs file. */
+enum gro_opt {
+	GRO_TCP_UDP,	/* aggregate both TCP and UDP flows (default) */
+	GRO_TCP_ONLY,	/* aggregate TCP flows only */
+	GRO_NONE,	/* GRO disabled */
+	MAX_GRO_OPTION	/* sentinel: number of valid options */
+};
+
+/* current GRO policy; consulted by check_gro_support() on the RX path */
+static enum gro_opt gro_support = GRO_TCP_UDP;
+
+/* sysfs "gro_option" read handler: report the active GRO policy value. */
+static ssize_t gro_option_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%u\n", gro_support);
+}
+
+/*
+ * sysfs "gro_option" write handler: select the GRO policy (enum gro_opt).
+ *
+ * Fixes vs. original:
+ *  - @input is unsigned int, matching kstrtouint()'s expected pointer type
+ *    (was int).
+ *  - rejects input == MAX_GRO_OPTION: the old "input > MAX_GRO_OPTION"
+ *    test accepted the out-of-range sentinel value itself.
+ *  - no longer logs @input when parsing failed (it was uninitialized on
+ *    that path).
+ */
+static ssize_t gro_option_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+	unsigned int input;
+
+	ret = kstrtouint(buf, 0, &input);
+	if (ret) {
+		mif_err("Error(%d) parsing input: gro support: %u\n",
+			ret, gro_support);
+		return -EINVAL;
+	}
+	if (input >= MAX_GRO_OPTION) {
+		mif_err("Error: invalid value %u: gro support: %u\n",
+			input, gro_support);
+		return -EINVAL;
+	}
+	gro_support = input;
+
+	return count;
+}
+
+/* sysfs attribute handle for the read/write "gro_option" file. */
+static struct device_attribute attr_gro_option =
+	__ATTR_RW(gro_option);
+
+/*
+ * Enqueue @skb on @iod's RX queue and wake any blocked reader.
+ * Returns the skb length.  If the iod enforces the queue limit and the
+ * queue is already over MAX_IOD_RXQ_LEN, the skb is dropped instead (the
+ * reader is still woken).
+ */
+static int queue_skb_to_iod(struct sk_buff *skb, struct io_device *iod)
+{
+	struct sk_buff_head *rxq = &iod->sk_rx_q;
+	int len = skb->len;
+	bool limit_queue = !(iod->attrs & IO_ATTR_NO_CHECK_MAXQ);
+
+	if (limit_queue && rxq->qlen > MAX_IOD_RXQ_LEN) {
+		mif_err_limited("%s: application may be dead (rxq->qlen %d > %d)\n",
+				iod->name, rxq->qlen, MAX_IOD_RXQ_LEN);
+		dev_kfree_skb_any(skb);
+	} else {
+		mif_debug("%s: rxq->qlen = %d\n", iod->name, rxq->qlen);
+		skb_queue_tail(rxq, skb);
+	}
+
+	wake_up(&iod->wq);
+	return len;
+}
+
+/*
+ * Collect one fragment of a SIPC5 multi-frame message.  Fragments sharing
+ * a control ID accumulate on iod->sk_multi_q[id]; when the final fragment
+ * arrives (ctrl.more == 0) the whole per-ID queue is spliced onto the iod
+ * RX queue and the reader is woken.  Returns the fragment length.
+ */
+static int gather_multi_frame(struct sipc5_link_header *hdr,
+		struct sk_buff *skb)
+{
+	struct multi_frame_control ctrl = hdr->ctrl;
+	struct io_device *iod = skbpriv(skb)->iod;
+	struct modem_ctl *mc = iod->mc;
+	struct sk_buff_head *multi_q = &iod->sk_multi_q[ctrl.id];
+	int len = skb->len;
+
+	/* If there has been no multiple frame with this ID, ... */
+	if (skb_queue_empty(multi_q)) {
+		struct sipc_fmt_hdr *fh = (struct sipc_fmt_hdr *)skb->data;
+
+		mif_err("%s<-%s: start of multi-frame (ID:%d len:%d)\n",
+			iod->name, mc->name, ctrl.id, fh->len);
+	}
+	skb_queue_tail(multi_q, skb);
+
+	if (ctrl.more) {
+		/* The last frame has not arrived yet. */
+		mif_err("%s<-%s: recv multi-frame (ID:%d rcvd:%d)\n",
+			iod->name, mc->name, ctrl.id, skb->len);
+	} else {
+		struct sk_buff_head *rxq = &iod->sk_rx_q;
+		unsigned long flags;
+
+		/* It is the last frame because the "more" bit is 0. */
+		mif_err("%s<-%s: end of multi-frame (ID:%d rcvd:%d)\n",
+			iod->name, mc->name, ctrl.id, skb->len);
+
+		/* splice atomically so readers never see a partial message */
+		spin_lock_irqsave(&rxq->lock, flags);
+		skb_queue_splice_tail_init(multi_q, rxq);
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		wake_up(&iod->wq);
+	}
+
+	return len;
+}
+
+/*
+ * Collect one fragment of a SIT (EXYNOS-protocol) multi-frame packet.
+ * Fragments queue up on the per-packet-index queue; when the last
+ * fragment arrives they are copied into a single freshly allocated skb
+ * which is queued on the iod RX queue.  Returns the fragment length, or
+ * -ENOMEM if the merged skb cannot be allocated.
+ *
+ * NOTE(review): on allocation failure the tail skb returned by
+ * skb_dequeue_tail() is discarded without being freed — this looks like a
+ * leak unless the caller frees the passed-in skb on error; confirm skb
+ * ownership on the -ENOMEM path against the link-device callers.
+ */
+static int gather_multi_frame_sit(struct exynos_link_header *hdr, struct sk_buff *skb)
+{
+	u16 ctrl = hdr->cfg;
+	struct io_device *iod = skbpriv(skb)->iod;
+	struct modem_ctl *mc = iod->mc;
+	struct sk_buff_head *multi_q = &iod->sk_multi_q[exynos_multi_packet_index(ctrl)];
+	struct sk_buff_head *rxq = &iod->sk_rx_q;
+	struct sk_buff *skb_new, *skb_cur, *tmp;
+	int total_len = 0;
+	int ret = skb->len;
+
+#ifdef DEBUG_MODEM_IF_LINK_RX
+	/* If there has been no multiple frame with this ID, ... */
+	if (skb_queue_empty(multi_q)) {
+		mif_debug("%s<-%s: start of multi-frame (pkt_index:%d fr_index:%d len:%d)\n",
+			iod->name, mc->name, exynos_multi_packet_index(ctrl),
+			exynos_multi_frame_index(ctrl), hdr->len);
+	}
+#endif
+	skb_queue_tail(multi_q, skb);
+
+	/* The last frame has not arrived yet. */
+	if (!exynos_multi_last(ctrl)) {
+		mif_debug("%s<-%s: recv of multi-frame (CH_ID:0x%02x rcvd:%d)\n",
+			iod->name, mc->name, hdr->ch_id, skb->len);
+
+		return ret;
+	}
+
+	/* It is the last frame because the "more" bit is 0. */
+	mif_debug("%s<-%s: end multi-frame (CH_ID:0x%02x rcvd:%d)\n",
+		iod->name, mc->name, hdr->ch_id, skb->len);
+
+	/* check totoal multi packet size */
+	skb_queue_walk(multi_q, skb_cur)
+		total_len += skb_cur->len;
+
+	mif_debug("Total multi-frame packet size is %d\n", total_len);
+
+	skb_new = dev_alloc_skb(total_len);
+	if (unlikely(!skb_new)) {
+		mif_err("ERR - alloc_skb fail\n");
+		skb_dequeue_tail(multi_q);
+		ret = -ENOMEM;
+
+		goto out;
+	}
+
+	/* flatten all fragments into the merged skb, freeing each one */
+	skb_queue_walk_safe(multi_q, skb_cur, tmp) {
+		__skb_unlink(skb_cur, multi_q);
+		memcpy(skb_put(skb_new, skb_cur->len), skb_cur->data, skb_cur->len);
+		dev_consume_skb_any(skb_cur);
+	}
+
+out:
+	/* drop any fragments still queued (error path) and reset the queue */
+	skb_queue_purge(multi_q);
+	skb_queue_head_init(multi_q);
+
+	if (skb_new) {
+		skb_trim(skb_new, skb_new->len);
+		skb_queue_tail(rxq, skb_new);
+
+		wake_up(&iod->wq);
+	}
+
+	return ret;
+}
+
+/*
+ * Strip the link-layer (SIPC5 or SIT) header from @skb and dispatch the
+ * payload: multi-frame packets go to the gather_* reassembly path, single
+ * frames straight to the owning iod's RX queue.  Returns the number of
+ * bytes consumed, or -EINVAL for an unknown link protocol.
+ *
+ * Fix vs. original: removed the unreachable break statements after each
+ * return and the dead trailing "return 0".
+ */
+static inline int rx_frame_with_link_header(struct sk_buff *skb)
+{
+	struct sipc5_link_header *hdr;
+	struct exynos_link_header *hdr_sit;
+	bool multi_frame = skbpriv(skb)->ld->is_multi_frame(skb->data);
+	int hdr_len = skbpriv(skb)->ld->get_hdr_len(skb->data);
+
+	switch (skbpriv(skb)->ld->protocol) {
+	case PROTOCOL_SIPC:
+		/* header pointer stays valid: skb_pull() only moves skb->data */
+		hdr = (struct sipc5_link_header *)skb->data;
+		skb_pull(skb, hdr_len);
+
+		if (multi_frame)
+			return gather_multi_frame(hdr, skb);
+		return queue_skb_to_iod(skb, skbpriv(skb)->iod);
+	case PROTOCOL_SIT:
+		hdr_sit = (struct exynos_link_header *)skb->data;
+		skb_pull(skb, EXYNOS_HEADER_SIZE);
+
+		if (multi_frame)
+			return gather_multi_frame_sit(hdr_sit, skb);
+		return queue_skb_to_iod(skb, skbpriv(skb)->iod);
+	default:
+		mif_err("protocol error %d\n", skbpriv(skb)->ld->protocol);
+		return -EINVAL;
+	}
+}
+
+/* Route an FMT/IPC skb: strip the link header when present, else queue as-is. */
+static int rx_fmt_ipc(struct sk_buff *skb)
+{
+	if (!skbpriv(skb)->lnk_hdr)
+		return queue_skb_to_iod(skb, skbpriv(skb)->iod);
+
+	return rx_frame_with_link_header(skb);
+}
+
+/* Queue a raw/misc skb to its iod, removing any SIPC5 link header first. */
+static int rx_raw_misc(struct sk_buff *skb)
+{
+	struct io_device *iod = skbpriv(skb)->iod;
+	struct link_device *ld = skbpriv(skb)->ld;
+
+	if (skbpriv(skb)->lnk_hdr)
+		skb_pull(skb, ld->get_hdr_len(skb->data));
+
+	return queue_skb_to_iod(skb, iod);
+}
+
+/*
+ * Decide whether @skb qualifies for GRO under the current gro_support
+ * policy: only IPv4/IPv6 packets carrying TCP (and, for GRO_TCP_UDP,
+ * UDP) are eligible.
+ */
+static bool check_gro_support(struct sk_buff *skb)
+{
+	u8 l4proto;
+
+	if (gro_support == GRO_NONE)
+		return false;
+
+	/* the high nibble of the first byte is the IP version field */
+	switch (skb->data[0] & 0xF0) {
+	case 0x40:
+		l4proto = ip_hdr(skb)->protocol;
+		break;
+	case 0x60:
+		l4proto = ipv6_hdr(skb)->nexthdr;
+		break;
+	default:
+		return false;
+	}
+
+	switch (gro_support) {
+	case GRO_TCP_UDP:
+		return l4proto == IPPROTO_TCP || l4proto == IPPROTO_UDP;
+	case GRO_TCP_ONLY:
+		return l4proto == IPPROTO_TCP;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Deliver one PS (packet-switched) data packet to the network stack via
+ * the iod's net_device (or its CLAT companion for rx_clat traffic), using
+ * GRO when a NAPI context is attached and the policy allows it.  Returns
+ * the original packet length, or -ENODEV when no net_device is attached.
+ *
+ * NOTE(review): the skb is not freed on the -ENODEV path — presumably the
+ * caller drops it; confirm against the link-device code.
+ */
+static int rx_multi_pdp(struct sk_buff *skb)
+{
+	struct link_device *ld = skbpriv(skb)->ld;
+	struct io_device *iod = skbpriv(skb)->iod;
+	struct iphdr *iphdr;
+	int len = skb->len;
+	int ret = 0;
+	struct napi_struct *napi = NULL;
+
+	skb->dev = (skbpriv(skb)->rx_clat ? iod->clat_ndev : iod->ndev);
+	if (!skb->dev || !iod->ndev) {
+		mif_err("%s: ERR! no iod->ndev\n", iod->name);
+		return -ENODEV;
+	}
+
+	if (skbpriv(skb)->lnk_hdr) {
+		/* Remove the SIPC5 link header */
+		skb_pull(skb, skbpriv(skb)->ld->get_hdr_len(skb->data));
+	}
+
+	iod->ndev->stats.rx_packets++;
+	iod->ndev->stats.rx_bytes += skb->len;
+
+	/* check the version of IP */
+	iphdr = (struct iphdr *)skb->data;
+	if (iphdr->version == IPv6)
+		skb->protocol = htons(ETH_P_IPV6);
+	else
+		skb->protocol = htons(ETH_P_IP);
+
+#ifdef DEBUG_MODEM_IF_IP_DATA
+	print_ipv4_packet(skb->data, RX);
+#endif
+#if defined(DEBUG_MODEM_IF_IODEV_RX) && defined(DEBUG_MODEM_IF_PS_DATA)
+	mif_pkt(iod->ch, "IOD-RX", skb);
+#endif
+
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+	skb_reset_mac_header(skb);
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+	tpmon_add_rx_bytes(skb);
+#endif
+
+	/* GRO only when a NAPI context exists and the packet is eligible */
+	napi = skbpriv(skb)->napi;
+	if (!napi || !check_gro_support(skb)) {
+		ret = netif_receive_skb(skb);
+		if (ret != NET_RX_SUCCESS)
+			mif_err_limited("%s: %s<-%s: ERR! netif_receive_skb\n",
+					ld->name, iod->name, iod->mc->name);
+	} else {
+		ret = napi_gro_receive(napi, skb);
+		// Comment this part of codes to fix build errors caused by GRO_DROP
+		/*
+		if (ret == GRO_DROP)
+			mif_err_limited("%s: %s<-%s: ERR! napi_gro_receive\n",
+					ld->name, iod->name, iod->mc->name);
+		*/
+	}
+	return len;
+}
+
+/*
+ * Route a received frame to the io_device bound to its channel number:
+ * FMT/control channels through the IPC path, PS channels to the net path,
+ * everything else to the raw/misc path.  Returns bytes consumed, or a
+ * negative errno (invalid channel, missing/closed iod, bad protocol).
+ */
+static int rx_demux(struct link_device *ld, struct sk_buff *skb)
+{
+	struct io_device *iod;
+	u8 ch = skbpriv(skb)->sipc_ch;
+	struct link_device *skb_ld = skbpriv(skb)->ld;
+
+	if (unlikely(ch == 0)) {
+		mif_err("%s: ERR! invalid ch# %d\n", ld->name, ch);
+		return -ENODEV;
+	}
+
+	/* IP loopback: redirect onto the first PDP channel */
+	if (ch == DATA_LOOPBACK_CHANNEL && ld->msd->loopback_ipaddr)
+#if IS_ENABLED(CONFIG_CH_EXTENSION)
+		ch = SIPC_CH_EX_ID_PDP_0;
+#else
+		ch = SIPC_CH_ID_PDP_0;
+#endif
+
+	iod = link_get_iod_with_channel(ld, ch);
+	if (unlikely(!iod)) {
+		mif_err("%s: ERR! no iod with ch# %d\n", ld->name, ch);
+		return -ENODEV;
+	}
+
+	if (atomic_read(&iod->opened) <= 0) {
+		mif_err_limited("%s: ERR! %s is not opened\n",
+				ld->name, iod->name);
+		return -ENODEV;
+	}
+
+	switch (skb_ld->protocol) {
+	case PROTOCOL_SIPC:
+		if (skb_ld->is_fmt_ch(ch))
+			return rx_fmt_ipc(skb);
+		else if (skb_ld->is_ps_ch(ch))
+			return rx_multi_pdp(skb);
+		else
+			return rx_raw_misc(skb);
+		break;
+	case PROTOCOL_SIT:
+		if (skb_ld->is_fmt_ch(ch) || skb_ld->is_oem_ch(ch) || skb_ld->is_wfs0_ch(ch))
+			return rx_fmt_ipc(skb);
+		else if (skb_ld->is_ps_ch(ch) || skb_ld->is_embms_ch(ch))
+			return rx_multi_pdp(skb);
+		else
+			return rx_raw_misc(skb);
+		break;
+	default:
+		mif_err("protocol error %d\n", skb_ld->protocol);
+		return -EINVAL;
+	}
+}
+
+/*
+ * "recv_skb_single" hook: take one frame from the link device, trim off
+ * any SIPC5 alignment padding and demux it to the owning io_device.
+ */
+static int io_dev_recv_skb_single_from_link_dev(struct io_device *iod,
+						struct link_device *ld,
+						struct sk_buff *skb)
+{
+	int ret;
+
+	cpif_wake_lock_timeout(iod->ws, iod->waketime ?: msecs_to_jiffies(200));
+
+	/* aligned links pad each SIPC5 frame; cut the padding off */
+	if (skbpriv(skb)->lnk_hdr && ld->aligned)
+		skb_trim(skb, skbpriv(skb)->ld->get_frame_len(skb->data));
+
+	ret = rx_demux(ld, skb);
+	if (ret < 0)
+		mif_err_limited("%s<-%s: ERR! rx_demux fail (err %d)\n",
+				iod->name, ld->name, ret);
+
+	return ret;
+}
+
+/*
+ * "recv_net_skb" hook: upload one PS data packet from the link device to
+ * the network protocol stack, provided the io_device is open.
+ */
+static int io_dev_recv_net_skb_from_link_dev(struct io_device *iod,
+					     struct link_device *ld,
+					     struct sk_buff *skb)
+{
+	if (unlikely(atomic_read(&iod->opened) <= 0)) {
+		mif_err_limited("%s: %s<-%s: ERR! %s is not opened\n",
+				ld->name, iod->name, iod->mc->name, iod->name);
+		return -ENODEV;
+	}
+
+	cpif_wake_lock_timeout(iod->ws, iod->waketime ?: msecs_to_jiffies(200));
+
+	return rx_multi_pdp(skb);
+}
+
+/*
+ * Build the 16-bit SIT fragment-config word for a TX of @count bytes.
+ * Payloads that fit in one 2KB frame (header included) are marked single;
+ * larger payloads get the multi-start marker, a 6-bit rolling packet
+ * index and a frame count in the low byte.
+ */
+u16 exynos_build_fr_config(struct io_device *iod, struct link_device *ld,
+		unsigned int count)
+{
+	u16 fr_cfg = 0;
+	u8 frames = 0;
+	u8 *packet_index = &iod->packet_index;
+
+	if (iod->format > IPC_DUMP)
+		return 0;
+
+	/* BOOT/DUMP traffic is always sent as single frames */
+	if (iod->format >= IPC_BOOT)
+		return fr_cfg |= (EXYNOS_SINGLE_MASK << 8);
+
+	if ((count + EXYNOS_HEADER_SIZE) <= SZ_2K) {
+		fr_cfg |= (EXYNOS_SINGLE_MASK << 8);
+	} else {
+		/* frames = count of *additional* frames beyond the first;
+		 * the exact-multiple case subtracts one — TODO confirm the
+		 * intended encoding against the CP-side protocol spec.
+		 */
+		frames = count / (SZ_2K - EXYNOS_HEADER_SIZE);
+		frames = (count % (SZ_2K - EXYNOS_HEADER_SIZE)) ? frames : frames - 1;
+
+		fr_cfg |= ((EXYNOS_MULTI_START_MASK | (0x3f & ++*packet_index)) << 8) | frames;
+	}
+
+	return fr_cfg;
+}
+
+/*
+ * Fill in the EXYNOS (SIT) link header at @buff for a frame that carries
+ * @count payload bytes with fragment config @cfg.  Bumps the global frame
+ * counter and the per-channel sequence counter.  @ld and @ctl are unused
+ * but kept for signature compatibility with the SIPC5 builder.
+ *
+ * Fix vs. original: removed the conditional re-store of @cfg into the
+ * frag-config field for cfg == EXYNOS_SINGLE_MASK — the field had just
+ * been written with the very same value (dead store).
+ */
+void exynos_build_header(struct io_device *iod, struct link_device *ld,
+		u8 *buff, u16 cfg, u8 ctl, size_t count)
+{
+	u16 *exynos_header = (u16 *)(buff + EXYNOS_START_OFFSET);
+	u16 *frame_seq = (u16 *)(buff + EXYNOS_FRAME_SEQ_OFFSET);
+	u16 *frag_cfg = (u16 *)(buff + EXYNOS_FRAG_CONFIG_OFFSET);
+	u16 *size = (u16 *)(buff + EXYNOS_LEN_OFFSET);
+	struct exynos_seq_num *seq_num = &(iod->seq_num);
+
+	*exynos_header = EXYNOS_START_MASK;
+	*frame_seq = ++seq_num->frame_cnt;	/* global frame counter */
+	*frag_cfg = cfg;
+	*size = (u16)(EXYNOS_HEADER_SIZE + count);
+	buff[EXYNOS_CH_ID_OFFSET] = iod->ch;
+	/* per-channel sequence number */
+	buff[EXYNOS_CH_SEQ_OFFSET] = ++seq_num->ch_cnt[iod->ch];
+}
+
+/* Advance the 7-bit multi-frame info ID under its spinlock (wraps at 128). */
+static inline void sipc5_inc_info_id(struct io_device *iod)
+{
+	spin_lock(&iod->info_id_lock);
+	iod->info_id = (iod->info_id + 1) & 0x7F;
+	spin_unlock(&iod->info_id_lock);
+}
+
+/*
+ * Build the SIPC5 config byte for a frame of @count payload bytes.
+ * Returns 0 for non-IPC formats.  Sets the padding flag on aligned links
+ * and switches to multi-frame mode — bumping the info ID — when the frame
+ * would exceed the iod's maximum TX size.
+ */
+u8 sipc5_build_config(struct io_device *iod, struct link_device *ld,
+		unsigned int count)
+{
+	u8 config = SIPC5_START_MASK;
+	bool oversized;
+
+	if (iod->format > IPC_DUMP)
+		return 0;
+
+	if (ld->aligned)
+		config |= SIPC5_PADDING_EXIST;
+
+	oversized = iod->max_tx_size > 0 &&
+		count + SIPC5_MIN_HEADER_SIZE > iod->max_tx_size;
+	if (oversized) {
+		mif_info("%s: MULTI_FRAME_CFG: count=%u\n", iod->name, count);
+		config |= SIPC5_MULTI_FRAME_CFG;
+		sipc5_inc_info_id(iod);
+	}
+
+	return config;
+}
+
+/*
+ * Fill in the SIPC5 link header at @buff for a frame carrying @tx_bytes
+ * payload bytes, with @remains bytes still to follow in later frames.
+ *
+ * Statement order matters: the config byte must be stored first because
+ * sipc5_ext_len() and sipc5_multi_frame() below read it back from the
+ * buffer to decide the length-field width and whether a control byte
+ * ("more" bit + info ID) is needed.
+ */
+void sipc5_build_header(struct io_device *iod, u8 *buff, u8 cfg,
+		unsigned int tx_bytes, unsigned int remains)
+{
+	u16 *sz16 = (u16 *)(buff + SIPC5_LEN_OFFSET);
+	u32 *sz32 = (u32 *)(buff + SIPC5_LEN_OFFSET);
+	unsigned int hdr_len = sipc5_get_hdr_len(&cfg);
+	u8 ctrl;
+
+	/* Store the config field and the channel ID field */
+	buff[SIPC5_CONFIG_OFFSET] = cfg;
+	buff[SIPC5_CH_ID_OFFSET] = iod->ch;
+
+	/* Store the frame length field (16 or 32 bit, per the config) */
+	if (sipc5_ext_len(buff))
+		*sz32 = (u32)(hdr_len + tx_bytes);
+	else
+		*sz16 = (u16)(hdr_len + tx_bytes);
+
+	/* Store the control field: "more" bit in bit 7, info ID below */
+	if (sipc5_multi_frame(buff)) {
+		ctrl = (remains > 0) ? 1 << 7 : 0;
+		ctrl |= iod->info_id;
+		buff[SIPC5_CTRL_OFFSET] = ctrl;
+		mif_info("MULTI: ctrl=0x%x(tx_bytes:%u, remains:%u)\n",
+			ctrl, tx_bytes, remains);
+	}
+}
+
+/* Stub ndo_open: the CP-log pseudo net device must never be brought up. */
+static int dummy_net_open(struct net_device *ndev)
+{
+	return -EINVAL;
+}
+
+/* Minimal netdev ops for the CPLOG pseudo network device. */
+static const struct net_device_ops dummy_net_ops = {
+	.ndo_open = dummy_net_open,
+};
+
+/*
+ * Register the character device (cdev + sysfs device node) backing @iod.
+ * Minor numbers are handed out sequentially from the shared major; the
+ * static counter only advances after a successful cdev_add().
+ *
+ * Fixes vs. original: cdev_add() returns 0 or a negative errno, so test
+ * "ret < 0" directly instead of abusing IS_ERR_VALUE() on a cast, and
+ * propagate the real error from device_create() via PTR_ERR() rather
+ * than collapsing every failure to -ENOMEM.
+ */
+static int cpif_cdev_create_device(struct io_device *iod, const struct file_operations *fops)
+{
+	int ret;
+	static u32 idx;	/* next minor offset within the shared major */
+
+	cdev_init(&iod->cdev, fops);
+	iod->cdev.owner = THIS_MODULE;
+
+	ret = cdev_add(&iod->cdev, iod->msd->cdev_major + idx, 1);
+	if (ret < 0) {
+		mif_err("cdev_add() for %s failed:%d\n", iod->name, ret);
+		return ret;
+	}
+	idx++;
+
+	iod->cdevice = device_create(iod->msd->cdev_class, NULL, iod->cdev.dev, iod,
+				     "%s", iod->name);
+	if (IS_ERR_OR_NULL(iod->cdevice)) {
+		mif_err("device_create() for %s failed\n", iod->name);
+		ret = iod->cdevice ? PTR_ERR(iod->cdevice) : -ENOMEM;
+		cdev_del(&iod->cdev);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * One-time setup of an io_device: wire up the RX entry points and register
+ * the backing character device and/or network device according to io_typ.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix vs. original: when register_netdev() failed, the old code freed the
+ * net_device and then kept dereferencing it (netdev_priv() / list setup on
+ * freed memory).  Registration failures now free the netdev, clear
+ * iod->ndev and return the error immediately.
+ */
+int sipc5_init_io_device(struct io_device *iod, struct mem_link_device *mld)
+{
+	int ret = 0;
+	int i;
+	struct vnet *vnet;
+	unsigned int txqs = 1, rxqs = 1;
+
+	if (iod->attrs & IO_ATTR_SBD_IPC)
+		iod->sbd_ipc = true;
+
+	if (iod->attrs & IO_ATTR_NO_LINK_HEADER)
+		iod->link_header = false;
+	else
+		iod->link_header = true;
+
+	/* RX entry points invoked by the link device */
+	iod->recv_skb_single = io_dev_recv_skb_single_from_link_dev;
+	iod->recv_net_skb = io_dev_recv_net_skb_from_link_dev;
+
+	/* Register misc or net device */
+	switch (iod->io_typ) {
+	case IODEV_BOOTDUMP:
+		init_waitqueue_head(&iod->wq);
+		skb_queue_head_init(&iod->sk_rx_q);
+
+		ret = cpif_cdev_create_device(iod, get_bootdump_io_fops());
+		if (ret)
+			mif_info("%s: ERR! cpif_cdev_create_device failed\n", iod->name);
+		break;
+
+	case IODEV_IPC:
+		init_waitqueue_head(&iod->wq);
+		skb_queue_head_init(&iod->sk_rx_q);
+
+		ret = cpif_cdev_create_device(iod, get_ipc_io_fops());
+		if (ret)
+			mif_info("%s: ERR! cpif_cdev_create_device failed\n", iod->name);
+
+		/* the CP log channel additionally exposes a pseudo netdev */
+		if (iod->ch == SIPC_CH_ID_CPLOG1) {
+			iod->ndev = alloc_netdev(sizeof(struct vnet), iod->name,
+						 NET_NAME_UNKNOWN, vnet_setup);
+			if (!iod->ndev) {
+				mif_info("%s: ERR! alloc_netdev fail\n", iod->name);
+				return -ENOMEM;
+			}
+
+			iod->ndev->netdev_ops = &dummy_net_ops;
+			ret = register_netdev(iod->ndev);
+			if (ret) {
+				mif_info("%s: ERR! register_netdev fail\n", iod->name);
+				free_netdev(iod->ndev);
+				iod->ndev = NULL;
+				/* was: fell through and used the freed netdev */
+				return ret;
+			}
+
+			vnet = netdev_priv(iod->ndev);
+			vnet->iod = iod;
+			mif_info("iod:%s, both registered\n", iod->name);
+		}
+		break;
+
+	case IODEV_NET:
+#if IS_ENABLED(CONFIG_MODEM_IF_QOS)
+		txqs = mld->pktproc_ul.num_queue;
+#endif
+#if IS_ENABLED(CONFIG_CP_PKTPROC)
+		rxqs = mld->pktproc.num_queue;
+#endif
+		skb_queue_head_init(&iod->sk_rx_q);
+		INIT_LIST_HEAD(&iod->node_ndev);
+
+		iod->ndev = alloc_netdev_mqs(sizeof(struct vnet),
+			iod->name, NET_NAME_UNKNOWN, vnet_setup,
+			txqs, rxqs);
+		if (!iod->ndev) {
+			mif_info("%s: ERR! alloc_netdev fail\n", iod->name);
+			return -ENOMEM;
+		}
+
+		ret = register_netdev(iod->ndev);
+		if (ret) {
+			mif_info("%s: ERR! register_netdev fail\n", iod->name);
+			free_netdev(iod->ndev);
+			iod->ndev = NULL;
+			/* was: fell through and used the freed netdev */
+			return ret;
+		}
+
+		mif_debug("iod 0x%pK\n", iod);
+		vnet = netdev_priv(iod->ndev);
+		mif_debug("vnet 0x%pK\n", vnet);
+		vnet->iod = iod;
+
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+		INIT_LIST_HEAD(&iod->node_all_ndev);
+		tpmon_add_net_node(&iod->node_all_ndev);
+#endif
+		break;
+
+	case IODEV_DUMMY:
+		skb_queue_head_init(&iod->sk_rx_q);
+
+		ret = cpif_cdev_create_device(iod, NULL);
+		if (ret)
+			mif_info("%s: ERR! cpif_cdev_create_device fail\n", iod->name);
+
+		/* umts_dummy carries the module-wide control sysfs files */
+		ret = device_create_file(iod->cdevice, &attr_waketime);
+		if (ret)
+			mif_info("%s: ERR! device_create_file fail\n",
+				iod->name);
+
+		ret = device_create_file(iod->cdevice, &attr_loopback);
+		if (ret)
+			mif_err("failed to create `loopback file' : %s\n",
+				iod->name);
+
+		ret = device_create_file(iod->cdevice, &attr_txlink);
+		if (ret)
+			mif_err("failed to create `txlink file' : %s\n",
+				iod->name);
+
+		ret = device_create_file(iod->cdevice, &attr_gro_option);
+		if (ret)
+			mif_err("failed to create `gro_option file' : %s\n",
+				iod->name);
+		break;
+
+	default:
+		mif_info("%s: ERR! wrong io_type %d\n", iod->name, iod->io_typ);
+		return -EINVAL;
+	}
+
+	/* per-ID reassembly queues for multi-frame RX */
+	for (i = 0; i < NUM_SIPC_MULTI_FRAME_IDS; i++)
+		skb_queue_head_init(&iod->sk_multi_q[i]);
+
+	return ret;
+}
+
+/*
+ * Tear down everything sipc5_init_io_device() registered for @iod:
+ * wakeup source, character device node, network device and (for the
+ * dummy device) the module-wide sysfs files, according to io_typ.
+ */
+void sipc5_deinit_io_device(struct io_device *iod)
+{
+	mif_err("%s: io_typ=%d\n", iod->name, iod->io_typ);
+
+	cpif_wake_lock_unregister(iod->ws);
+
+	/* De-register char or net device */
+	switch (iod->io_typ) {
+	case IODEV_BOOTDUMP:
+		device_destroy(iod->msd->cdev_class, iod->cdev.dev);
+		cdev_del(&iod->cdev);
+		break;
+
+	case IODEV_IPC:
+		/* the CPLOG channel also registered a pseudo netdev */
+		if (iod->ch == SIPC_CH_ID_CPLOG1) {
+			unregister_netdev(iod->ndev);
+			free_netdev(iod->ndev);
+		}
+
+		device_destroy(iod->msd->cdev_class, iod->cdev.dev);
+		cdev_del(&iod->cdev);
+		break;
+
+	case IODEV_NET:
+		unregister_netdev(iod->ndev);
+		free_netdev(iod->ndev);
+		break;
+
+	case IODEV_DUMMY:
+		device_remove_file(iod->cdevice, &attr_waketime);
+		device_remove_file(iod->cdevice, &attr_loopback);
+		device_remove_file(iod->cdevice, &attr_txlink);
+		device_remove_file(iod->cdevice, &attr_gro_option);
+
+		device_destroy(iod->msd->cdev_class, iod->cdev.dev);
+		cdev_del(&iod->cdev);
+		break;
+	}
+}
diff --git a/modem_main.c b/modem_main.c
new file mode 100644
index 0000000..168518a
--- /dev/null
+++ b/modem_main.c
@@ -0,0 +1,1011 @@
+// SPDX-License-Identifier: GPL-2.0
+/* linux/drivers/modem/modem.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/if_arp.h>
+#include <linux/device.h>
+
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/proc_fs.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/dma-map-ops.h>
+#include <uapi/linux/in.h>
+#include <linux/inet.h>
+#include <net/ipv6.h>
+#include <soc/google/exynos-modem-ctrl.h>
+#include <soc/google/modem_notifier.h>
+
+#include <linux/s5910.h>
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_SHMEM)
+#include <linux/shm_ipc.h>
+#include "mcu_ipc.h"
+#endif
+
+#include "modem_prj.h"
+#include "modem_variation.h"
+#include "modem_utils.h"
+#include "modem_toe_device.h"
+
+#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
+#include "cpif_qos_info.h"
+#endif
+
+#if IS_ENABLED(CONFIG_CP_PMIC)
+#include "cp_pmic.h"
+#endif
+
+#define FMT_WAKE_TIME (msecs_to_jiffies(300))
+#define RAW_WAKE_TIME (HZ*6)
+#define NET_WAKE_TIME (HZ/2)
+
+/*
+ * Allocate the modem_shared context plus the IPC-log storage buffer.
+ * The buffer layout is [separator tag][buffer size][log area]; on return
+ * msd->storage.addr points at the log area, past the two
+ * MAX_MIF_SEPA_SIZE-byte slots.
+ *
+ * Fix vs. original: removed the memset() after devm_kcalloc() —
+ * devm_kcalloc() already returns zeroed memory, so the memset was
+ * redundant work over the whole buffer.
+ */
+static struct modem_shared *create_modem_shared_data(
+		struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct modem_shared *msd;
+	int size = MAX_MIF_BUFF_SIZE;
+
+	msd = devm_kzalloc(dev, sizeof(struct modem_shared), GFP_KERNEL);
+	if (!msd)
+		return NULL;
+
+	/* initialize link device list */
+	INIT_LIST_HEAD(&msd->link_dev_list);
+	INIT_LIST_HEAD(&msd->activated_ndev_list);
+
+	/* initialize tree of io devices */
+	msd->iodevs_tree_fmt = RB_ROOT;
+
+	msd->storage.cnt = 0;
+	msd->storage.addr = devm_kcalloc(dev, MAX_MIF_BUFF_SIZE +
+		(MAX_MIF_SEPA_SIZE * 2), sizeof(*msd->storage.addr), GFP_KERNEL);
+	if (!msd->storage.addr) {
+		mif_err("IPC logger buff alloc failed!!\n");
+		devm_kfree(dev, msd);
+		return NULL;
+	}
+	/* [separator tag][buffer size][log area...] */
+	memcpy(msd->storage.addr, MIF_SEPARATOR, strlen(MIF_SEPARATOR));
+	msd->storage.addr += MAX_MIF_SEPA_SIZE;
+	memcpy(msd->storage.addr, &size, sizeof(int));
+	msd->storage.addr += MAX_MIF_SEPA_SIZE;
+	spin_lock_init(&msd->lock);
+	spin_lock_init(&msd->active_list_lock);
+
+	return msd;
+}
+
+/*
+ * Allocate and initialize the modem_ctl for this platform device:
+ * pick the modem variant from the name, seed locks/completions and run
+ * the per-modem init hook.  Returns NULL on any failure (allocations are
+ * devm-managed, so no explicit rollback beyond the devm_kfree here).
+ */
+static struct modem_ctl *create_modemctl_device(struct platform_device *pdev,
+		struct modem_shared *msd)
+{
+	struct device *dev = &pdev->dev;
+	struct modem_data *pdata = pdev->dev.platform_data;
+	struct modem_ctl *modemctl;
+	int ret;
+
+	/* create modem control device */
+	modemctl = devm_kzalloc(dev, sizeof(struct modem_ctl), GFP_KERNEL);
+	if (!modemctl) {
+		mif_err("%s: modemctl devm_kzalloc fail\n", pdata->name);
+		mif_err("%s: xxx\n", pdata->name);
+		return NULL;
+	}
+
+	modemctl->dev = dev;
+	modemctl->name = pdata->name;
+	modemctl->mdm_data = pdata;
+	/* variant selection by name prefix; anything not s5400 is s5300 */
+	if (!strncmp(modemctl->name, "s5400", 5))
+		modemctl->variant = MODEM_SEC_5400;
+	else
+		modemctl->variant = MODEM_SEC_5300;
+
+	modemctl->msd = msd;
+
+	modemctl->phone_state = STATE_OFFLINE;
+
+	INIT_LIST_HEAD(&modemctl->modem_state_notify_list);
+	spin_lock_init(&modemctl->lock);
+	spin_lock_init(&modemctl->tx_timer_lock);
+	init_completion(&modemctl->init_cmpl);
+	init_completion(&modemctl->off_cmpl);
+
+	/* init modemctl device for getting modemctl operations */
+	ret = call_modem_init_func(modemctl, pdata);
+	if (ret) {
+		mif_err("%s: call_modem_init_func fail (err %d)\n",
+			pdata->name, ret);
+		mif_err("%s: xxx\n", pdata->name);
+		devm_kfree(dev, modemctl);
+		return NULL;
+	}
+
+	mif_info("%s is created!!!\n", pdata->name);
+
+	return modemctl;
+}
+
+/*
+ * Allocate and initialize one io_device from its platform description
+ * (@io_t): copy the static attributes, hook it to the modem_ctl and
+ * modem_shared trees, and register its char/net device via
+ * sipc5_init_io_device().  Returns NULL on failure (allocation is
+ * devm-managed).
+ */
+static struct io_device *create_io_device(struct platform_device *pdev,
+		struct modem_io_t *io_t, struct modem_shared *msd,
+		struct modem_ctl *modemctl, struct modem_data *pdata)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct io_device *iod;
+
+	iod = devm_kzalloc(dev, sizeof(struct io_device), GFP_KERNEL);
+	if (!iod) {
+		mif_err("iod == NULL\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&iod->list);
+	RB_CLEAR_NODE(&iod->node_fmt);
+
+	iod->name = io_t->name;
+	iod->ch = io_t->ch;
+	iod->format = io_t->format;
+	iod->io_typ = io_t->io_type;
+	iod->link_type = io_t->link_type;
+	iod->attrs = io_t->attrs;
+	iod->max_tx_size = io_t->ul_buffer_size;
+	iod->ipc_version = pdata->ipc_version;
+	atomic_set(&iod->opened, 0);
+	spin_lock_init(&iod->info_id_lock);
+	spin_lock_init(&iod->clat_lock);
+
+	/* link between io device and modem control */
+	iod->mc = modemctl;
+
+	/* the primary FMT channel doubles as the modem_ctl's control iod */
+	switch (pdata->protocol) {
+	case PROTOCOL_SIPC:
+		if (iod->format == IPC_FMT && iod->ch == SIPC5_CH_ID_FMT_0)
+			modemctl->iod = iod;
+		break;
+	case PROTOCOL_SIT:
+		if (iod->format == IPC_FMT && iod->ch == EXYNOS_CH_ID_FMT_0)
+			modemctl->iod = iod;
+		break;
+	default:
+		mif_err("protocol error\n");
+		return NULL;
+	}
+
+	if (iod->format == IPC_BOOT) {
+		modemctl->bootd = iod;
+		mif_info("BOOT device = %s\n", iod->name);
+	}
+
+	/* link between io device and modem shared */
+	iod->msd = msd;
+
+	/* add iod to rb_tree */
+	if (iod->format != IPC_RAW)
+		insert_iod_with_format(msd, iod->format, iod);
+
+	switch (pdata->protocol) {
+	case PROTOCOL_SIPC:
+		if (sipc5_is_not_reserved_channel(iod->ch))
+			insert_iod_with_channel(msd, iod->ch, iod);
+		break;
+	case PROTOCOL_SIT:
+		insert_iod_with_channel(msd, iod->ch, iod);
+		break;
+	default:
+		mif_err("protocol error\n");
+		return NULL;
+	}
+
+	/* register misc device or net device */
+	ret = sipc5_init_io_device(iod, pdata->mld);
+	if (ret) {
+		devm_kfree(dev, iod);
+		mif_err("sipc5_init_io_device fail (%d)\n", ret);
+		return NULL;
+	}
+
+	mif_info("%s created. attrs:0x%08x\n", iod->name, iod->attrs);
+	return iod;
+}
+
+/*
+ * Bind @iod to its link device(s), register its wakeup source and pick a
+ * default wake-lock timeout — first keyed on the channel number, then
+ * overridden by the format for FMT/BOOT/DUMP devices.
+ *
+ * NOTE(review): the second switch (on format) can override the waketime
+ * chosen by the first (on channel); this looks intentional but confirm.
+ */
+static int attach_devices(struct io_device *iod, struct device *dev)
+{
+	struct modem_shared *msd = iod->msd;
+	struct link_device *ld;
+
+	/* find link type for this io device */
+	list_for_each_entry(ld, &msd->link_dev_list, list) {
+		if (IS_CONNECTED(iod, ld)) {
+			mif_debug("set %s->%s\n", iod->name, ld->name);
+			set_current_link(iod, ld);
+
+			if (iod->io_typ == IODEV_NET && iod->ndev) {
+				struct vnet *vnet;
+
+				vnet = netdev_priv(iod->ndev);
+				vnet->hiprio_ack_only = ld->hiprio_ack_only;
+			}
+		}
+	}
+
+	/* an unlinked iod is a configuration error — fail hard */
+	if (!get_current_link(iod)) {
+		mif_err("%s->link == NULL\n", iod->name);
+		BUG();
+	}
+
+	iod->ws = cpif_wake_lock_register(dev, iod->name);
+	if (iod->ws == NULL) {
+		mif_err("%s: wakeup_source_register fail\n", iod->name);
+		return -EINVAL;
+	}
+
+	switch (iod->ch) {
+	case SIPC5_CH_ID_FMT_0 ... SIPC5_CH_ID_FMT_9:
+		iod->waketime = FMT_WAKE_TIME;
+		break;
+
+	case SIPC5_CH_ID_BOOT_0 ... SIPC5_CH_ID_DUMP_9:
+		iod->waketime = RAW_WAKE_TIME;
+		break;
+
+#if IS_ENABLED(CONFIG_CH_EXTENSION)
+	case SIPC_CH_EX_ID_PDP_0 ... SIPC_CH_EX_ID_PDP_MAX:
+	case SIPC_CH_ID_BT_DUN ... SIPC_CH_ID_CIQ_DATA:
+	case SIPC_CH_ID_CPLOG1 ... SIPC_CH_ID_LOOPBACK2:
+#else
+	case SIPC_CH_ID_PDP_0 ... SIPC_CH_ID_LOOPBACK2:
+#endif
+		iod->waketime = NET_WAKE_TIME;
+		break;
+
+	default:
+		iod->waketime = 0;
+		break;
+	}
+
+	switch (iod->format) {
+	case IPC_FMT:
+		iod->waketime = FMT_WAKE_TIME;
+		break;
+
+	case IPC_BOOT ... IPC_DUMP:
+		iod->waketime = RAW_WAKE_TIME;
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the common modem attributes from the device tree into @pdata.
+ * The mif_dt_read_* macros handle missing mandatory properties
+ * themselves; the *_noerr variants leave the field untouched when the
+ * property is absent.  NOTE(review): exact failure behaviour is defined
+ * by the macros (modem_utils.h) — confirm whether they return early.
+ */
+static int parse_dt_common_pdata(struct device_node *np,
+		struct modem_data *pdata)
+{
+	mif_dt_read_string(np, "mif,name", pdata->name);
+	mif_dt_read_u32(np, "mif,cp_num", pdata->cp_num);
+
+	mif_dt_read_enum(np, "mif,modem_type", pdata->modem_type);
+	mif_dt_read_enum(np, "mif,ipc_version", pdata->ipc_version);
+
+	mif_dt_read_u32_noerr(np, "mif,protocol", pdata->protocol);
+	mif_info("protocol:%d\n", pdata->protocol);
+
+	mif_dt_read_u32(np, "mif,link_type", pdata->link_type);
+	mif_dt_read_string(np, "mif,link_name", pdata->link_name);
+	mif_dt_read_u32(np, "mif,link_attrs", pdata->link_attrs);
+	mif_dt_read_u32(np, "mif,interrupt_types", pdata->interrupt_types);
+
+	mif_dt_read_u32_noerr(np, "mif,capability_check", pdata->capability_check);
+	mif_info("capability_check:%d\n", pdata->capability_check);
+
+	mif_dt_read_u32_noerr(np, "mif,cp2ap_active_not_alive", pdata->cp2ap_active_not_alive);
+	mif_info("cp2ap_active_not_alive:%d\n", pdata->cp2ap_active_not_alive);
+
+	mif_dt_read_u32_noerr(np, "mif_off_during_volte", pdata->mif_off_during_volte);
+	mif_info("mif_off_during_volte:%d\n", pdata->mif_off_during_volte);
+
+	return 0;
+}
+
+/*
+ * Read the AP<->CP mailbox interrupt assignments from the device tree.
+ * Only meaningful for SHMEM/PCIE links; other link types are skipped
+ * (returning 0 so probing continues).
+ *
+ * Fix vs. original: dropped the useless cast of devm_kzalloc()'s void *
+ * return value (kernel style: never cast allocator returns).
+ */
+static int parse_dt_mbox_pdata(struct device *dev, struct device_node *np,
+		struct modem_data *pdata)
+{
+	struct modem_mbox *mbox;
+	int ret = 0;
+
+	if ((pdata->link_type != LINKDEV_SHMEM) &&
+	    (pdata->link_type != LINKDEV_PCIE)) {
+		mif_err("mbox: link type error:0x%08x\n", pdata->link_type);
+		return ret;
+	}
+
+	mbox = devm_kzalloc(dev, sizeof(struct modem_mbox), GFP_KERNEL);
+	if (!mbox) {
+		mif_err("mbox: failed to alloc memory\n");
+		return -ENOMEM;
+	}
+	pdata->mbx = mbox;
+
+	/* AP-to-CP interrupt numbers */
+	mif_dt_read_u32(np, "mif,int_ap2cp_msg", mbox->int_ap2cp_msg);
+	mif_dt_read_u32(np, "mif,int_ap2cp_wakeup", mbox->int_ap2cp_wakeup);
+	mif_dt_read_u32(np, "mif,int_ap2cp_status", mbox->int_ap2cp_status);
+	mif_dt_read_u32(np, "mif,int_ap2cp_active", mbox->int_ap2cp_active);
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+	mif_dt_read_u32(np, "mif,int_ap2cp_lcd_status",
+			mbox->int_ap2cp_lcd_status);
+#endif
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	mif_dt_read_u32(np, "mif,int_ap2cp_clatinfo_send", mbox->int_ap2cp_clatinfo_send);
+#endif
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+	mif_dt_read_u32(np, "mif,int_ap2cp_pcie_link_ack", mbox->int_ap2cp_pcie_link_ack);
+#endif
+	mif_dt_read_u32(np, "mif,int_ap2cp_uart_noti", mbox->int_ap2cp_uart_noti);
+
+	/* CP-to-AP interrupt numbers */
+	mif_dt_read_u32(np, "mif,irq_cp2ap_msg", mbox->irq_cp2ap_msg);
+	mif_dt_read_u32(np, "mif,irq_cp2ap_status", mbox->irq_cp2ap_status);
+	mif_dt_read_u32(np, "mif,irq_cp2ap_active", mbox->irq_cp2ap_active);
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	mif_dt_read_u32(np, "mif,irq_cp2ap_clatinfo_ack", mbox->irq_cp2ap_clatinfo_ack);
+#endif
+	mif_dt_read_u32(np, "mif,irq_cp2ap_wakelock", mbox->irq_cp2ap_wakelock);
+	mif_dt_read_u32(np, "mif,irq_cp2ap_ratmode", mbox->irq_cp2ap_rat_mode);
+
+	return ret;
+}
+
/* Parse the shared-memory IPC region layout (queue offsets/sizes, control
 * message offsets, status-bit positions) from DT into pdata.
 *
 * NOTE(review): mif_dt_read_u32() appears to fail the function on a missing
 * mandatory property, while *_noerr variants treat the property as optional;
 * confirm in modem_utils.h before restructuring. Return values of
 * of_property_read_u32_array() are deliberately ignored here (the properties
 * seem optional) -- verify against the DT bindings.
 */
static int parse_dt_ipc_region_pdata(struct device *dev, struct device_node *np,
				     struct modem_data *pdata)
{
	int ret = 0;

	/* legacy buffer (fmt, raw) setting */
	mif_dt_read_u32(np, "legacy_fmt_head_tail_offset",
			pdata->legacy_fmt_head_tail_offset);
	mif_dt_read_u32(np, "legacy_fmt_buffer_offset", pdata->legacy_fmt_buffer_offset);
	mif_dt_read_u32(np, "legacy_fmt_txq_size", pdata->legacy_fmt_txq_size);
	mif_dt_read_u32(np, "legacy_fmt_rxq_size", pdata->legacy_fmt_rxq_size);
	mif_dt_read_u32(np, "legacy_raw_head_tail_offset",
			pdata->legacy_raw_head_tail_offset);
	mif_dt_read_u32(np, "legacy_raw_buffer_offset", pdata->legacy_raw_buffer_offset);
	mif_dt_read_u32(np, "legacy_raw_txq_size", pdata->legacy_raw_txq_size);
	mif_dt_read_u32(np, "legacy_raw_rxq_size", pdata->legacy_raw_rxq_size);
	mif_dt_read_u32(np, "legacy_raw_rx_buffer_cached",
			pdata->legacy_raw_rx_buffer_cached);

	/* optional per-field offsets inside the shared region */
	mif_dt_read_u32_noerr(np, "offset_ap_version", pdata->offset_ap_version);
	mif_dt_read_u32_noerr(np, "offset_cp_version", pdata->offset_cp_version);
	mif_dt_read_u32_noerr(np, "offset_cmsg_offset", pdata->offset_cmsg_offset);
	mif_dt_read_u32_noerr(np, "offset_srinfo_offset", pdata->offset_srinfo_offset);
	mif_dt_read_u32_noerr(np, "offset_clk_table_offset", pdata->offset_clk_table_offset);
	mif_dt_read_u32_noerr(np, "offset_buff_desc_offset", pdata->offset_buff_desc_offset);
	mif_dt_read_u32_noerr(np, "offset_capability_offset", pdata->offset_capability_offset);

#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
	/* legacy priority queue setting */
	mif_dt_read_u32(np, "legacy_raw_qos_head_tail_offset",
			pdata->legacy_raw_qos_head_tail_offset);
	mif_dt_read_u32(np, "legacy_raw_qos_buffer_offset", pdata->legacy_raw_qos_buffer_offset);
	mif_dt_read_u32(np, "legacy_raw_qos_txq_size", pdata->legacy_raw_qos_txq_size);
	mif_dt_read_u32(np, "legacy_raw_qos_rxq_size", pdata->legacy_raw_qos_rxq_size);
#endif

	/* control message offset setting (optional)*/
	mif_dt_read_u32_noerr(np, "cmsg_offset", pdata->cmsg_offset);
	/* srinfo settings */
	mif_dt_read_u32(np, "srinfo_offset", pdata->srinfo_offset);
	mif_dt_read_u32(np, "srinfo_size", pdata->srinfo_size);
	/* clk_table offset (optional)*/
	mif_dt_read_u32_noerr(np, "clk_table_offset", pdata->clk_table_offset);
	/* offset setting for new SIT buffer descriptors (optional) */
	mif_dt_read_u32_noerr(np, "buff_desc_offset", pdata->buff_desc_offset);

	/* offset setting for capability; mandatory only when the common DT
	 * parse enabled capability_check
	 */
	if (pdata->capability_check) {
		mif_dt_read_u32(np, "capability_offset", pdata->capability_offset);
		mif_dt_read_u32(np, "ap_capability_0", pdata->ap_capability[0]);
		mif_dt_read_u32(np, "ap_capability_1", pdata->ap_capability[1]);
	}

	/* each of these properties is a <index, mask/pos> style pair */
	of_property_read_u32_array(np, "ap2cp_msg", pdata->ap2cp_msg, 2);
	of_property_read_u32_array(np, "cp2ap_msg", pdata->cp2ap_msg, 2);
	of_property_read_u32_array(np, "cp2ap_united_status", pdata->cp2ap_united_status, 2);
	of_property_read_u32_array(np, "ap2cp_united_status", pdata->ap2cp_united_status, 2);
#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
	/* NOTE(review): these use mif_dt_count_u32_array() while the block
	 * above uses of_property_read_u32_array() -- presumably equivalent
	 * wrappers; verify the helper's semantics
	 */
	mif_dt_count_u32_array(np, "ap2cp_clatinfo_xlat_v4_addr",
			       pdata->ap2cp_clatinfo_xlat_v4_addr, 2);
	mif_dt_count_u32_array(np, "ap2cp_clatinfo_xlat_addr_0",
			       pdata->ap2cp_clatinfo_xlat_addr_0, 2);
	mif_dt_count_u32_array(np, "ap2cp_clatinfo_xlat_addr_1",
			       pdata->ap2cp_clatinfo_xlat_addr_1, 2);
	mif_dt_count_u32_array(np, "ap2cp_clatinfo_xlat_addr_2",
			       pdata->ap2cp_clatinfo_xlat_addr_2, 2);
	mif_dt_count_u32_array(np, "ap2cp_clatinfo_xlat_addr_3",
			       pdata->ap2cp_clatinfo_xlat_addr_3, 2);
	mif_dt_count_u32_array(np, "ap2cp_clatinfo_index",
			       pdata->ap2cp_clatinfo_index, 2);
#endif
	of_property_read_u32_array(np, "ap2cp_kerneltime", pdata->ap2cp_kerneltime, 2);
	of_property_read_u32_array(np, "ap2cp_kerneltime_sec", pdata->ap2cp_kerneltime_sec, 2);
	of_property_read_u32_array(np, "ap2cp_kerneltime_usec", pdata->ap2cp_kerneltime_usec, 2);
	of_property_read_u32_array(np, "ap2cp_handover_block_info",
				   pdata->ap2cp_handover_block_info, 2);

	/* Status Bit Info: mask/position of each field inside the united
	 * status words exchanged via mailbox
	 */
	mif_dt_read_u32(np, "sbi_lte_active_mask", pdata->sbi_lte_active_mask);
	mif_dt_read_u32(np, "sbi_lte_active_pos", pdata->sbi_lte_active_pos);
	mif_dt_read_u32(np, "sbi_cp_status_mask", pdata->sbi_cp_status_mask);
	mif_dt_read_u32(np, "sbi_cp_status_pos", pdata->sbi_cp_status_pos);
	mif_dt_read_u32(np, "sbi_cp_rat_mode_mask", pdata->sbi_cp2ap_rat_mode_mask);
	mif_dt_read_u32(np, "sbi_cp_rat_mode_pos", pdata->sbi_cp2ap_rat_mode_pos);
	mif_dt_read_u32(np, "sbi_cp2ap_wakelock_mask", pdata->sbi_cp2ap_wakelock_mask);
	mif_dt_read_u32(np, "sbi_cp2ap_wakelock_pos", pdata->sbi_cp2ap_wakelock_pos);
	mif_dt_read_u32(np, "sbi_pda_active_mask", pdata->sbi_pda_active_mask);
	mif_dt_read_u32(np, "sbi_pda_active_pos", pdata->sbi_pda_active_pos);
	mif_dt_read_u32(np, "sbi_ap_status_mask", pdata->sbi_ap_status_mask);
	mif_dt_read_u32(np, "sbi_ap_status_pos", pdata->sbi_ap_status_pos);
	mif_dt_read_u32(np, "sbi_crash_type_mask", pdata->sbi_crash_type_mask);
	mif_dt_read_u32(np, "sbi_crash_type_pos", pdata->sbi_crash_type_pos);
#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
	mif_dt_read_u32(np, "sbi_lcd_status_mask", pdata->sbi_lcd_status_mask);
	mif_dt_read_u32(np, "sbi_lcd_status_pos", pdata->sbi_lcd_status_pos);
#endif
	mif_dt_read_u32(np, "sbi_uart_noti_mask", pdata->sbi_uart_noti_mask);
	mif_dt_read_u32(np, "sbi_uart_noti_pos", pdata->sbi_uart_noti_pos);
	mif_dt_read_u32(np, "sbi_ds_det_mask", pdata->sbi_ds_det_mask);
	mif_dt_read_u32(np, "sbi_ds_det_pos", pdata->sbi_ds_det_pos);
	mif_dt_read_u32_noerr(np, "sbi_ap2cp_kerneltime_sec_mask",
			      pdata->sbi_ap2cp_kerneltime_sec_mask);
	mif_dt_read_u32_noerr(np, "sbi_ap2cp_kerneltime_sec_pos",
			      pdata->sbi_ap2cp_kerneltime_sec_pos);
	mif_dt_read_u32_noerr(np, "sbi_ap2cp_kerneltime_usec_mask",
			      pdata->sbi_ap2cp_kerneltime_usec_mask);
	mif_dt_read_u32_noerr(np, "sbi_ap2cp_kerneltime_usec_pos",
			      pdata->sbi_ap2cp_kerneltime_usec_pos);

	/* Check pktproc use 36bit addr */
	mif_dt_read_u32(np, "pktproc_use_36bit_addr",
			pdata->pktproc_use_36bit_addr);

	return ret;
}
+
+static int parse_dt_iodevs_pdata(struct device *dev, struct device_node *np,
+ struct modem_data *pdata)
+{
+ struct device_node *child = NULL;
+
+ for_each_child_of_node(np, child) {
+ struct modem_io_t *p_iod = NULL;
+ struct modem_io_t *iod;
+ unsigned int ch_count = 0;
+ char *name;
+
+ do {
+ iod = devm_kzalloc(dev, sizeof(struct modem_io_t), GFP_KERNEL);
+ if (!iod) {
+ mif_err("failed to alloc iodev\n");
+ return -ENOMEM;
+ }
+
+ if (!p_iod) {
+ mif_dt_read_string(child, "iod,name", name);
+ mif_dt_read_u32(child, "iod,ch", iod->ch);
+ mif_dt_read_enum(child, "iod,format", iod->format);
+ mif_dt_read_enum(child, "iod,io_type", iod->io_type);
+ mif_dt_read_u32(child, "iod,link_type", iod->link_type);
+ mif_dt_read_u32(child, "iod,attrs", iod->attrs);
+ mif_dt_read_u32_noerr(child, "iod,max_tx_size",
+ iod->ul_buffer_size);
+
+ if (iod->attrs & IO_ATTR_SBD_IPC) {
+ mif_dt_read_u32(child, "iod,ul_num_buffers",
+ iod->ul_num_buffers);
+ mif_dt_read_u32(child, "iod,ul_buffer_size",
+ iod->ul_buffer_size);
+ mif_dt_read_u32(child, "iod,dl_num_buffers",
+ iod->dl_num_buffers);
+ mif_dt_read_u32(child, "iod,dl_buffer_size",
+ iod->dl_buffer_size);
+ }
+
+ if (iod->attrs & IO_ATTR_OPTION_REGION)
+ mif_dt_read_string(child, "iod,option_region",
+ iod->option_region);
+
+ if (iod->attrs & IO_ATTR_MULTI_CH)
+ mif_dt_read_u32(child, "iod,ch_count", iod->ch_count);
+
+ p_iod = iod;
+ } else {
+ memcpy(iod, p_iod, sizeof(struct modem_io_t));
+ }
+
+ if (ch_count < iod->ch_count) {
+ scnprintf(iod->name, sizeof(iod->name), "%s%d", name, ch_count);
+ iod->ch = p_iod->ch + ch_count;
+ } else {
+ scnprintf(iod->name, sizeof(iod->name), "%s", name);
+ }
+
+ pdata->iodevs[pdata->num_iodevs] = iod;
+ pdata->num_iodevs++;
+ } while (++ch_count < iod->ch_count);
+ }
+
+ mif_info("num_iodevs:%d\n", pdata->num_iodevs);
+
+ return 0;
+}
+
/* Build a modem_data platform-data structure from the device tree.
 *
 * Runs the four parse stages in order (common -> mbox -> ipc region ->
 * iodevs) and publishes the result via dev->platform_data.
 *
 * Returns the populated modem_data on success, ERR_PTR(-ENOMEM) when the
 * top-level allocation fails, ERR_PTR(-EINVAL) on any parse failure.
 */
static struct modem_data *modem_if_parse_dt_pdata(struct device *dev)
{
	struct modem_data *pdata;
	struct device_node *iodevs_node = NULL;

	pdata = devm_kzalloc(dev, sizeof(struct modem_data), GFP_KERNEL);
	if (!pdata) {
		mif_err("modem_data: alloc fail\n");
		return ERR_PTR(-ENOMEM);
	}

	if (parse_dt_common_pdata(dev->of_node, pdata)) {
		mif_err("DT error: failed to parse common\n");
		goto error;
	}

	if (parse_dt_mbox_pdata(dev, dev->of_node, pdata)) {
		mif_err("DT error: failed to parse mbox\n");
		goto error;
	}

	if (parse_dt_ipc_region_pdata(dev, dev->of_node, pdata)) {
		mif_err("DT error: failed to parse control messages\n");
		goto error;
	}

	/* io devices live under a dedicated "iodevs" child node */
	iodevs_node = of_get_child_by_name(dev->of_node, "iodevs");
	if (!iodevs_node) {
		mif_err("DT error: failed to get child node\n");
		goto error;
	}

	if (parse_dt_iodevs_pdata(dev, iodevs_node, pdata)) {
		mif_err("DT error: failed to parse iodevs\n");
		goto error;
	}

	dev->platform_data = pdata;
	mif_info("DT parse complete!\n");

	return pdata;

error:
	/* explicit devm_kfree() is technically redundant (devres would free
	 * these on probe failure anyway) but releases the memory early;
	 * pdata is known non-NULL here, the guard is belt-and-braces
	 */
	if (pdata) {
		unsigned int id;

		for (id = 0; id < ARRAY_SIZE(pdata->iodevs); id++) {
			if (pdata->iodevs[id])
				devm_kfree(dev, pdata->iodevs[id]);
		}

		devm_kfree(dev, pdata);
	}
	return ERR_PTR(-EINVAL);
}
+
/* DT compatible strings handled by this driver */
static const struct of_device_id cpif_dt_match[] = {
	{ .compatible = "samsung,exynos-cp", },
	{},
};
MODULE_DEVICE_TABLE(of, cpif_dt_match);
+
/* Number of SIM slots exposed via /proc/simslot_count; the enum value is
 * the count itself (printed with %u in simslot_count())
 */
enum mif_sim_mode {
	MIF_SIM_NONE = 0,
	MIF_SIM_SINGLE,
	MIF_SIM_DUAL,
	MIF_SIM_TRIPLE,
};
+
+static int simslot_count(struct seq_file *m, void *v)
+{
+ enum mif_sim_mode mode = (enum mif_sim_mode)(uintptr_t)(m->private);
+
+ seq_printf(m, "%u\n", mode);
+ return 0;
+}
+
/* proc open handler: bind the single-shot seq_file show to the entry's data */
static int simslot_count_open(struct inode *inode, struct file *file)
{
	return single_open(file, simslot_count, pde_data(inode));
}
+
/* /proc/simslot_count file operations; __maybe_unused because the proc
 * entry is only created when CONFIG_GPIO_DS_DETECT is enabled
 */
static const struct file_operations __maybe_unused simslot_count_fops = {
	.open = simslot_count_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
+
#if IS_ENABLED(CONFIG_GPIO_DS_DETECT)
/* Detect single/dual SIM from the DS-detect GPIO and publish the result
 * as /proc/simslot_count.
 *
 * Falls back to MIF_SIM_SINGLE when the GPIO is missing or busy; the proc
 * entry is created in every case (mode is smuggled through the private
 * data pointer, hence the (void *)(long) cast).
 */
static enum mif_sim_mode get_sim_mode(struct device_node *of_node)
{
	enum mif_sim_mode mode = MIF_SIM_SINGLE;
	int gpio_ds_det;
	int retval;

	gpio_ds_det = of_get_named_gpio(of_node, "mif,gpio_ds_det", 0);
	if (!gpio_is_valid(gpio_ds_det)) {
		mif_err("DT error: failed to get sim mode\n");
		goto make_proc;
	}

	retval = gpio_request(gpio_ds_det, "DS_DET");
	if (retval) {
		mif_err("Failed to request GPIO(%d)\n", retval);
		goto make_proc;
	} else {
		gpio_direction_input(gpio_ds_det);
	}

	/* GPIO high means a second SIM is present */
	retval = gpio_get_value(gpio_ds_det);
	if (retval)
		mode = MIF_SIM_DUAL;

	mif_info("sim_mode: %d\n", mode);
	gpio_free(gpio_ds_det);

make_proc:
	if (!proc_create_data("simslot_count", 0444, NULL, &simslot_count_fops,
			(void *)(long)mode)) {
		mif_err("Failed to create proc\n");
		mode = MIF_SIM_SINGLE;
	}

	return mode;
}
#else
/* No DS-detect GPIO support: assume a single SIM */
static enum mif_sim_mode get_sim_mode(struct device_node *of_node)
{
	return MIF_SIM_SINGLE;
}
#endif
+
/* sysfs "do_cp_crash" store: force a CP crash, using the written string as
 * the crash reason. Always consumes the whole write.
 */
static ssize_t do_cp_crash_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	modem_force_crash_exit_ext(buf);

	return count;
}
+
+static ssize_t modem_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct modem_ctl *mc = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", cp_state_str(mc->phone_state));
+}
+
/* sysfs attributes: write-only crash trigger, read-only state */
static DEVICE_ATTR_WO(do_cp_crash);
static DEVICE_ATTR_RO(modem_state);

static struct attribute *modem_attrs[] = {
	&dev_attr_do_cp_crash.attr,
	&dev_attr_modem_state.attr,
	NULL,
};
ATTRIBUTE_GROUPS(modem);
+
+static int cpif_cdev_alloc_region(struct modem_data *pdata, struct modem_shared *msd)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&msd->cdev_major, 0, pdata->num_iodevs, "cpif");
+ if (ret < 0) {
+ mif_err("alloc_chrdev_region() failed:%d\n", ret);
+ return ret;
+ }
+
+ msd->cdev_class = class_create(THIS_MODULE, "cpif");
+ if (IS_ERR(msd->cdev_class)) {
+ mif_err("class_create() failed:%ld\n", PTR_ERR(msd->cdev_class));
+ ret = -ENOMEM;
+ unregister_chrdev_region(MAJOR(msd->cdev_major), pdata->num_iodevs);
+ return ret;
+ }
+
+ return 0;
+}
+
/* Platform probe: parse DT, create the modem shared data, control device,
 * link device and io devices, and wire them together.
 *
 * NOTE(review): on any failure this probe deliberately calls panic() at the
 * "fail" label -- the CP interface is treated as essential. The return after
 * panic() is unreachable; confirm this policy is still intended.
 */
static int cpif_probe(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct modem_data *pdata = NULL;
	struct modem_shared *msd;
	struct modem_ctl *modemctl;
	struct io_device **iod;
	struct link_device *ld;
	enum mif_sim_mode sim_mode;
	int err;

	mif_info("Exynos CP interface driver %s\n", get_cpif_driver_version());
	mif_info("%s: +++ (%s)\n", pdev->name, CONFIG_OPTION_REGION);

	/* 36-bit DMA mask -- matches the pktproc_use_36bit_addr DT option */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
	if (err) {
		mif_err("dma_set_mask_and_coherent() error:%d\n", err);
		goto fail;
	}

	if (dev->of_node) {
		pdata = modem_if_parse_dt_pdata(dev);
		if (IS_ERR(pdata)) {
			mif_err("MIF DT parse error!\n");
			err = PTR_ERR(pdata);
			goto fail;
		}
	} else {
		/* pdata is initialized to NULL above, so a non-DT probe
		 * always fails here: this driver is DT-only
		 */
		if (!pdata) {
			mif_err("Non-DT, incorrect pdata!\n");
			err = -EINVAL;
			goto fail;
		}
	}

	msd = create_modem_shared_data(pdev);
	if (!msd) {
		mif_err("%s: msd == NULL\n", pdata->name);
		err = -ENOMEM;
		goto fail;
	}

	modemctl = create_modemctl_device(pdev, msd);
	if (!modemctl) {
		mif_err("%s: modemctl == NULL\n", pdata->name);
		devm_kfree(dev, msd);
		err = -ENOMEM;
		goto fail;
	}

	/* logbuffer registration is best-effort; NULL simply disables it */
	modemctl->log = logbuffer_register("cpif");
	if (IS_ERR_OR_NULL(modemctl->log)) {
		mif_err("Failed to register logbuffer!\n");
		modemctl->log = NULL;
	}

	/* get the s5910 node pointer */
	modemctl->s5910_dev = NULL;
	if (dev->of_node) {
		struct device_node *np;
		struct device *dp;
		struct device_link *link;

		/* optional clock-buffer (s5910) device; a device link makes
		 * PM ordering follow the supplier
		 */
		np = of_parse_phandle(dev->of_node, "google,clk-buffer", 0);
		if (np) {
			dp = s5910_get_device(np);
			if (dp) {
				modemctl->s5910_dev = dp;
				link = device_link_add(dev, dp, 0);
				if (link)
					mif_info("%s s5910 linked\n",
						pdata->name);
			}
		}

#if IS_ENABLED(CONFIG_CP_PMIC)
		/* optional dedicated CP PMIC, same linking scheme */
		modemctl->pmic_dev = NULL;

		np = of_parse_phandle(dev->of_node, "google,cp-pmic-spmi", 0);
		if (np) {
			dp = pmic_get_device(np);
			if (dp) {
				modemctl->pmic_dev = dp;
				link = device_link_add(dev, dp, 0);
				if (link)
					mif_info("%s: cp pmic device linked\n", pdata->name);
			}
		}
#endif
	}

	if (toe_dev_create(pdev)) {
		mif_err("%s: toe dev not created\n", pdata->name);
		goto free_mc;
	}

	/* create link device */
	ld = call_link_init_func(pdev, pdata->link_type);
	if (!ld)
		goto free_mc;

	mif_info("%s: %s link created\n", pdata->name, ld->name);

	ld->mc = modemctl;
	ld->msd = msd;
	list_add(&ld->list, &msd->link_dev_list);

	/* get sim mode */
	sim_mode = get_sim_mode(dev->of_node);

	/* char device */
	err = cpif_cdev_alloc_region(pdata, msd);
	if (err) {
		mif_err("cpif_cdev_alloc_region() err:%d\n", err);
		goto free_mc;
	}

	/* create io deivces and connect to modemctl device */
	iod = kcalloc(pdata->num_iodevs, sizeof(*iod), GFP_KERNEL);
	if (!iod) {
		mif_err("kcalloc() err\n");
		goto free_chrdev;
	}

	for (i = 0; i < pdata->num_iodevs; i++) {
		/* skip dual-SIM-only devices on single-SIM hardware */
		if (sim_mode < MIF_SIM_DUAL &&
		    pdata->iodevs[i]->attrs & IO_ATTR_DUALSIM)
			continue;

		/* skip devices bound to a different build region */
		if (pdata->iodevs[i]->attrs & IO_ATTR_OPTION_REGION &&
		    strcmp(pdata->iodevs[i]->option_region, CONFIG_OPTION_REGION))
			continue;

		iod[i] = create_io_device(pdev, pdata->iodevs[i], msd,
					  modemctl, pdata);
		if (!iod[i]) {
			mif_err("%s: iod[%d] == NULL\n", pdata->name, i);
			goto free_iod;
		}

		/* Basically, iods of IPC_FMT and IPC_BOOT will receive the state */
		if (iod[i]->format == IPC_FMT || iod[i]->format == IPC_BOOT ||
		    iod[i]->attrs & IO_ATTR_STATE_RESET_NOTI)
			list_add_tail(&iod[i]->list, &modemctl->modem_state_notify_list);

		attach_devices(iod[i], dev);
	}

	cp_btl_create(&pdata->btl, dev);

	platform_set_drvdata(pdev, modemctl);

	/* iod[] was only a temporary lookup table for this loop */
	kfree(iod);

	if (sysfs_create_groups(&dev->kobj, modem_groups))
		mif_err("failed to create modem groups node\n");

#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
	err = cpif_qos_init_list();
	if (err < 0)
		mif_err("failed to initialize hiprio list(%d)\n", err);
#endif

#if IS_ENABLED(CONFIG_CPIF_VENDOR_HOOK)
	err = hook_init();
	if (err)
		mif_err("failed to register vendor hook\n");
#endif

	mif_info("%s: done ---\n", pdev->name);
	return 0;

free_iod:
	for (i = 0; i < pdata->num_iodevs; i++) {
		if (iod[i]) {
			sipc5_deinit_io_device(iod[i]);
			devm_kfree(dev, iod[i]);
		}
	}
	kfree(iod);

free_chrdev:
	class_destroy(msd->cdev_class);
	unregister_chrdev_region(MAJOR(msd->cdev_major), pdata->num_iodevs);

free_mc:
	if (modemctl) {
		call_modem_uninit_func(modemctl, pdata);
		devm_kfree(dev, modemctl);
	}

	if (msd)
		devm_kfree(dev, msd);

	/* paths arriving here via goto had no specific errno */
	err = -ENOMEM;

fail:
	mif_err("%s: xxx\n", pdev->name);

	panic("CP interface driver probe failed\n");
	return err;
}
+
+static void cpif_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct modem_ctl *mc = dev_get_drvdata(dev);
+
+ if (mc->ops.power_shutdown)
+ mc->ops.power_shutdown(mc);
+
+ mc->phone_state = STATE_OFFLINE;
+
+ mif_err("%s\n", mc->name);
+}
+
+static int modem_suspend(struct device *pdev)
+{
+ int ret = 0;
+ struct modem_ctl *mc = dev_get_drvdata(pdev);
+
+ if (mc->ops.suspend)
+ ret = mc->ops.suspend(mc);
+
+ if (!ret) {
+#if defined(CPIF_WAKEPKT_SET_MARK)
+ atomic_set(&mc->mark_skb_wakeup, 1);
+#endif
+ set_wakeup_packet_log(true);
+ }
+
+ return ret;
+}
+
+static int modem_resume(struct device *pdev)
+{
+ struct modem_ctl *mc = dev_get_drvdata(pdev);
+
+ set_wakeup_packet_log(false);
+
+ if (mc->ops.resume)
+ mc->ops.resume(mc);
+
+ return 0;
+}
+
/* noirq-phase PM callbacks: run after device interrupts are disabled */
static const struct dev_pm_ops cpif_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(modem_suspend, modem_resume)
};

static struct platform_driver cpif_driver = {
	.probe = cpif_probe,
	.shutdown = cpif_shutdown,
	.driver = {
		.name = "cp_interface",
		.owner = THIS_MODULE,
		.pm = &cpif_pm_ops,
		/* no unbind/rebind via sysfs: probe failure panics anyway */
		.suppress_bind_attrs = true,
#if IS_ENABLED(CONFIG_OF)
		.of_match_table = of_match_ptr(cpif_dt_match),
#endif
	},
};

module_platform_driver(cpif_driver);

MODULE_DESCRIPTION("Exynos CP interface Driver");
MODULE_LICENSE("GPL");
diff --git a/modem_prj.h b/modem_prj.h
new file mode 100644
index 0000000..194211c
--- /dev/null
+++ b/modem_prj.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#ifndef __MODEM_PRJ_H__
+#define __MODEM_PRJ_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+#include <linux/completion.h>
+#include <linux/pm_wakeup.h>
+#include <linux/spinlock.h>
+#include <linux/cdev.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+#if IS_ENABLED(CONFIG_EXYNOS_ITMON)
+#include <soc/google/exynos-itmon.h>
+#endif
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+#include <linux/pci.h>
+#if IS_ENABLED(CONFIG_GS_S2MPU)
+#include <soc/google/s2mpu.h>
+#endif
+#include <misc/logbuffer.h>
+#endif
+#include "modem_v1.h"
+
+#include "include/circ_queue.h"
+#include "include/sipc5.h"
+#include "include/exynos_ipc.h"
+
+/* #define DEBUG_MODEM_IF_LINK_TX */
+/* #define DEBUG_MODEM_IF_LINK_RX */
+
+/* #define DEBUG_MODEM_IF_IODEV_TX */
+/* #define DEBUG_MODEM_IF_IODEV_RX */
+
+/* #define DEBUG_MODEM_IF_FLOW_CTRL */
+
+/* #define DEBUG_MODEM_IF_PS_DATA */
+/* #define DEBUG_MODEM_IF_IP_DATA */
+
/*
 * IOCTL commands
 *
 * These values are a userspace ABI (RIL/boot daemon); do not renumber.
 */
#define IOCTL_MAGIC 'o'

#define IOCTL_POWER_ON _IO(IOCTL_MAGIC, 0x19)
#define IOCTL_POWER_OFF _IO(IOCTL_MAGIC, 0x20)

/* Boot mode requested by userspace; explicit values are ABI, the gaps are
 * presumably reserved/retired modes -- do not renumber
 */
enum cp_boot_mode {
	CP_BOOT_MODE_NORMAL,
	CP_BOOT_MODE_DUMP,
	CP_BOOT_RE_INIT,
	CP_BOOT_MODE_SILENT,
	CP_BOOT_REQ_CP_RAM_LOGGING = 5,
	CP_BOOT_MODE_MANUAL = 7,
	CP_BOOT_EXT_BAAW = 11,
	CP_BOOT_MODE_NORMAL_BL1 = 32,
	CP_BOOT_MODE_NORMAL_BOOTLOADER,
	CP_BOOT_MODE_DUMP_BL1,
	CP_BOOT_MODE_DUMP_BOOTLOADER,
	CP_BOOT_MODE_DUMP_PARTIAL,
	CP_BOOT_MODE_DUMP_WARM,

	MAX_CP_BOOT_MODE
};
/* ioctl argument wrapper for the boot-mode ioctls below */
struct boot_mode {
	enum cp_boot_mode idx;
};
#define IOCTL_POWER_RESET _IOW(IOCTL_MAGIC, 0x21, struct boot_mode)
#define IOCTL_START_CP_BOOTLOADER _IOW(IOCTL_MAGIC, 0x22, struct boot_mode)
#define IOCTL_COMPLETE_NORMAL_BOOTUP _IO(IOCTL_MAGIC, 0x23)
#define IOCTL_GET_CP_STATUS _IO(IOCTL_MAGIC, 0x27)
#define IOCTL_START_CP_DUMP _IO(IOCTL_MAGIC, 0x32)
#define IOCTL_TRIGGER_CP_CRASH _IO(IOCTL_MAGIC, 0x34)
#define IOCTL_TRIGGER_KERNEL_PANIC _IO(IOCTL_MAGIC, 0x35)
+
/* CP firmware image descriptor passed from userspace.
 * binary is a userspace pointer carried as u64 to keep the layout identical
 * for 32/64-bit userspace (__packed for the same reason).
 */
struct cp_image {
	unsigned long long binary;
	u32 size;
	u32 m_offset;
	u32 b_offset;
	u32 mode;
	u32 len;
} __packed;
#define IOCTL_LOAD_CP_IMAGE _IOW(IOCTL_MAGIC, 0x40, struct cp_image)

/* GNSS firmware image descriptor (load and read-back) */
struct gnss_image {
	u32 firmware_size;
	u32 offset;
	char *firmware_bin;
} __packed;
#define IOCTL_LOAD_GNSS_IMAGE _IOW(IOCTL_MAGIC, 0x41, struct gnss_image)
#define IOCTL_READ_GNSS_IMAGE _IOR(IOCTL_MAGIC, 0x42, struct gnss_image)

#define IOCTL_GET_SRINFO _IO(IOCTL_MAGIC, 0x45)
#define IOCTL_SET_SRINFO _IO(IOCTL_MAGIC, 0x46)
#define IOCTL_GET_CP_BOOTLOG _IO(IOCTL_MAGIC, 0x47)
#define IOCTL_CLR_CP_BOOTLOG _IO(IOCTL_MAGIC, 0x48)
+
/* Log dump */
#define IOCTL_MIF_LOG_DUMP _IO(IOCTL_MAGIC, 0x51)

/* Selects which log region IOCTL_GET_LOG_DUMP extracts */
enum cp_log_dump_index {
	LOG_IDX_SHMEM,
	LOG_IDX_VSS,
	LOG_IDX_ACPM,
	LOG_IDX_CP_BTL,
	LOG_IDX_DATABUF_DL,
	LOG_IDX_DATABUF_UL,
	LOG_IDX_L2B,
	LOG_IDX_DDM,
	MAX_LOG_DUMP_IDX
};
/* ioctl argument: name/size are filled for the requested idx */
struct cp_log_dump {
	char name[32];
	enum cp_log_dump_index idx;
	u32 size;
} __packed;
#define IOCTL_GET_LOG_DUMP _IOWR(IOCTL_MAGIC, 0x52, struct cp_log_dump)

/* Arguments for the secure-mode (SMC) request ioctl */
struct modem_sec_req {
	u32 mode;
	u32 param2;
	u32 param3;
	u32 param4;
} __packed;
#define IOCTL_REQ_SECURITY _IOW(IOCTL_MAGIC, 0x53, struct modem_sec_req)
+
/* Crash Reason */
#define CP_CRASH_INFO_SIZE 512
#define CP_CRASH_TAG "CP Crash "

/* Crash-reason codes shared with RIL/CP; explicit values are ABI, the
 * *_RSV_MAX entries delimit the ranges owned by RIL / MIF / CP
 */
enum crash_type {
	CRASH_REASON_CP_ACT_CRASH = 0,
	CRASH_REASON_RIL_MNR,
	CRASH_REASON_RIL_REQ_FULL,
	CRASH_REASON_RIL_PHONE_DIE,
	CRASH_REASON_RIL_RSV_MAX,
	CRASH_REASON_USER = 5,
	CRASH_REASON_MIF_TX_ERR = 6,
	CRASH_REASON_MIF_RIL_BAD_CH,
	CRASH_REASON_MIF_RX_BAD_DATA,
	CRASH_REASON_RIL_TRIGGER_CP_CRASH,
	CRASH_REASON_MIF_FORCED,
	CRASH_REASON_CP_WDOG_CRASH,
	CRASH_REASON_MIF_RSV_MAX = 12,
	CRASH_REASON_CP_SRST,
	CRASH_REASON_CP_RSV_0,
	CRASH_REASON_CP_RSV_MAX,
	CRASH_REASON_CLD = 16,
	/* "FALIURE" [sic]: misspelled but part of the established name --
	 * renaming would break out-of-view users
	 */
	CRASH_REASON_PCIE_DOORBELL_FALIURE_AP2CP_IRQ,
	CRASH_REASON_PCIE_LINKDOWN_ERROR,
	CRASH_REASON_PCIE_CPL_TIMEOUT_ERROR,
	CRASH_REASON_PCIE_DOORBELL_FAILURE_POWERON,
	CRASH_REASON_PCIE_DOORBELL_FAILURE_POWEROFF = 21,
	CRASH_REASON_PCIE_LINKDOWN_RECOVERY_FAILURE = 22,
	CRASH_REASON_NONE = 0xFFFF,
};

/* Crash reason code plus a human-readable description */
struct crash_reason {
	u32 type;
	char string[CP_CRASH_INFO_SIZE];
} __packed;
#define IOCTL_GET_CP_CRASH_REASON _IOR(IOCTL_MAGIC, 0x55, struct crash_reason)

#define CPIF_VERSION_SIZE 20
/* Driver version string reported to userspace */
struct cpif_version {
	char string[CPIF_VERSION_SIZE];
} __packed;
#define IOCTL_GET_CPIF_VERSION _IOR('o', 0x56, struct cpif_version)
+
#define CPID_LEN 15
#define CPSIG_LEN 64

/* Hardware/build identification block handed over to the CP at boot */
struct t_handover_block_info {
	u32 version; /* version */
	u32 project_id; /* project id */
	u32 revision; /* revision */
	u32 major_id; /* major_id */
	u32 minor_id; /* minor_id */
	u32 modem_sku; /* modem sku */
	u32 modem_hw; /* modem hw */
	u32 cpinfo0;
	u32 cpinfo1;
	u32 cpinfo2;
	u32 rf_sub;
	u32 rf_config; /* rf config */
	u32 reserved[4];
	char cpid[2][CPID_LEN + 1];  /* NUL-terminated CP id strings */
	char cpsig[CPSIG_LEN + 1];   /* NUL-terminated CP signature */
} __packed;
#define IOCTL_HANDOVER_BLOCK_INFO _IO('o', 0x57)

#define IOCTL_SET_SPI_BOOT_MODE _IO('o', 0x58)

#define IOCTL_GET_OPENED_STATUS _IOR(IOCTL_MAGIC, 0x59, int)

#define IOCTL_SILENT_RESET _IO(IOCTL_MAGIC, 0x60)
+
/*
 * Definitions for IO devices
 */
#define MAX_IOD_RXQ_LEN 2048


#define IPv6 6
#define SOURCE_MAC_ADDR {0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC}

/* Loopback */
#define DATA_LOOPBACK_CHANNEL 31

/* Debugging features */
#define MIF_LOG_DIR "/sdcard/log"
#define MIF_MAX_PATH_LEN 256

/* Modem (CP) state machine as seen by the AP. Values up to
 * STATE_CRASH_WATCHDOG are shared with userspace -- do not renumber.
 */
enum modem_state {
	STATE_OFFLINE,
	STATE_CRASH_RESET, /* silent reset */
	STATE_CRASH_EXIT, /* cp ramdump */
	STATE_BOOTING,
	STATE_ONLINE,
	STATE_NV_REBUILDING, /* <= rebuilding start */
	STATE_LOADER_DONE,
	STATE_SIM_ATTACH, /* Deprecated */
	STATE_SIM_DETACH, /* Deprecated */
	STATE_CRASH_WATCHDOG, /* cp watchdog crash */

	/* Internal states */
	STATE_RESET, /* normal reset */
};

/* Intervals in ms for the reset noti via poll */
#define STATE_RESET_INTERVAL_MS (200)
+
/* Link-device state */
enum link_state {
	LINK_STATE_OFFLINE = 0,
	LINK_STATE_IPC,
	LINK_STATE_CP_CRASH
};

/* Link speed policy during/after boot */
enum link_mode {
	LINK_MODE_MIN_SPEED_BOOTING,
	LINK_MODE_MAX_SPEED_BOOTING,
	LINK_MODE_ADAPTIVE_SPEED_BOOTED,
};

/* Supported modem chip variants */
enum modem_variant {
	MODEM_SEC_5300,
	MODEM_SEC_5400
};

/* Accumulated CP sleep statistics */
struct cp_power_stats {
	u64 count; /* count state was entered */
	u64 duration_usec; /* total time (usecs) in state */
	u64 last_entry_timestamp_usec; /* timestamp(usecs since boot) of last time entered */
	u64 last_exit_timestamp_usec; /* timestamp(usecs since boot) of last time exited */
	bool suspended; /* whether the modem is currently in sleep */
};

/* Parameters of a pending secure-boot request */
struct sec_info {
	enum cp_boot_mode mode;
	u32 size;
};

/* SIPC multi-frame control byte: top bit = "more frames follow",
 * low 7 bits = frame id
 */
#define SIPC_MULTI_FRAME_MORE_BIT (0x80)
#define SIPC_MULTI_FRAME_ID_MASK (0x7F)
#define SIPC_MULTI_FRAME_ID_BITS 7
#define NUM_SIPC_MULTI_FRAME_IDS (1 << SIPC_MULTI_FRAME_ID_BITS)
+
/* On-the-wire SIPC FMT message header */
struct __packed sipc_fmt_hdr {
	u16 len;
	u8 msg_seq;
	u8 ack_seq;
	u8 main_cmd;
	u8 sub_cmd;
	u8 cmd_type;
};

/* Channel 0, 5, 6, 27, 255 are reserved in SIPC5.
 * see SIPC5 spec: 2.2.2 Channel Identification (Ch ID) Field.
 * They do not need to store in `iodevs_tree_fmt'
 */
#define sipc5_is_not_reserved_channel(ch) \
	((ch) != 0 && (ch) != 5 && (ch) != 6 && (ch) != 27 && (ch) != 255)

/* mark value for high priority packet, hex QOSH */
#define RAW_HPRIO 0x514F5348
+
/** struct skbuff_priv - private data of struct sk_buff
 * this is matched to char cb[48] of struct sk_buff
 * (skbpriv() below enforces the size constraint with BUILD_BUG_ON)
 */
struct skbuff_private {
	struct io_device *iod;
	struct link_device *ld;

	/* for time-stamping */
	struct timespec64 ts;

	u32 sipc_ch:8, /* SIPC Channel Number */
	frm_ctrl:8, /* Multi-framing control */
	reserved:14,
	lnk_hdr:1, /* Existence of a link-layer header */
	rx_clat:1; /* IP converted by Rx CLAT */

	struct napi_struct *napi;
} __packed;

/* Overlay accessor for the skb control block; compile-time checked to fit */
static inline struct skbuff_private *skbpriv(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct skbuff_private) > sizeof(skb->cb));
	return (struct skbuff_private *)&skb->cb;
}
+
/* One logical IO channel to the CP: backs either a char device or a net
 * device, depending on io_typ.
 */
struct io_device {
	struct list_head list;

	/* rb_tree node for an io device */
	struct rb_node node_fmt;

	/* Name of the IO device */
	char *name;

	/* Reference count */
	atomic_t opened;

	/* Wait queue for the IO device */
	wait_queue_head_t wq;

	/* char device and net device structures for the IO device */
	struct cdev cdev;
	struct device *cdevice;
	struct net_device *ndev;
	struct list_head node_ndev;
#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
	struct list_head node_all_ndev;
#endif

	/* clat net device */
	struct net_device *clat_ndev;
	/* spinlock to hold clat net device */
	spinlock_t clat_lock;

	/* CH and Format for channel on the link */
	unsigned int ch;
	u32 link_type;
	u32 format;
	u32 io_typ;	/* [sic] "io_typ", not io_type -- keep for out-of-view users */

	/* Attributes of an IO device (IO_ATTR_* flags) */
	u32 attrs;

	/* The size of maximum Tx packet */
	unsigned int max_tx_size;

	/* SIPC version */
	u32 ipc_version;

	/* Whether or not IPC is over SBD-based link device */
	bool sbd_ipc;

	/* Whether or not link-layer header is required */
	bool link_header;

	/* Rx queue of sk_buff */
	struct sk_buff_head sk_rx_q;

	/* For keeping multi-frame packets temporarily */
	struct sk_buff_head sk_multi_q[NUM_SIPC_MULTI_FRAME_IDS];

	/*
	 * work for each io device, when delayed work needed
	 * use this for private io device rx action
	 */
	struct delayed_work rx_work;

	/* Information ID for supporting 'Multi FMT'
	 * reference SIPC Spec. 2.2.4
	 */
	u8 info_id;
	spinlock_t info_id_lock;

	int (*recv_skb_single)(struct io_device *iod, struct link_device *ld,
			       struct sk_buff *skb);

	int (*recv_net_skb)(struct io_device *iod, struct link_device *ld,
			    struct sk_buff *skb);

	struct modem_ctl *mc;
	struct modem_shared *msd;

	struct wakeup_source *ws;
	long waketime;

	/* DO NOT use __current_link directly
	 * you MUST use skbpriv(skb)->ld in mc, link, etc..
	 */
	struct link_device *__current_link;

	struct exynos_seq_num seq_num;
	u8 packet_index;
};
#define to_io_device(_cdev) container_of(_cdev, struct io_device, cdev)

/* get_current_link, set_current_link don't need to use locks.
 * In ARM, set_current_link and get_current_link are compiled to
 * each one instruction (str, ldr) as atomic_set, atomic_read.
 * And, the order of set_current_link and get_current_link is not important.
 */
#define get_current_link(iod) ((iod)->__current_link)
#define set_current_link(iod, ld) ((iod)->__current_link = (ld))
+
/* One physical transport to the CP (shared memory, PCIe, ...): framing
 * helpers, flow control and the method table the io devices call into.
 */
struct link_device {
	struct list_head list;
	u32 link_type;
	u32 interrupt_types;

	struct modem_ctl *mc;
	struct modem_shared *msd;
	struct device *dev;

	char *name;
	bool sbd_ipc;
	bool aligned;

	/* framing protocol parameters and frame/channel inspection hooks */
	u32 protocol;
	u8 chid_fmt_0;
	u8 chid_rfs_0;
	u32 magic_boot;
	u32 magic_crash;
	u32 magic_dump;
	u32 magic_ipc;
	bool (*is_start_valid)(u8 *frm);
	bool (*is_padding_exist)(u8 *frm);
	bool (*is_multi_frame)(u8 *frm);
	bool (*has_ext_len)(u8 *frm);
	u8 (*get_ch)(u8 *frm);
	u8 (*get_ctrl)(u8 *frm);
	u32 (*calc_padding_size)(u32 len);
	u32 (*get_hdr_len)(u8 *frm);
	u32 (*get_frame_len)(u8 *frm);
	u32 (*get_total_len)(u8 *frm);
	bool (*is_fmt_ch)(u8 ch);
	bool (*is_ps_ch)(u8 ch);
	bool (*is_rfs_ch)(u8 ch);
	bool (*is_boot_ch)(u8 ch);
	bool (*is_dump_ch)(u8 ch);
	bool (*is_bootdump_ch)(u8 ch);
	bool (*is_ipc_ch)(u8 ch);
	bool (*is_csd_ch)(u8 ch);
	bool (*is_log_ch)(u8 ch);
	bool (*is_router_ch)(u8 ch);
	bool (*is_misc_ch)(u8 ch);
	bool (*is_embms_ch)(u8 ch);
	bool (*is_uts_ch)(u8 ch);
	bool (*is_wfs0_ch)(u8 ch);
	bool (*is_wfs1_ch)(u8 ch);
	bool (*is_oem_ch)(u8 ch);

	/* SIPC version */
	u32 ipc_version;

	/* capability check */
	u32 capability_check;

	bool hiprio_ack_only;

	/* Modem data */
	struct modem_data *mdm_data;

	/* Stop/resume control for network ifaces */
	spinlock_t netif_lock;

	/* bit mask for stopped channel */
	unsigned long tx_flowctrl_mask;

	/* flag of stopped state for all channels */
	atomic_t netif_stopped;

	struct workqueue_struct *rx_wq;

	/* CP interface network rx management */
	struct cpif_netrx_mng *cpif_netrx_mng;

	/* Save reason of forced crash */
	struct crash_reason crash_reason;

	int (*init_comm)(struct link_device *ld, struct io_device *iod);
	void (*terminate_comm)(struct link_device *ld, struct io_device *iod);

	/* called by an io_device when it has a packet to send over link
	 * - the io device is passed so the link device can look at id and
	 * format fields to determine how to route/format the packet
	 */
	int (*send)(struct link_device *ld, struct io_device *iod,
		    struct sk_buff *skb);

	/* method for CP booting */
	int (*load_cp_image)(struct link_device *ld, struct io_device *iod, unsigned long arg);
	int (*load_gnss_image)(struct link_device *ld, struct io_device *iod, unsigned long arg);
	int (*read_gnss_image)(struct link_device *ld, struct io_device *iod, unsigned long arg);
	void (*link_prepare_normal_boot)(struct link_device *ld, struct io_device *iod);
	int (*link_start_normal_boot)(struct link_device *ld, struct io_device *iod);
	int (*link_start_partial_boot)(struct link_device *ld, struct io_device *iod);

	void (*link_trigger_cp_crash)(struct mem_link_device *mld, u32 crash_reason_owner,
				      char *crash_reason_string);
	int (*link_start_dump_boot)(struct link_device *ld, struct io_device *iod);

	/* IOCTL extension */
	int (*ioctl)(struct link_device *ld, struct io_device *iod,
		     unsigned int cmd, unsigned long arg);

	/* Close (stop) TX with physical link (on CP crash, etc.) */
	void (*close_tx)(struct link_device *ld);

	/* Change secure mode, Call SMC API */
	int (*security_req)(struct link_device *ld, struct io_device *iod,
			    unsigned long arg);

	/* Get crash reason form modem_if driver */
	int (*get_cp_crash_reason)(struct link_device *ld, struct io_device *iod,
				   unsigned long arg);

	int (*enable_rx_int)(struct link_device *ld);
	int (*disable_rx_int)(struct link_device *ld);

	void (*start_timers)(struct mem_link_device *mld);
	void (*stop_timers)(struct mem_link_device *mld);

	int (*handover_block_info)(struct link_device *ld, unsigned long arg);
};
+
+static inline struct sk_buff *rx_alloc_skb(unsigned int length,
+		struct io_device *iod, struct link_device *ld)
+{
+	/* Allocate an RX skb and tag it with its owning IO/link device. */
+	struct sk_buff *skb = dev_alloc_skb(length);
+
+	if (unlikely(!skb))
+		return NULL;
+
+	skbpriv(skb)->iod = iod;
+	skbpriv(skb)->ld = ld;
+
+	return skb;
+}
+
+/* Callbacks a modem-control backend provides for CP power, boot,
+ * crash-dump, and system PM handling. All take the modem_ctl instance;
+ * return values are int status codes.
+ */
+struct modemctl_ops {
+	/* CP power-rail / reset control */
+	int (*power_on)(struct modem_ctl *mc);
+	int (*power_off)(struct modem_ctl *mc);
+	int (*power_shutdown)(struct modem_ctl *mc);
+	int (*power_reset)(struct modem_ctl *mc);
+	int (*power_reset_dump)(struct modem_ctl *mc, bool silent);
+	int (*silent_reset)(struct modem_ctl *mc);
+	int (*power_reset_partial)(struct modem_ctl *mc);
+	int (*power_reset_warm)(struct modem_ctl *mc);
+
+	/* normal (non-crash) boot sequence stages */
+	int (*start_normal_boot)(struct modem_ctl *mc);
+	int (*complete_normal_boot)(struct modem_ctl *mc);
+	int (*start_normal_boot_bl1)(struct modem_ctl *mc);
+	int (*start_normal_boot_bootloader)(struct modem_ctl *mc);
+	int (*start_dump_boot_bl1)(struct modem_ctl *mc);
+	int (*start_dump_boot_bootloader)(struct modem_ctl *mc);
+	int (*start_dump_boot_partial)(struct modem_ctl *mc);
+
+	/* forced crash and crash-dump boot */
+	int (*trigger_cp_crash)(struct modem_ctl *mc);
+	int (*start_dump_boot)(struct modem_ctl *mc);
+
+	/* system suspend/resume hooks */
+	int (*suspend)(struct modem_ctl *mc);
+	int (*resume)(struct modem_ctl *mc);
+};
+
+/* for IPC Logger */
+struct mif_storage {
+	char *addr;		/* backing buffer for logger records */
+	unsigned int cnt;	/* amount currently stored — unit (bytes vs entries) TODO confirm */
+};
+
+/* modem_shared - shared data for all io/link devices and a modem ctl
+ * msd : mc : iod : ld = 1 : 1 : M : N
+ */
+struct modem_shared {
+	/* list of link devices */
+	struct list_head link_dev_list;
+
+	/* list of activated ndev */
+	struct list_head activated_ndev_list;
+	spinlock_t active_list_lock;
+
+	/* Array of pointers to IO devices corresponding to ch[n] */
+	struct io_device *ch2iod[IOD_CH_ID_MAX];
+
+	/* Array of active channels */
+	u8 ch[IOD_CH_ID_MAX];
+
+	/* The number of active channels in the array @ch[] */
+	unsigned int num_channels;
+
+	/* rb_tree root of io devices. */
+	struct rb_root iodevs_tree_fmt; /* group by dev_format */
+
+	/* for IPC Logger */
+	struct mif_storage storage;
+	spinlock_t lock;
+
+	/* loopbacked IP address
+	 * default is 0.0.0.0 (disabled)
+	 * once set, IP packet loopback can be used via this IP, e.g.:
+	 * echo 1.2.3.4 > /sys/devices/virtual/misc/umts_multipdp/loopback
+	 */
+	__be32 loopback_ipaddr;
+
+	/* char device */
+	dev_t cdev_major;
+	struct class *cdev_class;
+};
+
+/* Per-modem control state: power/boot state machine, IRQ bookkeeping,
+ * status-bit layout, and (for PCIe links) link power management.
+ */
+struct modem_ctl {
+	struct device *dev;
+	char *name;
+	enum modem_variant variant;
+	struct modem_data *mdm_data;
+	struct modem_shared *msd;
+	struct device *s5910_dev;	/* companion clock-buffer device — TODO confirm role */
+#if IS_ENABLED(CONFIG_CP_PMIC)
+	struct device *pmic_dev;
+#endif
+
+	/* current CP state (STATE_OFFLINE/BOOTING/ONLINE/CRASH_*, see cp_*() helpers) */
+	enum modem_state phone_state;
+
+	/* spin lock for each modem_ctl instance */
+	spinlock_t lock;
+	spinlock_t tx_timer_lock;
+
+	/* list for notify to opened iod when changed modem state */
+	struct list_head modem_state_notify_list;
+
+	/* completion for waiting for CP initialization */
+	struct completion init_cmpl;
+
+	/* completion for waiting for CP power-off */
+	struct completion off_cmpl;
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	/* completion for waiting for cp2ap clatinfo ack */
+	struct completion clatinfo_ack;
+#endif
+
+	/* for broadcasting AP's PM state (active or sleep) */
+	unsigned int int_pda_active;
+	unsigned int int_cp_wakeup;
+	/* for checking aliveness of CP */
+	unsigned int irq_phone_active;
+
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+	/* for broadcasting AP LCD state */
+	unsigned int int_lcd_status;
+#endif
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_SHMEM)
+	/* for notifying UART connection state with direction */
+	unsigned int int_uart_noti;
+
+	/* for checking aliveness of CP */
+	struct modem_irq irq_cp_wdt;
+	struct modem_irq irq_cp_fail;
+
+	/* Status Bit Info: mask/position pairs within the shared status word */
+	unsigned int sbi_lte_active_mask;
+	unsigned int sbi_lte_active_pos;
+	unsigned int sbi_cp_status_mask;
+	unsigned int sbi_cp_status_pos;
+
+	unsigned int sbi_pda_active_mask;
+	unsigned int sbi_pda_active_pos;
+	unsigned int sbi_ap_status_mask;
+	unsigned int sbi_ap_status_pos;
+
+	unsigned int sbi_uart_noti_mask;
+	unsigned int sbi_uart_noti_pos;
+
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+	unsigned int sbi_lcd_status_mask;
+	unsigned int sbi_lcd_status_pos;
+#endif
+
+	unsigned int ap2cp_cfg_addr;
+	void __iomem *ap2cp_cfg_ioaddr;
+#endif
+
+	unsigned int sbi_crash_type_mask;
+	unsigned int sbi_crash_type_pos;
+
+	unsigned int sbi_ds_det_mask;
+	unsigned int sbi_ds_det_pos;
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+	struct irq_chip *apwake_irq_chip;
+	struct irq_chip *cp_wrst_irq_chip;
+	struct pci_dev *s51xx_pdev;
+	struct workqueue_struct *wakeup_wq;
+	struct work_struct wakeup_work;
+	struct work_struct suspend_work;
+	struct workqueue_struct *crash_wq;
+	struct work_struct crash_work;
+
+	struct wakeup_source *ws;
+	struct wakeup_source *ws_wrst;
+	struct mutex pcie_onoff_lock;
+	struct mutex pcie_check_lock;
+	spinlock_t pcie_tx_lock;
+	spinlock_t pcie_pm_lock;
+	struct pci_driver pci_driver;
+
+	int pcie_ch_num;
+	/* retry counters: *_all accumulate across resets — TODO confirm */
+	int pcie_linkdown_retry_cnt;
+	int pcie_linkdown_retry_cnt_all;
+	int pcie_cto_retry_cnt;
+	int pcie_cto_retry_cnt_all;
+	int sbb_debug;
+
+	bool reserve_doorbell_int;
+	bool pcie_registered;
+	bool pcie_powered_on;
+	bool pcie_pm_suspended;
+	bool pcie_pm_resume_wait;
+	int pcie_pm_resume_gpio_val;
+	bool device_reboot;
+
+#if IS_ENABLED(CONFIG_CPIF_AP_SUSPEND_DURING_VOICE_CALL)
+	bool pcie_voice_call_on;
+	struct work_struct call_on_work;
+	struct work_struct call_off_work;
+	struct notifier_block call_state_nb;
+#endif
+
+	struct notifier_block force_crash_nb;
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_GPIO_WA)
+	atomic_t dump_toggle_issued;
+#endif
+
+	struct cpif_gpio cp_gpio[CP_GPIO_MAX];
+	struct modem_irq cp_gpio_irq[CP_GPIO_IRQ_MAX];
+
+	bool s5100_cp_reset_required;
+
+#if IS_ENABLED(CONFIG_GS_S2MPU)
+	struct s2mpu_info *s2mpu;
+#endif
+
+	struct notifier_block reboot_nb;
+	struct notifier_block pm_notifier;
+#endif
+
+	struct notifier_block send_panic_nb;
+	struct notifier_block abox_call_state_nb;
+
+	struct modemctl_ops ops;
+	struct io_device *iod;
+	struct io_device *bootd;
+
+#if IS_ENABLED(CONFIG_EXYNOS_ITMON)
+	struct notifier_block itmon_nb;
+#endif
+
+	void (*modem_complete)(struct modem_ctl *mc);
+
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+	struct notifier_block lcd_notifier;
+#endif
+
+	struct cp_power_stats cp_power_stats;
+	spinlock_t power_stats_lock;
+#if defined(CPIF_WAKEPKT_SET_MARK)
+	atomic_t mark_skb_wakeup;
+#endif
+	struct logbuffer *log;
+
+	u32 tp_threshold;
+	u32 tp_hysteresis;
+	bool pcie_dynamic_spd_enabled;
+
+	bool cp_ever_powered_on;
+};
+
+static inline bool cp_offline(struct modem_ctl *mc)
+{
+	/* A missing controller counts as "offline". */
+	return !mc || mc->phone_state == STATE_OFFLINE;
+}
+
+static inline bool cp_online(struct modem_ctl *mc)
+{
+	return mc && mc->phone_state == STATE_ONLINE;
+}
+
+static inline bool cp_booting(struct modem_ctl *mc)
+{
+	return mc && mc->phone_state == STATE_BOOTING;
+}
+
+static inline bool cp_crashed(struct modem_ctl *mc)
+{
+	/* Both crash-exit and watchdog-crash states count as "crashed". */
+	return mc && (mc->phone_state == STATE_CRASH_EXIT ||
+		      mc->phone_state == STATE_CRASH_WATCHDOG);
+}
+
+static inline bool rx_possible(struct modem_ctl *mc)
+{
+	/* RX is accepted while CP is online, booting, or dumping after a crash. */
+	return cp_online(mc) || cp_booting(mc) || cp_crashed(mc);
+}
+
+u16 exynos_build_fr_config(struct io_device *iod, struct link_device *ld,
+ unsigned int count);
+void exynos_build_header(struct io_device *iod, struct link_device *ld,
+ u8 *buff, u16 cfg, u8 ctl, size_t count);
+u8 sipc5_build_config(struct io_device *iod, struct link_device *ld,
+ unsigned int count);
+void sipc5_build_header(struct io_device *iod, u8 *buff, u8 cfg,
+ unsigned int tx_bytes, unsigned int remains);
+void vnet_setup(struct net_device *ndev);
+const struct file_operations *get_bootdump_io_fops(void);
+const struct file_operations *get_ipc_io_fops(void);
+int sipc5_init_io_device(struct io_device *iod, struct mem_link_device *mld);
+void sipc5_deinit_io_device(struct io_device *iod);
+
+#if IS_ENABLED(CONFIG_CPIF_VENDOR_HOOK)
+int hook_init(void);
+#endif
+
+#endif
diff --git a/modem_toe_device.c b/modem_toe_device.c
new file mode 100644
index 0000000..1e08835
--- /dev/null
+++ b/modem_toe_device.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * MODEM TOE device support
+ *
+ */
+
+#include "modem_toe_device.h"
+#include "dit.h"
+#include "link_device.h"
+
+#define TOE_DEV_NAME "umts_toe0"
+
+static struct toe_ctrl_t *tc;
+
+/* Attach (or detach) the CLAT IPv4 netdev to the IO device whose name
+ * matches the IPv6 interface in @args (a struct clat_info).
+ * An empty ipv4_iface clears the association. dev_get_by_name() takes a
+ * reference which is handed to iod->clat_ndev; the previous holder's
+ * reference is dropped under clat_lock.
+ */
+void toe_set_iod_clat_netdev(struct io_device *iod, void *args)
+{
+	struct clat_info *clat = (struct clat_info *) args;
+	struct net_device *ndev = NULL;
+	unsigned long flags;
+
+	/* only act on the iod backing the IPv6 side of this CLAT instance */
+	if (strncmp(iod->name, clat->ipv6_iface, IFNAMSIZ) != 0)
+		return;
+
+	if (clat->ipv4_iface[0])
+		ndev = dev_get_by_name(&init_net, clat->ipv4_iface);
+
+	/* proceed when clearing, or when the v4 netdev lookup succeeded */
+	if (!clat->ipv4_iface[0] || ndev) {
+		spin_lock_irqsave(&iod->clat_lock, flags);
+		/* drop the reference held on the previous clat netdev */
+		if (iod->clat_ndev)
+			dev_put(iod->clat_ndev);
+
+		if (ndev)
+			ndev->features |= NETIF_F_GRO_UDP_FWD;
+		iod->clat_ndev = ndev;
+		spin_unlock_irqrestore(&iod->clat_lock, flags);
+
+#if IS_ENABLED(CONFIG_CPIF_TP_MONITOR)
+		if (iod->clat_ndev) {
+			struct link_device *ld = get_current_link(iod);
+			struct mem_link_device *mld = to_mem_link_device(ld);
+
+			mif_info("set RPS again\n");
+			mld->tpmon->reset_data("RPS");
+		}
+#endif
+
+		mif_info("%s clat netdev[%d] ch: %d, iface v6/v4: %s/%s\n",
+			 (ndev ? "set" : "clear"), clat->clat_index, iod->ch,
+			 clat->ipv6_iface, clat->ipv4_iface);
+	}
+}
+
+/* No per-open state; always succeeds. */
+static int toe_dev_open(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/* Nothing to tear down on close. */
+static int toe_dev_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/* Device produces no events; poll always reports nothing ready. */
+static unsigned int toe_dev_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	return 0;
+}
+
+/* Device has no readable data; read always returns 0 (EOF). */
+static ssize_t toe_dev_read(struct file *filp, char *buf, size_t count, loff_t *fpos)
+{
+	return 0;
+}
+
+/* ioctl interface for the CLAT HAL: marks HAL readiness, sets the number
+ * of CLAT interfaces, and pushes per-interface CLAT configuration to the
+ * offload device. Returns 0, -EFAULT on bad user pointer, -EINVAL on an
+ * unknown command or rejected clat info.
+ */
+static long toe_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct clat_info clat;
+	u32 ready, num;
+	struct mem_link_device *mld = tc->mld;
+
+	switch (cmd) {
+	case IOCTL_TOE_SET_CLAT_READY:
+		if (copy_from_user(&ready, (const void __user *)arg, sizeof(ready)))
+			return -EFAULT;
+
+		tc->clat_hal_ready = (ready ? true : false);
+		break;
+	case IOCTL_TOE_SET_CLAT_IFACES_NUM:
+		if (copy_from_user(&num, (const void __user *)arg, sizeof(num)))
+			return -EFAULT;
+
+		tc->clat_ifaces_num = num;
+		break;
+	case IOCTL_TOE_SET_CLAT_INFO:
+		if (copy_from_user(&clat, (const void __user *)arg, sizeof(struct clat_info)))
+			return -EFAULT;
+
+		/* no offload backend registered, or backend rejected the info */
+		if (!tc->set_clat_info || !tc->set_clat_info(mld, &clat))
+			return -EINVAL;
+		break;
+	default:
+		mif_err("unknown command: 0x%X\n", cmd);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* sysfs read for .../toe/status: one-line summary of CLAT offload state. */
+static ssize_t status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE,
+			 "hal_ready:%d ifaces_num:%d dev_support:%d\n",
+			 tc->clat_hal_ready, tc->clat_ifaces_num, tc->clat_dev_support);
+}
+
+static DEVICE_ATTR_RO(status);
+
+/* attributes published under the "toe" sysfs group */
+static struct attribute *toe_attrs[] = {
+	&dev_attr_status.attr,
+	NULL,
+};
+
+static const struct attribute_group toe_group = {
+	.attrs = toe_attrs,
+	.name = "toe",
+};
+
+/* char-device ops; the ioctl handler carries all real functionality */
+static const struct file_operations toe_dev_fops = {
+	.owner = THIS_MODULE,
+	.open = toe_dev_open,
+	.poll = toe_dev_poll,
+	.read = toe_dev_read,
+	.release = toe_dev_release,
+	/* same handler serves 32-bit compat callers (clat_info is __packed) */
+	.compat_ioctl = toe_dev_ioctl,
+	.unlocked_ioctl = toe_dev_ioctl,
+};
+
+static struct miscdevice toe_dev_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = TOE_DEV_NAME,
+	.fops = &toe_dev_fops,
+};
+
+/* Bind the TOE control block to @mld and select the CLAT offload backend
+ * (DIT if it supports CLAT, otherwise pktproc when enabled).
+ * Must run after toe_dev_create(); returns -EPERM if it did not.
+ */
+int toe_dev_init(struct mem_link_device *mld)
+{
+	if (unlikely(!tc)) {
+		mif_err("toe not created\n");
+		return -EPERM;
+	}
+
+	tc->mld = mld;
+	tc->clat_dev_support = false;
+	tc->set_clat_info = NULL;
+	tc->set_iod_clat_netdev = toe_set_iod_clat_netdev;
+
+	/* Can add the other devs or change the ordering */
+	if (dit_support_clat()) {
+		tc->clat_dev_support = true;
+		tc->set_clat_info = dit_hal_set_clat_info;
+	}
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+	else {
+		tc->clat_dev_support = true;
+		tc->set_clat_info = shmem_ap2cp_write_clatinfo;
+	}
+#endif
+	mld->tc = tc;
+
+	return 0;
+}
+
+/* Allocate the TOE control block, publish the "toe" sysfs group, and
+ * register the umts_toe0 misc device. Returns 0 or a negative errno;
+ * on any failure all partially-created resources are released.
+ */
+int toe_dev_create(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret = 0;
+
+	tc = devm_kzalloc(dev, sizeof(struct toe_ctrl_t), GFP_KERNEL);
+	if (!tc) {
+		mif_err("toe ctrl alloc failed\n");
+		return -ENOMEM;
+	}
+
+	ret = sysfs_create_group(&dev->kobj, &toe_group);
+	if (ret != 0) {
+		mif_err("sysfs_create_group() error %d\n", ret);
+		goto error;
+	}
+
+	ret = misc_register(&toe_dev_misc);
+	if (ret) {
+		mif_err("misc register error\n");
+		/* undo the sysfs group; the original leaked it on this path */
+		goto error_sysfs;
+	}
+
+	return 0;
+
+error_sysfs:
+	sysfs_remove_group(&dev->kobj, &toe_group);
+error:
+	/* tc is non-NULL here (alloc failure returned early above) */
+	devm_kfree(dev, tc);
+	tc = NULL;
+
+	return ret;
+}
diff --git a/modem_toe_device.h b/modem_toe_device.h
new file mode 100644
index 0000000..f017541
--- /dev/null
+++ b/modem_toe_device.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * MODEM TOE device support
+ *
+ */
+
+#ifndef __MODEM_TOE_DEVICE_H__
+#define __MODEM_TOE_DEVICE_H__
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/inet.h>
+
+#include "modem_utils.h"
+
+#define IOCTL_TOE_MAGIC ('T')
+#define IOCTL_TOE_SET_CLAT_READY _IOW(IOCTL_TOE_MAGIC, 0x00, uint32_t)
+#define IOCTL_TOE_SET_CLAT_IFACES_NUM _IOW(IOCTL_TOE_MAGIC, 0x01, uint32_t)
+#define IOCTL_TOE_SET_CLAT_INFO _IOW(IOCTL_TOE_MAGIC, 0x02, struct clat_info)
+
+/* CLAT (464xlat) configuration passed from userspace via
+ * IOCTL_TOE_SET_CLAT_INFO; __packed because it crosses the
+ * user/kernel ABI boundary.
+ */
+struct clat_info {
+	u32 clat_index;				/* CLAT instance index */
+	char ipv6_iface[IFNAMSIZ];		/* cellular (IPv6-side) netdev name */
+	char ipv4_iface[IFNAMSIZ];		/* CLAT (IPv4-side) netdev name; "" clears */
+	struct in6_addr ipv6_local_subnet;
+	struct in_addr ipv4_local_subnet;
+	struct in6_addr plat_subnet;
+} __packed;
+
+/* TOE (TCP offload engine) control block; a single instance is allocated
+ * in toe_dev_create() and wired to the link device in toe_dev_init().
+ */
+struct toe_ctrl_t {
+	bool clat_hal_ready;		/* set by IOCTL_TOE_SET_CLAT_READY */
+	u32 clat_ifaces_num;		/* set by IOCTL_TOE_SET_CLAT_IFACES_NUM */
+	bool clat_dev_support;		/* an offload backend was selected */
+	struct mem_link_device *mld;
+
+	/* backend hook that programs clat info into the offload device */
+	bool (*set_clat_info)(struct mem_link_device *mld, struct clat_info *clat);
+	/* hook that binds the CLAT netdev onto a matching io_device */
+	void (*set_iod_clat_netdev)(struct io_device *iod, void *args);
+};
+
+void toe_set_iod_clat_netdev(struct io_device *iod, void *args);
+int toe_dev_init(struct mem_link_device *mld);
+int toe_dev_create(struct platform_device *pdev);
+
+#endif /* __MODEM_TOE_DEVICE_H__ */
diff --git a/modem_utils.c b/modem_utils.c
new file mode 100644
index 0000000..bdd3eb8
--- /dev/null
+++ b/modem_utils.c
@@ -0,0 +1,694 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 Samsung Electronics.
+ *
+ */
+
+#include <linux/stdarg.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/rtc.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <soc/google/exynos-modem-ctrl.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "cpif_version.h"
+
+#define TX_SEPARATOR "cpif: >>>>>>>>>> Outgoing packet\n"
+#define RX_SEPARATOR "cpif: Incoming packet <<<<<<<<<<\n"
+#define LINE_SEPARATOR \
+ "cpif: ------------------------------------------------------------\n"
+#define PRINT_BUFF_SIZE 4096
+
+/* Bit positions within the dflags/wakeup_dflags module parameters;
+ * each bit enables packet logging for one channel class.
+ */
+enum bit_debug_flags {
+	DEBUG_FLAG_FMT,
+	DEBUG_FLAG_MISC,
+	DEBUG_FLAG_RFS,
+	DEBUG_FLAG_PS,
+	DEBUG_FLAG_BOOT,
+	DEBUG_FLAG_DUMP,
+	DEBUG_FLAG_CSVT,
+	DEBUG_FLAG_LOG,
+	DEBUG_FLAG_BT_DUN, /* for rx/tx of umts_router */
+	DEBUG_FLAG_ALL	   /* fallback bit for unclassified channels */
+};
+
+#define DEBUG_FLAG_DEFAULT (1 << DEBUG_FLAG_FMT | 1 << DEBUG_FLAG_MISC)
+#ifdef DEBUG_MODEM_IF_PS_DATA
+static unsigned long dflags = (DEBUG_FLAG_DEFAULT | 1 << DEBUG_FLAG_RFS | 1 << DEBUG_FLAG_PS);
+#else
+static unsigned long dflags = (DEBUG_FLAG_DEFAULT);
+#endif
+module_param(dflags, ulong, 0664);
+MODULE_PARM_DESC(dflags, "modem_v1 debug flags");
+
+static unsigned long wakeup_dflags =
+ (DEBUG_FLAG_DEFAULT | 1 << DEBUG_FLAG_RFS | 1 << DEBUG_FLAG_PS);
+module_param(wakeup_dflags, ulong, 0664);
+MODULE_PARM_DESC(wakeup_dflags, "modem_v1 wakeup debug flags");
+
+/* When true, get_log_flags() returns the (more verbose) wakeup flag set. */
+static bool wakeup_log_enable;
+inline void set_wakeup_packet_log(bool enable)
+{
+	wakeup_log_enable = enable;
+}
+
+/* Select the active debug-flag set: wakeup flags while wakeup logging is on. */
+inline unsigned long get_log_flags(void)
+{
+	return wakeup_log_enable ? wakeup_dflags : dflags;
+}
+
+/* Decide whether packets on channel @ch should be logged, by classifying
+ * the channel via the link device's is_*_ch() callbacks (first match wins;
+ * order matters) and testing the matching debug-flag bit.
+ */
+static inline bool log_enabled(u8 ch, struct link_device *ld)
+{
+	unsigned long flags = get_log_flags();
+
+	if (ld->is_fmt_ch && ld->is_fmt_ch(ch))
+		return test_bit(DEBUG_FLAG_FMT, &flags);
+	else if (ld->is_boot_ch && ld->is_boot_ch(ch))
+		return test_bit(DEBUG_FLAG_BOOT, &flags);
+	else if (ld->is_dump_ch && ld->is_dump_ch(ch))
+		return test_bit(DEBUG_FLAG_DUMP, &flags);
+	else if (ld->is_rfs_ch && ld->is_rfs_ch(ch))
+		return test_bit(DEBUG_FLAG_RFS, &flags);
+	else if (ld->is_csd_ch && ld->is_csd_ch(ch))
+		return test_bit(DEBUG_FLAG_CSVT, &flags);
+	else if (ld->is_log_ch && ld->is_log_ch(ch))
+		return test_bit(DEBUG_FLAG_LOG, &flags);
+	else if (ld->is_ps_ch && ld->is_ps_ch(ch))
+		return test_bit(DEBUG_FLAG_PS, &flags);
+	else if (ld->is_router_ch && ld->is_router_ch(ch))
+		return test_bit(DEBUG_FLAG_BT_DUN, &flags);
+	else if (ld->is_misc_ch && ld->is_misc_ch(ch))
+		return test_bit(DEBUG_FLAG_MISC, &flags);
+	else
+		return test_bit(DEBUG_FLAG_ALL, &flags);
+}
+
+/* print ipc packet */
+void mif_pkt(u8 ch, const char *tag, struct sk_buff *skb)
+{
+ if (!skbpriv(skb)->ld)
+ return;
+
+ if (!log_enabled(ch, skbpriv(skb)->ld))
+ return;
+
+ if (unlikely(!skb)) {
+ mif_err("ERR! NO skb!!!\n");
+ return;
+ }
+
+ pr_skb(tag, skb, skbpriv(skb)->ld);
+}
+
+/* print buffer as hex string */
+int pr_buffer(const char *tag, const char *data, size_t data_len,
+ size_t max_len)
+{
+ size_t len = min(data_len, max_len);
+ unsigned char str[PR_BUFFER_SIZE * 3]; /* 1 <= sizeof <= max_len*3 */
+
+ if (len > PR_BUFFER_SIZE)
+ len = PR_BUFFER_SIZE;
+
+ dump2hex(str, (len ? len * 3 : 1), data, len);
+
+ /* don't change this printk to mif_debug for print this as level7 */
+ return pr_info("%s: %s(%ld): %s%s\n", MIF_TAG, tag, (long)data_len,
+ str, (len == data_len) ? "" : " ...");
+}
+
+/* Look up the io_device registered for @format in the rb-tree keyed by
+ * dev_format; returns NULL when no such device exists.
+ */
+struct io_device *get_iod_with_format(struct modem_shared *msd,
+				      u32 format)
+{
+	struct rb_node *node = msd->iodevs_tree_fmt.rb_node;
+
+	while (node) {
+		struct io_device *iod = rb_entry(node, struct io_device, node_fmt);
+
+		if (format == iod->format)
+			return iod;
+
+		node = (format < iod->format) ? node->rb_left : node->rb_right;
+	}
+
+	return NULL;
+}
+
+/* Register @iod as the handler for @channel and append the channel to the
+ * active-channel array. Fix: bound-check both table indices — ch2iod[] is
+ * indexed by @channel and ch[] by num_channels, and neither was validated
+ * against IOD_CH_ID_MAX, allowing an out-of-bounds write.
+ */
+void insert_iod_with_channel(struct modem_shared *msd, unsigned int channel,
+			     struct io_device *iod)
+{
+	unsigned int idx = msd->num_channels;
+
+	if (channel >= IOD_CH_ID_MAX || idx >= IOD_CH_ID_MAX) {
+		mif_err("ERR! invalid channel %u (num_channels %u)\n",
+			channel, idx);
+		return;
+	}
+
+	msd->ch2iod[channel] = iod;
+	msd->ch[idx] = channel;
+	msd->num_channels++;
+}
+
+/* Insert @iod into the format-keyed rb-tree. Returns NULL on successful
+ * insertion, or the already-registered io_device if @format is a duplicate
+ * (in which case the tree is left unchanged).
+ */
+struct io_device *insert_iod_with_format(struct modem_shared *msd,
+					 u32 format, struct io_device *iod)
+{
+	struct rb_node **p = &msd->iodevs_tree_fmt.rb_node;
+	struct rb_node *parent = NULL;
+
+	/* standard rb-tree descent to find the insertion slot */
+	while (*p) {
+		struct io_device *iodev;
+
+		parent = *p;
+		iodev = rb_entry(parent, struct io_device, node_fmt);
+		if (format < iodev->format)
+			p = &(*p)->rb_left;
+		else if (format > iodev->format)
+			p = &(*p)->rb_right;
+		else
+			return iodev;
+	}
+
+	rb_link_node(&iod->node_fmt, parent, p);
+	rb_insert_color(&iod->node_fmt, &msd->iodevs_tree_fmt);
+	return NULL;
+}
+
+/* netif wake/stop queue of iod having activated ndev */
+static void netif_tx_flowctl(struct modem_shared *msd, bool tx_stop)
+{
+ struct io_device *iod;
+
+ if (!msd) {
+ mif_err_limited("modem shared data does not exist\n");
+ return;
+ }
+
+ spin_lock(&msd->active_list_lock);
+ list_for_each_entry(iod, &msd->activated_ndev_list, node_ndev) {
+ if (tx_stop)
+ netif_stop_subqueue(iod->ndev, 0);
+ else
+ netif_wake_subqueue(iod->ndev, 0);
+
+#ifdef DEBUG_MODEM_IF_FLOW_CTRL
+ mif_err("tx_stop:%s, iod->ndev->name:%s\n",
+ tx_stop ? "suspend" : "resume",
+ iod->ndev->name);
+#endif
+ }
+ spin_unlock(&msd->active_list_lock);
+}
+
+/* Record @set_mask as a TX flow-control reason and, if TX was not already
+ * stopped, stop all network interfaces. Returns true only when this call
+ * performed the stop transition.
+ */
+bool stop_net_ifaces(struct link_device *ld, unsigned long set_mask)
+{
+	bool ret = false;
+
+	/* mask value 0 means "stop without recording a reason bit" */
+	if (set_mask > 0)
+		cpif_set_bit(ld->tx_flowctrl_mask, set_mask);
+
+	if (!atomic_read(&ld->netif_stopped)) {
+		mif_info_limited("tx queue stopped: tx_flowctrl=%#04lx(set_bit:%lu)\n",
+				 ld->tx_flowctrl_mask, set_mask);
+
+		netif_tx_flowctl(ld->msd, true);
+		atomic_set(&ld->netif_stopped, 1);
+		ret = true;
+	}
+
+	return ret;
+}
+
+/* Clear @clear_mask from the TX flow-control reasons; wake all network
+ * interfaces once no reason bit remains and TX is currently stopped.
+ */
+void resume_net_ifaces(struct link_device *ld, unsigned long clear_mask)
+{
+	cpif_clear_bit(ld->tx_flowctrl_mask, clear_mask);
+
+	if (!ld->tx_flowctrl_mask && atomic_read(&ld->netif_stopped)) {
+		mif_info_limited("tx queue resumed: tx_flowctrl=%#04lx(clear_bit:%lu)\n",
+				 ld->tx_flowctrl_mask, clear_mask);
+
+		netif_tx_flowctl(ld->msd, false);
+		atomic_set(&ld->netif_stopped, 0);
+	}
+}
+
+/*
+ * @brief ipv4 string to be32 (big endian 32bits integer)
+ * @return zero when errors occurred
+ */
+/*
+ * @brief ipv4 string to be32 (big endian 32bits integer)
+ * @return zero when errors occurred
+ *
+ * Fix: if the string contains fewer than four dot-separated octets,
+ * strsep() returns NULL and the original code read uninitialized ip[i]
+ * bytes, returning garbage instead of 0. Treat a missing octet as an
+ * error, and zero-init ip[] defensively.
+ */
+__be32 ipv4str_to_be32(const char *ipv4str, size_t count)
+{
+	unsigned char ip[4] = {0, 0, 0, 0};
+	char ipstr[16]; /* == strlen("xxx.xxx.xxx.xxx") + 1 */
+	char *next = ipstr;
+	int i;
+
+	strlcpy(ipstr, ipv4str, ARRAY_SIZE(ipstr));
+
+	for (i = 0; i < 4; i++) {
+		char *p;
+
+		p = strsep(&next, ".");
+		if (!p || kstrtou8(p, 10, &ip[i]) < 0)
+			return 0; /* == 0.0.0.0 */
+	}
+
+	return *((__be32 *)ip);
+}
+
+/* Arm @timer to fire @expire jiffies from now with @function as callback.
+ * No-op if the timer is already pending (does not re-arm or extend it).
+ */
+void mif_add_timer(struct timer_list *timer, unsigned long expire,
+		   void (*function)(struct timer_list *))
+{
+	if (timer_pending(timer))
+		return;
+
+	timer_setup(timer, function, 0);
+	timer->expires = get_jiffies_64() + expire;
+
+	add_timer(timer);
+}
+
+#ifdef DEBUG_MODEM_IF_IP_DATA
+/* Append a human-readable TCP-header description of @pkt to @buff.
+ * Returns the number of characters written.
+ * Fix: seq and ack_seq are 32-bit big-endian fields, so they must be
+ * converted with ntohl(); the original used ntohs() and printed only a
+ * byte-swapped 16-bit fragment of each value.
+ */
+static int strcat_tcp_header(char *buff, unsigned int maxlen, const u8 *pkt)
+{
+	struct tcphdr *tcph = (struct tcphdr *)pkt;
+	int eol, count = 0;
+	char flag_str[48] = {0, };
+
+/*
+ * -------------------------------------------------------------------------
+
+	TCP Header Format
+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|          Source Port          |       Destination Port        |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|                        Sequence Number                        |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|                    Acknowledgment Number                      |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|  Data |       |C|E|U|A|P|R|S|F|                               |
+	| Offset| Rsvd  |W|C|R|C|S|S|Y|I|            Window             |
+	|       |       |R|E|G|K|H|T|N|N|                               |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|           Checksum            |         Urgent Pointer        |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|                    Options                    |    Padding    |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|                             data                              |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+-------------------------------------------------------------------------
+*/
+
+	count += scnprintf(buff + count, maxlen - count,
+			"%s: TCP:: Src.Port %u, Dst.Port %u\n",
+			MIF_TAG, ntohs(tcph->source), ntohs(tcph->dest));
+
+	/* 32-bit fields: use ntohl, not ntohs */
+	count += scnprintf(buff + count, maxlen - count,
+			"%s: TCP:: SEQ %#08X(%u), ACK %#08X(%u)\n",
+			MIF_TAG, ntohl(tcph->seq), ntohl(tcph->seq),
+			ntohl(tcph->ack_seq), ntohl(tcph->ack_seq));
+
+	if (tcph->cwr)
+		strlcat(flag_str, "CWR ", sizeof(flag_str));
+	if (tcph->ece)
+		strlcat(flag_str, "ECE ", sizeof(flag_str));
+	if (tcph->urg)
+		strlcat(flag_str, "URG ", sizeof(flag_str));
+	if (tcph->ack)
+		strlcat(flag_str, "ACK ", sizeof(flag_str));
+	if (tcph->psh)
+		strlcat(flag_str, "PSH ", sizeof(flag_str));
+	if (tcph->rst)
+		strlcat(flag_str, "RST ", sizeof(flag_str));
+	if (tcph->syn)
+		strlcat(flag_str, "SYN ", sizeof(flag_str));
+	if (tcph->fin)
+		strlcat(flag_str, "FIN ", sizeof(flag_str));
+	/* trim the trailing space left by the last strlcat */
+	eol = strlen(flag_str) - 1;
+	if (eol > 0)
+		flag_str[eol] = 0;
+	count += scnprintf(buff + count, maxlen - count,
+			"%s: TCP:: Flags {%s}\n", MIF_TAG, flag_str);
+
+	count += scnprintf(buff + count, maxlen - count,
+			"%s: TCP:: Window %u, Checksum %#04X, Urgent %u\n", MIF_TAG,
+			ntohs(tcph->window), ntohs(tcph->check), ntohs(tcph->urg_ptr));
+
+	return count;
+}
+
+/* Append a human-readable UDP-header description of @pkt to @buff;
+ * flags DNS traffic (port 53) specially. Returns characters written.
+ */
+static int strcat_udp_header(char *buff, unsigned int maxlen, const u8 *pkt)
+{
+	struct udphdr *udph = (struct udphdr *)pkt;
+	int count = 0;
+
+/*
+ * -------------------------------------------------------------------------
+
+	UDP Header Format
+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|          Source Port          |       Destination Port        |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|            Length             |           Checksum            |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|                             data                              |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+-------------------------------------------------------------------------
+*/
+
+	count += scnprintf(buff + count, maxlen - count,
+			"%s: UDP:: Src.Port %u, Dst.Port %u\n",
+			MIF_TAG, ntohs(udph->source), ntohs(udph->dest));
+
+	count += scnprintf(buff + count, maxlen - count,
+			"%s: UDP:: Length %u, Checksum %#04X\n",
+			MIF_TAG, ntohs(udph->len), ntohs(udph->check));
+
+	/* destination port 53 -> outgoing DNS query */
+	if (ntohs(udph->dest) == 53) {
+		count += scnprintf(buff + count, maxlen - count,
+				"%s: UDP:: DNS query!!!\n", MIF_TAG);
+	}
+
+	/* source port 53 -> incoming DNS response */
+	if (ntohs(udph->source) == 53) {
+		count += scnprintf(buff + count, maxlen - count,
+				"%s: UDP:: DNS response!!!\n", MIF_TAG);
+	}
+
+	return count;
+}
+
+/* Pretty-print the IPv4 header of @ip_pkt (plus its TCP/UDP header when
+ * present) to the kernel log, framed by TX/RX separators. Silently
+ * returns on non-IPv4 packets or allocation failure.
+ */
+void print_ipv4_packet(const u8 *ip_pkt, enum direction dir)
+{
+	char *buff;
+	struct iphdr *iph = (struct iphdr *)ip_pkt;
+	const u8 *pkt = ip_pkt + (iph->ihl << 2);	/* start of L4 header */
+	u16 flags = (ntohs(iph->frag_off) & 0xE000);
+	u16 frag_off = (ntohs(iph->frag_off) & 0x1FFF);
+	int eol, count = 0;
+	char flag_str[16] = {0, };
+
+/*
+ * ---------------------------------------------------------------------------
+	IPv4 Header Format
+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|Version|  IHL  |Type of Service|          Total Length         |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|         Identification        |C|D|M|     Fragment Offset     |
+	|                               |E|F|F|                         |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|  Time to Live |    Protocol   |         Header Checksum       |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|                       Source Address                          |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|                    Destination Address                        |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	|                    Options                    |    Padding    |
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+	IHL - Header Length
+	Flags - Consist of 3 bits
+		The 1st bit is "Congestion" bit.
+		The 2nd bit is "Dont Fragment" bit.
+		The 3rd bit is "More Fragments" bit.
+
+---------------------------------------------------------------------------
+*/
+
+	if (iph->version != 4)
+		return;
+
+	buff = kzalloc(PRINT_BUFF_SIZE, GFP_ATOMIC);
+	if (!buff)
+		return;
+
+	if (dir == TX)
+		pr_err(TX_SEPARATOR);
+	else
+		pr_err(RX_SEPARATOR);
+	pr_err(LINE_SEPARATOR);
+
+	count += scnprintf(buff + count, PRINT_BUFF_SIZE - count,
+		"%s: IP4:: Version %u, Header Length %u, TOS %u, Length %u\n",
+		MIF_TAG, iph->version, (iph->ihl << 2), iph->tos,
+		ntohs(iph->tot_len));
+
+	count += scnprintf(buff + count, PRINT_BUFF_SIZE - count,
+		"%s: IP4:: ID %u, Fragment Offset %u\n", MIF_TAG,
+		ntohs(iph->id), frag_off);
+
+	if (flags & IP_CE)
+		strlcat(flag_str, "CE ", sizeof(flag_str));
+	if (flags & IP_DF)
+		strlcat(flag_str, "DF ", sizeof(flag_str));
+	if (flags & IP_MF)
+		strlcat(flag_str, "MF ", sizeof(flag_str));
+	/* trim the trailing space left by the last strlcat */
+	eol = strlen(flag_str) - 1;
+	if (eol > 0)
+		flag_str[eol] = 0;
+	count += scnprintf(buff + count, PRINT_BUFF_SIZE - count,
+		"%s: IP4:: Flags {%s}\n", MIF_TAG, flag_str);
+
+	count += scnprintf(buff + count, PRINT_BUFF_SIZE - count,
+		"%s: IP4:: TTL %u, Protocol %u, Header Checksum %#04X\n",
+		MIF_TAG, iph->ttl, iph->protocol, ntohs(iph->check));
+
+	count += scnprintf(buff + count, PRINT_BUFF_SIZE - count,
+		"%s: IP4:: Src.IP %pI4, Dst.IP %pI4\n",
+		MIF_TAG, &ip_pkt[12], &ip_pkt[16]);
+
+	switch (iph->protocol) {
+	case 6: /* TCP */
+		count += strcat_tcp_header(buff + count,
+				PRINT_BUFF_SIZE - count, pkt);
+		break;
+
+	case 17: /* UDP */
+		count += strcat_udp_header(buff + count,
+				PRINT_BUFF_SIZE - count, pkt);
+		break;
+
+	default:
+		break;
+	}
+
+	pr_err("%s\n", buff);
+	pr_err(LINE_SEPARATOR);
+
+	kfree(buff);
+}
+#endif /* DEBUG_MODEM_IF_IP_DATA */
+
+/* Initialize a modem_irq descriptor (lock, number, name, request flags).
+ * Fix: strncpy() does not NUL-terminate when the source fills the buffer
+ * and the descriptor is not guaranteed to be zeroed, so terminate
+ * irq->name explicitly.
+ */
+void mif_init_irq(struct modem_irq *irq, unsigned int num, const char *name,
+		  unsigned long flags)
+{
+	spin_lock_init(&irq->lock);
+	irq->num = num;
+	strncpy(irq->name, name, (MAX_NAME_LEN - 1));
+	irq->name[MAX_NAME_LEN - 1] = '\0';
+	irq->flags = flags;
+	mif_info("name:%s num:%d flags:%#08lX\n", name, num, flags);
+}
+
+/* Register @isr for the IRQ described by @irq and enable it as a wakeup
+ * source. Marks the descriptor active/registered on success; returns the
+ * request_irq() error otherwise.
+ */
+int mif_request_irq(struct modem_irq *irq, irq_handler_t isr, void *data)
+{
+	int ret;
+
+	ret = request_irq(irq->num, isr, irq->flags, irq->name, data);
+	if (ret) {
+		mif_err("%s: ERR! request_irq fail (%d)\n", irq->name, ret);
+		return ret;
+	}
+
+	enable_irq_wake(irq->num);
+	irq->active = true;
+	irq->registered = true;
+
+	mif_info("%s(#%d) handler registered (flags:%#08lX)\n",
+		 irq->name, irq->num, irq->flags);
+
+	return 0;
+}
+
+/* Release the IRQ handler registered by mif_request_irq().
+ * Note: does not clear irq->active/registered flags.
+ */
+void mif_free_irq(struct modem_irq *irq, void *data)
+{
+	free_irq(irq->num, data);
+	mif_info("%s(#%d) handler unregistered (flags:%#08lX)\n",
+		 irq->name, irq->num, irq->flags);
+}
+
+/* Enable the IRQ (and its wakeup capability unless marked not_alive),
+ * tracked by irq->active under irq->lock; no-ops when not yet registered
+ * or already active.
+ */
+void mif_enable_irq(struct modem_irq *irq)
+{
+	unsigned long flags;
+
+	if (irq->registered == false)
+		return;
+
+	spin_lock_irqsave(&irq->lock, flags);
+
+	if (irq->active) {
+		mif_err("%s(#%d) is already active <%ps>\n", irq->name, irq->num, CALLER);
+		goto exit;
+	}
+
+	enable_irq(irq->num);
+	/*
+	 * The pad assignment of CP2AP_ACTIVE is not in PAD_ALIVE to be registered wake-up source.
+	 * (Bug 152900487)
+	 * This error can affect the crash dump process.
+	 * CP2AP_ACTIVE is assigned to XEINT_17 on planned form factor designs.
+	 */
+	if (!irq->not_alive)
+		enable_irq_wake(irq->num);
+
+	irq->active = true;
+
+	mif_debug("%s(#%d) is enabled <%ps>\n", irq->name, irq->num, CALLER);
+
+exit:
+	spin_unlock_irqrestore(&irq->lock, flags);
+}
+
+/* Counterpart of mif_enable_irq(): disable the IRQ (nosync) and its wakeup
+ * capability unless marked not_alive; no-ops when unregistered or already
+ * inactive.
+ */
+void mif_disable_irq(struct modem_irq *irq)
+{
+	unsigned long flags;
+
+	if (irq->registered == false)
+		return;
+
+	spin_lock_irqsave(&irq->lock, flags);
+
+	if (!irq->active) {
+		mif_info("%s(#%d) is not active <%ps>\n", irq->name, irq->num, CALLER);
+		goto exit;
+	}
+
+	disable_irq_nosync(irq->num);
+	/*
+	 * The pad assignment of CP2AP_ACTIVE is not in PAD_ALIVE to be registered wake-up source.
+	 * (Bug 152900487)
+	 * This error can affect the crash dump process.
+	 * CP2AP_ACTIVE is assigned to XEINT_17 on planned form factor designs.
+	 */
+	if (!irq->not_alive)
+		disable_irq_wake(irq->num);
+
+	irq->active = false;
+
+	mif_debug("%s(#%d) is disabled <%ps>\n", irq->name, irq->num, CALLER);
+
+exit:
+	spin_unlock_irqrestore(&irq->lock, flags);
+}
+
+/* Drive @gpio to @value. Returns true when the line level actually changed
+ * (false on no-op or invalid GPIO). After a real change, busy-waits
+ * @delay_ms via mdelay() — callers apparently may be in atomic context;
+ * TODO confirm.
+ */
+bool mif_gpio_set_value(struct cpif_gpio *gpio, int value, unsigned int delay_ms)
+{
+	int dup = 0;
+
+	if (!gpio->valid) {
+		mif_err("SET GPIO %d is not valid\n", gpio->num);
+		return false;
+	}
+
+	if (gpio_get_value(gpio->num) == value)
+		dup = 1;
+
+	/* set gpio even if it is set already */
+	gpio_set_value(gpio->num, value);
+
+	/* only the CP warm-reset lines are chatty enough to log */
+	if (!strcmp(gpio->label, "AP2CP_PM_WRST_N") || !strcmp(gpio->label, "AP2CP_CP_WRST_N"))
+		mif_info("SET GPIO %s = %d (wait %dms, dup %d)\n", gpio->label, value, delay_ms, dup);
+
+	if (delay_ms > 0 && !dup)
+		mdelay(delay_ms);
+
+	return (!dup);
+}
+EXPORT_SYMBOL(mif_gpio_set_value);
+
+/* Read @gpio's level (0/1); -EINVAL for an invalid GPIO.
+ * Logs the read at debug level when @log_print is set.
+ */
+int mif_gpio_get_value(struct cpif_gpio *gpio, bool log_print)
+{
+	int level;
+
+	if (!gpio->valid) {
+		mif_err("GET GPIO %d is not valid\n", gpio->num);
+		return -EINVAL;
+	}
+
+	level = gpio_get_value(gpio->num);
+	if (log_print)
+		mif_debug("GET GPIO %s = %d\n", gpio->label, level);
+
+	return level;
+}
+EXPORT_SYMBOL(mif_gpio_get_value);
+
+/* Pulse @gpio to the opposite level for @delay_ms, then restore the
+ * original level. Returns the level read before toggling.
+ */
+int mif_gpio_toggle_value(struct cpif_gpio *gpio, int delay_ms)
+{
+	int value;
+
+	value = mif_gpio_get_value(gpio, false);
+	mif_gpio_set_value(gpio, !value, delay_ms);
+	mif_gpio_set_value(gpio, value, 0);
+
+	return value;
+}
+EXPORT_SYMBOL(mif_gpio_toggle_value);
+
+/* Intentionally empty — presumably kept so callers link against a stable
+ * API; TODO confirm whether any platform provides a real implementation.
+ */
+void mif_stop_logging(void)
+{
+}
+
+/* Expose the driver version string (defined in cpif_version.h). */
+const char *get_cpif_driver_version(void)
+{
+	return cpif_driver_version;
+}
+
+/* Copy @count bytes from userspace into IO memory, staging through a
+ * 256-byte stack bounce buffer (memcpy_toio cannot read user memory
+ * directly). Returns 0, or -EFAULT on a faulting user access.
+ */
+int copy_from_user_memcpy_toio(void __iomem *dst, const void __user *src, size_t count)
+{
+	u8 chunk[256];
+
+	while (count) {
+		size_t len = min(count, sizeof(chunk));
+
+		if (copy_from_user(chunk, src, len))
+			return -EFAULT;
+
+		memcpy_toio(dst, chunk, len);
+		count -= len;
+		dst += len;
+		src += len;
+	}
+
+	return 0;
+}
diff --git a/modem_utils.h b/modem_utils.h
new file mode 100644
index 0000000..b8c657e
--- /dev/null
+++ b/modem_utils.h
@@ -0,0 +1,625 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2011 Samsung Electronics.
+ *
+ */
+
+#ifndef __MODEM_UTILS_H__
+#define __MODEM_UTILS_H__
+
+#include <linux/rbtree.h>
+#include "modem_prj.h"
+#include "link_device_memory.h"
+
+#define CP_CPU_BASE_ADDRESS 0x40000000
+
+#define MIF_TAG "cpif"
+
+#define IS_CONNECTED(iod, ld) ((iod)->link_type == (ld)->link_type)
+
+#define MAX_MIF_BUFF_SIZE 0x80000 /* 512kb */
+#define MAX_MIF_SEPA_SIZE 32
+#define MIF_SEPARATOR "IPC_LOGGER(VER1.1)"
+#define MAX_IPC_SKB_SIZE 4096
+#define MAX_LOG_SIZE 64
+
+#define MAX_LOG_CNT (MAX_MIF_BUFF_SIZE / MAX_LOG_SIZE)
+#define MIF_ID_SIZE sizeof(enum mif_log_id)
+
+#define MAX_IPC_LOG_SIZE \
+ (MAX_LOG_SIZE - sizeof(enum mif_log_id) \
+ - sizeof(unsigned long long) - sizeof(size_t))
+#define MAX_IRQ_LOG_SIZE \
+ (MAX_LOG_SIZE - sizeof(enum mif_log_id) \
+ - sizeof(unsigned long long) - sizeof(struct mif_irq_map))
+#define MAX_COM_LOG_SIZE \
+ (MAX_LOG_SIZE - sizeof(enum mif_log_id) \
+ - sizeof(unsigned long long))
+#define MAX_TIM_LOG_SIZE \
+ (MAX_LOG_SIZE - sizeof(enum mif_log_id) \
+ - sizeof(unsigned long long) - sizeof(struct timespec64))
+
+#define PR_BUFFER_SIZE 128
+
+#define PADDR_LO(paddr) ((paddr) & 0xFFFFFFFF)
+#define PADDR_HI(paddr) (((paddr) >> 32) & 0xF)
+
+enum mif_log_id {
+ MIF_IPC_RL2AP = 1,
+ MIF_IPC_AP2CP,
+ MIF_IPC_CP2AP,
+ MIF_IPC_AP2RL,
+ MIF_IRQ,
+ MIF_COM,
+ MIF_TIME
+};
+
+struct mif_irq_map {
+ u16 magic;
+ u16 access;
+
+ u16 fmt_tx_in;
+ u16 fmt_tx_out;
+ u16 fmt_rx_in;
+ u16 fmt_rx_out;
+
+ u16 raw_tx_in;
+ u16 raw_tx_out;
+ u16 raw_rx_in;
+ u16 raw_rx_out;
+
+ u16 cp2ap;
+};
+
+struct mif_ipc_block {
+ enum mif_log_id id;
+ unsigned long long time;
+ size_t len;
+ char buff[MAX_IPC_LOG_SIZE];
+};
+
+struct mif_irq_block {
+ enum mif_log_id id;
+ unsigned long long time;
+ struct mif_irq_map map;
+ char buff[MAX_IRQ_LOG_SIZE];
+};
+
+struct mif_common_block {
+ enum mif_log_id id;
+ unsigned long long time;
+ char buff[MAX_COM_LOG_SIZE];
+};
+
+struct mif_time_block {
+ enum mif_log_id id;
+ unsigned long long time;
+ struct timespec64 epoch;
+ char buff[MAX_TIM_LOG_SIZE];
+};
+
+enum ipc_layer {
+ LINK,
+ IODEV,
+ APP,
+ MAX_SIPC_LAYER
+};
+
+static const char * const sipc_layer_string[] = {
+	[LINK] = "LNK",
+	[IODEV] = "IOD",
+	[APP] = "APP",
+	[MAX_SIPC_LAYER] = "INVALID"
+};
+
+/* Human-readable name of an IPC layer; bounds-checked.
+ * (Written `static inline const`: `inline` must precede the return-type
+ * qualifiers or GCC emits -Wold-style-declaration.)
+ */
+static inline const char *layer_str(enum ipc_layer layer)
+{
+	if (unlikely(layer >= MAX_SIPC_LAYER))
+		return "INVALID";
+	else
+		return sipc_layer_string[layer];
+}
+
+static const char * const dev_format_string[] = {
+	[IPC_FMT] = "FMT",
+	[IPC_RAW] = "RAW",
+	[IPC_RFS] = "RFS",
+	[IPC_MULTI_RAW] = "MULTI_RAW",
+	[IPC_BOOT] = "BOOT",
+	[IPC_DUMP] = "DUMP",
+	[IPC_CMD] = "CMD",
+	[IPC_DEBUG] = "DEBUG",
+};
+
+/* Human-readable name of an IPC device format; bounds-checked. */
+static inline const char *dev_str(u32 dev)
+{
+	if (unlikely(dev >= MAX_DEV_FORMAT))
+		return "INVALID";
+	else
+		return dev_format_string[dev];
+}
+
+/* TX <-> RX */
+static inline enum direction opposite(enum direction dir)
+{
+	return (dir == TX) ? RX : TX;
+}
+
+static const char * const direction_string[] = {
+	[TX] = "TX",
+	[RX] = "RX"
+};
+
+/* Human-readable name of a transfer direction; bounds-checked. */
+static inline const char *dir_str(enum direction dir)
+{
+	if (unlikely(dir >= MAX_DIR))
+		return "INVALID";
+	else
+		return direction_string[dir];
+}
+
+static const char * const udl_string[] = {
+	[UL] = "UL",
+	[DL] = "DL"
+};
+
+/* Uplink/downlink label; bounds-checked.
+ * (`inline` moved before the `const` return qualifier to silence
+ * -Wold-style-declaration; behavior is unchanged.)
+ */
+static inline const char *udl_str(enum direction dir)
+{
+	if (unlikely(dir >= ULDL))
+		return "INVALID";
+	else
+		return udl_string[dir];
+}
+
+static const char * const q_direction_string[] = {
+	[TX] = "TXQ",
+	[RX] = "RXQ"
+};
+
+/* Queue label for a direction; bounds-checked. */
+static inline const char *q_dir(enum direction dir)
+{
+	if (unlikely(dir >= MAX_DIR))
+		return "INVALID";
+	else
+		return q_direction_string[dir];
+}
+
+static const char * const ipc_direction_string[] = {
+	[TX] = "AP->CP",
+	[RX] = "AP<-CP"
+};
+
+/* AP/CP transfer-direction label; bounds-checked. */
+static inline const char *ipc_dir(enum direction dir)
+{
+	if (unlikely(dir >= MAX_DIR))
+		return "INVALID";
+	else
+		return ipc_direction_string[dir];
+}
+
+static const char * const arrow_direction[] = {
+	[TX] = "->",
+	[RX] = "<-"
+};
+
+/* Arrow glyph for a direction; "><" for out-of-range values. */
+static inline const char *arrow(enum direction dir)
+{
+	if (unlikely(dir >= MAX_DIR))
+		return "><";
+	else
+		return arrow_direction[dir];
+}
+
+static const char * const modem_state_string[] = {
+	[STATE_OFFLINE] = "OFFLINE",
+	[STATE_CRASH_RESET] = "CRASH_RESET",
+	[STATE_CRASH_EXIT] = "CRASH_EXIT",
+	[STATE_BOOTING] = "BOOTING",
+	[STATE_ONLINE] = "ONLINE",
+	[STATE_NV_REBUILDING] = "NV_REBUILDING",
+	[STATE_LOADER_DONE] = "LOADER_DONE",
+	[STATE_SIM_ATTACH] = "SIM_ATTACH",
+	[STATE_SIM_DETACH] = "SIM_DETACH",
+	[STATE_CRASH_WATCHDOG] = "WDT_RESET",
+	[STATE_RESET] = "RESET",
+};
+
+/* Human-readable CP state.
+ * Unlike the other *_str() helpers, the original indexed the table
+ * unchecked; guard against out-of-range states and gaps left by the
+ * designated initializers to avoid an out-of-bounds/NULL read.
+ */
+static inline const char *cp_state_str(enum modem_state state)
+{
+	if ((unsigned int)state >= ARRAY_SIZE(modem_state_string) ||
+	    !modem_state_string[state])
+		return "INVALID";
+
+	return modem_state_string[state];
+}
+
+/* Shorthand: current phone_state of @mc as a string. */
+static inline const char *mc_state(struct modem_ctl *mc)
+{
+	return cp_state_str(mc->phone_state);
+}
+
+/* Packed wall-clock timestamp.  Bit widths are sized to each field's
+ * range: mon 1-12, day 1-31, hour 0-23, min/sec 0-59, us 0-999999.
+ */
+struct __packed utc_time {
+	u32 year:18,
+		mon:4,
+		day:5,
+		hour:5;
+	u32 min:6,
+		sec:6,
+		us:20;
+};
+
+/* {Hour, Minute, Second, U(micro)-second} format */
+#define HMSU_FMT "[%02d:%02d:%02d.%06d]"
+
+/* Time-unit converters -- pure integer arithmetic.
+ * The original ms2us()/ms2ns() multiplied by 1E3L/1E6L, which are
+ * *long double* constants: floating point must not be used in kernel
+ * code.  1000UL/1000000UL are exact integer equivalents.  The `> 0`
+ * ternaries were redundant for unsigned arguments (0 / N == 0).
+ */
+static inline unsigned long ns2us(unsigned long ns)
+{
+	return ns / 1000;
+}
+
+static inline unsigned long ns2ms(unsigned long ns)
+{
+	return ns / 1000000;
+}
+
+static inline unsigned long us2ms(unsigned long us)
+{
+	return us / 1000;
+}
+
+static inline unsigned long ms2us(unsigned long ms)
+{
+	return ms * 1000UL;
+}
+
+static inline unsigned long ms2ns(unsigned long ms)
+{
+	return ms * 1000000UL;
+}
+
+/* Split a timespec64 into the packed struct utc_time, shifting from UTC
+ * to local time via sys_tz (tz_minuteswest) before decomposing.
+ */
+static inline void ts642utc(struct timespec64 *ts, struct utc_time *utc)
+{
+	struct tm tm;
+
+	time64_to_tm((ts->tv_sec - (sys_tz.tz_minuteswest * 60)), 0, &tm);
+	utc->year = 1900 + (u32)tm.tm_year;	/* tm_year counts from 1900 */
+	utc->mon = 1 + tm.tm_mon;		/* tm_mon is 0-based */
+	utc->day = tm.tm_mday;
+	utc->hour = tm.tm_hour;
+	utc->min = tm.tm_min;
+	utc->sec = tm.tm_sec;
+	utc->us = (u32)ns2us(ts->tv_nsec);
+}
+
+/* Fill @utc from the current ktime_get_ts64() reading.
+ * NOTE(review): ktime_get_ts64() returns CLOCK_MONOTONIC (time since
+ * boot), not wall-clock time -- confirm this is the intended base before
+ * treating the result as calendar UTC.
+ */
+static inline void get_utc_time(struct utc_time *utc)
+{
+	struct timespec64 ts;
+
+	ktime_get_ts64(&ts);
+	ts642utc(&ts, utc);
+}
+
+/* dump2hex
+ * Dump binary data as a hex string as fast as possible.
+ * Needs 3 output bytes per input byte ("xx "), so @buff_size should be at
+ * least data_len * 3; otherwise the dump is truncated to fit.
+ */
+static inline void dump2hex(char *buff, size_t buff_size,
+			    const char *data, size_t data_len)
+{
+	static const char hex[] = "0123456789abcdef";
+	char *dest = buff;
+	size_t len;
+	size_t i;
+
+	/* Guard: a zero-sized buffer has no room even for the terminator;
+	 * the original wrote '\0' out of bounds in that case.
+	 */
+	if (!buff_size)
+		return;
+
+	if (buff_size < (data_len * 3))
+		len = buff_size / 3;
+	else
+		len = data_len;
+
+	for (i = 0; i < len; i++) {
+		*dest++ = hex[(data[i] >> 4) & 0xf];
+		*dest++ = hex[data[i] & 0xf];
+		*dest++ = ' ';
+	}
+
+	/* Overwrite the trailing space with the NUL terminator */
+	if (likely(len > 0))
+		dest--;
+
+	*dest = 0;
+}
+
+/* Byte offset of @target relative to @base.
+ * NOTE(review): the result is truncated to unsigned int; offsets are
+ * assumed to fit in 32 bits -- confirm for large shared-memory regions.
+ */
+static inline unsigned int calc_offset(void *target, void *base)
+{
+	return (unsigned long)target - (unsigned long)base;
+}
+
+/* Find the link_device with @link_type in @msd's link list; NULL if none. */
+static inline struct link_device *find_linkdev(struct modem_shared *msd,
+		u32 link_type)
+{
+	struct link_device *ld;
+
+	list_for_each_entry(ld, &msd->link_dev_list, list) {
+		if (ld->link_type == link_type)
+			return ld;
+	}
+	return NULL;
+}
+
+/* Population count via Kernighan's clear-lowest-set-bit loop. */
+static inline unsigned int count_bits(unsigned int n)
+{
+	unsigned int cnt = 0;
+
+	while (n) {
+		n &= n - 1;
+		cnt++;
+	}
+	return cnt;
+}
+
+/* True on every (mask+1)-th positive @cnt: used to rate-limit log floods. */
+static inline bool count_flood(int cnt, int mask)
+{
+	return cnt > 0 && (cnt & mask) == 0;
+}
+
+void mif_pkt(u8 ch, const char *tag, struct sk_buff *skb);
+
+/* print buffer as hex string */
+int pr_buffer(const char *tag, const char *data, size_t data_len,
+ size_t max_len);
+
+/* print a sk_buff as hex string */
+#define PRINT_SKBUFF_PROTOCOL_SIT 24
+#define PRINT_SKBUFF_PROTOCOL_SIPC 16
+/* Hex-dump the head of @skb through pr_buffer(), prefixed with @tag.
+ * The dump length depends on the link protocol; an unrecognized protocol
+ * logs an error (log typo "unknwon" fixed) and dumps 0 bytes.
+ */
+static inline void pr_skb(const char *tag, struct sk_buff *skb, struct link_device *ld)
+{
+	int length = 0;
+
+	switch (ld->protocol) {
+	case PROTOCOL_SIT:
+		length = PRINT_SKBUFF_PROTOCOL_SIT;
+		break;
+	case PROTOCOL_SIPC:
+		length = PRINT_SKBUFF_PROTOCOL_SIPC;
+		break;
+	default:
+		mif_err("ERR - unknown protocol\n");
+		break;
+	}
+
+	pr_buffer(tag, (char *)skb->data, (size_t)skb->len, length);
+}
+
+/* Stop/wake all normal priority TX queues in network interfaces */
+bool stop_net_ifaces(struct link_device *ld, unsigned long set_mask);
+void resume_net_ifaces(struct link_device *ld, unsigned long clear_mask);
+
+/* Get an IO device */
+struct io_device *get_iod_with_format(struct modem_shared *msd,
+ u32 format);
+
+/* Like get_iod_with_format(), but additionally requires the IO device to
+ * be connected to @ld (matching link_type); returns NULL otherwise.
+ */
+static inline struct io_device *link_get_iod_with_format(
+		struct link_device *ld, u32 format)
+{
+	struct io_device *iod = get_iod_with_format(ld->msd, format);
+
+	return (iod && IS_CONNECTED(iod, ld)) ? iod : NULL;
+}
+
+/* O(1) channel -> IO-device lookup via the msd->ch2iod table (may be NULL). */
+static inline struct io_device *get_iod_with_channel(
+		struct modem_shared *msd, unsigned int channel)
+{
+	return msd->ch2iod[channel];
+}
+
+/* Channel lookup bound to a specific link.
+ * A missing IOD is logged only when mld->init_end_cnt is non-zero;
+ * NOTE(review): presumably that suppresses noise before CP init completes
+ * -- confirm init_end_cnt semantics in link_device_memory.h.
+ */
+static inline struct io_device *link_get_iod_with_channel(
+		struct link_device *ld, unsigned int channel)
+{
+	struct io_device *iod = get_iod_with_channel(ld->msd, channel);
+	struct mem_link_device *mld = ld->mdm_data->mld;
+
+	if (!iod && atomic_read(&mld->init_end_cnt))
+		mif_err("No IOD matches channel (%d)\n", channel);
+
+	return (iod && IS_CONNECTED(iod, ld)) ? iod : NULL;
+}
+
+/* insert iod to tree functions */
+struct io_device *insert_iod_with_format(struct modem_shared *msd,
+ u32 format, struct io_device *iod);
+void insert_iod_with_channel(struct modem_shared *msd, unsigned int channel,
+ struct io_device *iod);
+
+/* iodev for each */
+/* Callback type for iodevs_for_each(). */
+typedef void (*action_fn)(struct io_device *iod, void *args);
+/* Invoke @action(iod, args) for the IO device of every registered channel. */
+static inline void iodevs_for_each(struct modem_shared *msd, action_fn action, void *args)
+{
+	int i;
+
+	for (i = 0; i < msd->num_channels; i++) {
+		u8 ch = msd->ch[i];
+		struct io_device *iod = msd->ch2iod[ch];
+
+		action(iod, args);
+	}
+}
+
+__be32 ipv4str_to_be32(const char *ipv4str, size_t count);
+
+void mif_add_timer(struct timer_list *timer, unsigned long expire,
+ void (*function)(struct timer_list *));
+
+/*
+ * ---------------------------------------------------------------------------
+
+ IPv4 Header Format
+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Version| IHL |Type of Service| Total Length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Identification |C|D|M| Fragment Offset |
+ | |E|F|F| |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Time to Live | Protocol | Header Checksum |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Source Address |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Destination Address |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Options | Padding |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ IHL - Header Length
+ Flags - Consist of 3 bits
+ The 1st bit is "Congestion" bit.
+ The 2nd bit is "Don't Fragment" bit.
+ The 3rd bit is "More Fragments" bit.
+
+---------------------------------------------------------------------------
+*/
+#define IPV4_HDR_SIZE 20
+
+/*
+ * -------------------------------------------------------------------------
+
+ TCP Header Format
+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Source Port | Destination Port |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Sequence Number |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Acknowledgment Number |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Data | |C|E|U|A|P|R|S|F| |
+ | Offset| Rsvd |W|C|R|C|S|S|Y|I| Window |
+ | | |R|E|G|K|H|T|N|N| |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Checksum | Urgent Pointer |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Options | Padding |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | data |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+-------------------------------------------------------------------------
+*/
+#define TCP_HDR_SIZE 20
+
+/*
+ * -------------------------------------------------------------------------
+
+ UDP Header Format
+
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Source Port | Destination Port |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Length | Checksum |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | data |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+-------------------------------------------------------------------------
+*/
+#define UDP_HDR_SIZE 8
+
+#ifdef DEBUG_MODEM_IF_IP_DATA
+void print_ipv4_packet(const u8 *ip_pkt, enum direction dir);
+#endif
+
+void mif_init_irq(struct modem_irq *irq, unsigned int num, const char *name,
+ unsigned long flags);
+int mif_request_irq(struct modem_irq *irq, irq_handler_t isr, void *data);
+void mif_free_irq(struct modem_irq *irq, void *data);
+void mif_enable_irq(struct modem_irq *irq);
+void mif_disable_irq(struct modem_irq *irq);
+bool mif_gpio_set_value(struct cpif_gpio *gpio, int value, unsigned int delay_ms);
+int mif_gpio_get_value(struct cpif_gpio *gpio, bool log_print);
+int mif_gpio_toggle_value(struct cpif_gpio *gpio, int delay_ms);
+
+struct file *mif_open_file(const char *path);
+void mif_save_file(struct file *fp, const char *buff, size_t size);
+void mif_close_file(struct file *fp);
+
+void mif_stop_logging(void);
+void set_wakeup_packet_log(bool enable);
+
+/* MIF buffer management */
+
+/* Print debugging messages for the MIF buffer */
+//#define MIF_BUFF_DEBUG
+
+/*
+ * IP packet : 2048
+ * sizeof(struct skb_shared_info): 512
+ * 2048 + 512 = 2560 (0xA00)
+ */
+#define MIF_BUFF_DEFAULT_PACKET_SIZE (2048)
+#define MIF_BUFF_CELL_PADDING_SIZE (512)
+#define MIF_BUFF_DEFAULT_CELL_SIZE (MIF_BUFF_DEFAULT_PACKET_SIZE+MIF_BUFF_CELL_PADDING_SIZE)
+
+const char *get_cpif_driver_version(void);
+
+/* Register a wakeup source named @name for @dev; NULL (logged) on failure. */
+static inline struct wakeup_source *cpif_wake_lock_register(struct device *dev, const char *name)
+{
+	struct wakeup_source *ws = wakeup_source_register(dev, name);
+
+	if (!ws) {
+		mif_err("%s: wakelock register fail\n", name);
+		return NULL;
+	}
+
+	return ws;
+}
+
+/* Unregister @ws; a NULL wakeup source is only logged, never dereferenced. */
+static inline void cpif_wake_lock_unregister(struct wakeup_source *ws)
+{
+	if (!ws) {
+		mif_err("wakelock unregister fail\n");
+		return;
+	}
+
+	wakeup_source_unregister(ws);
+}
+
+/* Hold @ws (keep the system awake); a NULL source is only logged. */
+static inline void cpif_wake_lock(struct wakeup_source *ws)
+{
+	if (!ws) {
+		mif_err("wakelock fail\n");
+		return;
+	}
+
+	__pm_stay_awake(ws);
+}
+
+/* Hold @ws for @timeout jiffies (converted to ms for __pm_wakeup_event). */
+static inline void cpif_wake_lock_timeout(struct wakeup_source *ws, long timeout)
+{
+	if (!ws) {
+		mif_err("wakelock timeout fail\n");
+		return;
+	}
+
+	__pm_wakeup_event(ws, jiffies_to_msecs(timeout));
+}
+
+/* Release @ws; a NULL source is only logged. */
+static inline void cpif_wake_unlock(struct wakeup_source *ws)
+{
+	if (!ws) {
+		mif_err("wake unlock fail\n");
+		return;
+	}
+
+	__pm_relax(ws);
+}
+
+/* Return whether @ws is currently active; 0 when @ws is NULL or inactive.
+ * The original log text was copy-pasted from cpif_wake_unlock(); it now
+ * names the operation that actually failed.
+ */
+static inline int cpif_wake_lock_active(struct wakeup_source *ws)
+{
+	if (ws == NULL) {
+		mif_err("wake lock active check fail\n");
+		return 0;
+	}
+
+	return ws->active;
+}
+
+int copy_from_user_memcpy_toio(void __iomem *dst, const void __user *src, size_t count);
+
+#endif/*__MODEM_UTILS_H__*/
diff --git a/modem_v1.h b/modem_v1.h
new file mode 100644
index 0000000..28b63a2
--- /dev/null
+++ b/modem_v1.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 Samsung Electronics.
+ *
+ */
+
+#ifndef __MODEM_V1_H__
+#define __MODEM_V1_H__
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/types.h>
+#include <linux/shm_ipc.h>
+#include <dt-bindings/soc/google/exynos-cpif.h>
+
+#include "cp_btl.h"
+
+#define MAX_STR_LEN 256
+#define MAX_NAME_LEN 64
+#define MAX_DUMP_LEN 20
+
+#define SMC_ID 0x82000700
+#define SMC_ID_CLK 0x82001011
+#define SSS_CLK_ENABLE 0
+#define SSS_CLK_DISABLE 1
+
+/* Multi-frame control byte: 7-bit frame id plus a "more frames follow" flag. */
+struct __packed multi_frame_control {
+	u8 id:7,
+	   more:1;
+};
+
+/* Transfer direction.  The aliases are deliberate: TX/UL/AP2CP all name
+ * the AP-to-CP direction and RX/DL/CP2AP the reverse, so tables indexed
+ * by direction (size MAX_DIR) work under every naming convention.
+ */
+enum direction {
+	TX = 0,
+	UL = 0,
+	AP2CP = 0,
+	RX = 1,
+	DL = 1,
+	CP2AP = 1,
+	TXRX = 2,
+	ULDL = 2,
+	MAX_DIR = 2
+};
+
+enum read_write {
+ RD = 0,
+ WR = 1,
+ RDWR = 2
+};
+
+/**
+ * struct modem_io_t - declaration for io_device
+ * @name: device name
+ * @ch: channel ID
+ *      for SIPC4, contains format & channel information
+ *      (ch & 11100000b)>>5 = format (eg, 0=FMT, 1=RAW, 2=RFS)
+ *      (ch & 00011111b) = channel (valid only if format is RAW)
+ *      for SIPC5, contains only 8-bit channel ID
+ * @ch_count: number of channels (presumably consecutive) using this config
+ * @format: device format
+ * @io_type: type of this io_device
+ * @link_type: link_devices to use this io_device
+ *             for example, LINKDEV_SHMEM or LINKDEV_PCIE
+ * @attrs: IO-device attribute flags
+ * @option_region: region string this device applies to, if any
+ * @ul_num_buffers: number of uplink buffers
+ * @ul_buffer_size: size of each uplink buffer
+ * @dl_num_buffers: number of downlink buffers
+ * @dl_buffer_size: size of each downlink buffer
+ *
+ * NOTE(review): the original doc listed "@id", but the field is @ch.
+ */
+struct modem_io_t {
+	char name[SZ_64];
+	u32 ch;
+	u32 ch_count;
+	u32 format;
+	u32 io_type;
+	u32 link_type;
+	u32 attrs;
+	char *option_region;
+	unsigned int ul_num_buffers;
+	unsigned int ul_buffer_size;
+	unsigned int dl_num_buffers;
+	unsigned int dl_buffer_size;
+};
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_SHMEM) || IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+struct modem_mbox {
+ unsigned int int_ap2cp_msg;
+ unsigned int int_ap2cp_active;
+ unsigned int int_ap2cp_wakeup;
+ unsigned int int_ap2cp_status;
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+ unsigned int int_ap2cp_lcd_status;
+#endif
+#if IS_ENABLED(CONFIG_CP_LLC)
+ unsigned int int_ap2cp_llc_status;
+#endif
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+ unsigned int int_ap2cp_clatinfo_send;
+#endif
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+ unsigned int int_ap2cp_pcie_link_ack;
+#endif
+ unsigned int int_ap2cp_uart_noti;
+
+ unsigned int irq_cp2ap_msg;
+ unsigned int irq_cp2ap_status;
+ unsigned int irq_cp2ap_active;
+#if IS_ENABLED(CONFIG_CP_LLC)
+ unsigned int irq_cp2ap_llc_status;
+#endif
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+ unsigned int irq_cp2ap_clatinfo_ack;
+#endif
+ unsigned int irq_cp2ap_wakelock;
+ unsigned int irq_cp2ap_rat_mode;
+};
+#endif
+
+#define AP_CP_CAP_PARTS 2
+#define AP_CP_CAP_PART_LEN 4
+#define AP_CP_CAP_BIT_MAX 32
+
+/* AP capability[0] index */
+enum ap_capability_0_bits {
+ AP_CAP_0_PKTPROC_UL_BIT = 0,
+ AP_CAP_0_CH_EXTENSION_BIT,
+ AP_CAP_0_PKTPROC_36BIT_ADDR_BIT,
+ AP_CAP_0_MAX = AP_CP_CAP_BIT_MAX
+};
+
+/* AP capability[1] index */
+enum ap_capability_1_bits {
+ AP_CAP_1_MAX = AP_CP_CAP_BIT_MAX
+};
+
+/* platform data */
+struct modem_data {
+ char *name;
+ u32 cp_num;
+
+ struct modem_mbox *mbx;
+ struct mem_link_device *mld;
+
+ /* Modem component */
+ u32 modem_type;
+
+ u32 link_type;
+ char *link_name;
+ unsigned long link_attrs; /* Set of link_attr_bit flags */
+ u32 interrupt_types;
+
+ u32 protocol;
+
+ /* SIPC version */
+ u32 ipc_version;
+
+ /* Information of IO devices */
+ unsigned int num_iodevs;
+ struct modem_io_t *iodevs[IOD_CH_ID_MAX];
+
+ /* capability check */
+ u32 capability_check;
+
+ /* check if cp2ap_active is in alive */
+ u32 cp2ap_active_not_alive;
+
+ /* MIF will be off during VoLTE call and we need to register wrstbi interrupt */
+ u32 mif_off_during_volte;
+
+ /* legacy buffer setting */
+ u32 legacy_fmt_head_tail_offset;
+ u32 legacy_fmt_buffer_offset;
+ u32 legacy_fmt_txq_size;
+ u32 legacy_fmt_rxq_size;
+ u32 legacy_raw_head_tail_offset;
+ u32 legacy_raw_buffer_offset;
+ u32 legacy_raw_txq_size;
+ u32 legacy_raw_rxq_size;
+ u32 legacy_raw_rx_buffer_cached;
+
+ /* several 4 byte length info in ipc region */
+ u32 offset_ap_version;
+ u32 offset_cp_version;
+ u32 offset_cmsg_offset;
+ u32 offset_srinfo_offset;
+ u32 offset_clk_table_offset;
+ u32 offset_buff_desc_offset;
+ u32 offset_capability_offset;
+
+ /* ctrl messages between cp and ap */
+ u32 ap2cp_msg[2];
+ u32 cp2ap_msg[2];
+ u32 cp2ap_united_status[2];
+ u32 ap2cp_united_status[2];
+#if IS_ENABLED(CONFIG_CP_LLC)
+ u32 ap2cp_llc_status[2];
+ u32 cp2ap_llc_status[2];
+#endif
+#if IS_ENABLED(CONFIG_CP_PKTPROC_CLAT)
+ u32 ap2cp_clatinfo_xlat_v4_addr[2];
+ u32 ap2cp_clatinfo_xlat_addr_0[2];
+ u32 ap2cp_clatinfo_xlat_addr_1[2];
+ u32 ap2cp_clatinfo_xlat_addr_2[2];
+ u32 ap2cp_clatinfo_xlat_addr_3[2];
+ u32 ap2cp_clatinfo_index[2];
+#endif
+ u32 ap2cp_kerneltime[2];
+ u32 ap2cp_kerneltime_sec[2];
+ u32 ap2cp_kerneltime_usec[2];
+ u32 ap2cp_handover_block_info[2];
+
+ /* Status Bit Info */
+ unsigned int sbi_lte_active_mask;
+ unsigned int sbi_lte_active_pos;
+ unsigned int sbi_cp_status_mask;
+ unsigned int sbi_cp_status_pos;
+ unsigned int sbi_cp2ap_wakelock_mask;
+ unsigned int sbi_cp2ap_wakelock_pos;
+ unsigned int sbi_cp2ap_rat_mode_mask;
+ unsigned int sbi_cp2ap_rat_mode_pos;
+
+ unsigned int sbi_pda_active_mask;
+ unsigned int sbi_pda_active_pos;
+ unsigned int sbi_ap_status_mask;
+ unsigned int sbi_ap_status_pos;
+
+ unsigned int sbi_ap2cp_kerneltime_sec_mask;
+ unsigned int sbi_ap2cp_kerneltime_sec_pos;
+ unsigned int sbi_ap2cp_kerneltime_usec_mask;
+ unsigned int sbi_ap2cp_kerneltime_usec_pos;
+
+ unsigned int sbi_uart_noti_mask;
+ unsigned int sbi_uart_noti_pos;
+ unsigned int sbi_crash_type_mask;
+ unsigned int sbi_crash_type_pos;
+ unsigned int sbi_ds_det_mask;
+ unsigned int sbi_ds_det_pos;
+#if IS_ENABLED(CONFIG_CP_LCD_NOTIFIER)
+ unsigned int sbi_lcd_status_mask;
+ unsigned int sbi_lcd_status_pos;
+#endif
+
+ /* ulpath offset for 2CP models */
+ u32 ulpath_offset;
+
+ /* control message offset */
+ u32 cmsg_offset;
+
+ /* srinfo settings */
+ u32 srinfo_offset;
+ u32 srinfo_size;
+
+ /* clk_table offset */
+ u32 clk_table_offset;
+
+ /* new SIT buffer descriptor offset */
+ u32 buff_desc_offset;
+
+ /* capability */
+ u32 capability_offset;
+ u32 ap_capability[AP_CP_CAP_PARTS];
+
+#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
+ /* SIT priority queue info */
+ u32 legacy_raw_qos_head_tail_offset;
+ u32 legacy_raw_qos_buffer_offset;
+ u32 legacy_raw_qos_txq_size;
+ u32 legacy_raw_qos_rxq_size; /* unused for now */
+#endif
+ struct cp_btl btl; /* CP background trace log */
+
+ u32 pktproc_use_36bit_addr; /* Check pktproc use 36bit addr */
+};
+
+enum cp_gpio_type {
+ CP_GPIO_AP2CP_CP_PWR,
+ CP_GPIO_AP2CP_NRESET,
+ CP_GPIO_AP2CP_WAKEUP,
+ CP_GPIO_AP2CP_DUMP_NOTI,
+ CP_GPIO_AP2CP_AP_ACTIVE,
+#if !IS_ENABLED(CONFIG_CP_WRESET_WA)
+ CP_GPIO_AP2CP_CP_WRST_N,
+ CP_GPIO_CP2AP_CP_WRST_N,
+ CP_GPIO_AP2CP_PM_WRST_N,
+#endif
+ CP_GPIO_CP2AP_PS_HOLD,
+ CP_GPIO_CP2AP_WAKEUP,
+ CP_GPIO_CP2AP_CP_ACTIVE,
+ CP_GPIO_AP2CP_PARTIAL_RST_N,
+ CP_GPIO_MAX
+};
+
+enum cp_gpio_irq_type {
+ CP_GPIO_IRQ_NONE,
+ CP_GPIO_IRQ_CP2AP_WAKEUP,
+ CP_GPIO_IRQ_CP2AP_CP_ACTIVE,
+ CP_GPIO_IRQ_CP2AP_CP_WRST_N,
+ CP_GPIO_IRQ_MAX
+};
+
+/* Book-keeping for one modem interrupt line. */
+struct modem_irq {
+	spinlock_t lock;	/* guards active state (see mif_disable_irq) */
+	unsigned int num;	/* Linux IRQ number */
+	char name[MAX_NAME_LEN];
+	unsigned long flags;	/* flags for mif_request_irq() */
+	bool active;		/* currently enabled */
+	bool registered;	/* ISR has been requested */
+	u32 not_alive;
+};
+
+/* One CP-related GPIO line. */
+struct cpif_gpio {
+	bool valid;		/* checked before every gpio_get/set_value() */
+	int num;		/* GPIO number for the legacy gpio_* API */
+	enum cp_gpio_irq_type irq_type;
+	const char *label;	/* e.g. "AP2CP_PM_WRST_N" (used in logs) */
+	const char *node_name;	/* presumably the DT node/property name -- TODO confirm */
+};
+
+#define mif_dt_read_enum(np, prop, dest) \
+ do { \
+ u32 val; \
+ if (of_property_read_u32(np, prop, &val)) { \
+ mif_err("%s is not defined\n", prop); \
+ return -EINVAL; \
+ } \
+ dest = (__typeof__(dest))(val); \
+ } while (0)
+
+#define mif_dt_read_bool(np, prop, dest) \
+ do { \
+ u32 val; \
+ if (of_property_read_u32(np, prop, &val)) { \
+ mif_err("%s is not defined\n", prop); \
+ return -EINVAL; \
+ } \
+ dest = val ? true : false; \
+ } while (0)
+
+#define mif_dt_read_string(np, prop, dest) \
+ do { \
+ if (of_property_read_string(np, prop, \
+ (const char **)&dest)) { \
+ mif_err("%s is not defined\n", prop); \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define mif_dt_read_u32(np, prop, dest) \
+ do { \
+ u32 val; \
+ if (of_property_read_u32(np, prop, &val)) { \
+ mif_err("%s is not defined\n", prop); \
+ return -EINVAL; \
+ } \
+ dest = val; \
+ } while (0)
+
+#define mif_dt_read_u32_noerr(np, prop, dest) \
+ do { \
+ u32 val; \
+ if (!of_property_read_u32(np, prop, &val)) \
+ dest = val; \
+ } while (0)
+
+#define mif_dt_read_u64(np, prop, dest) \
+ do { \
+ u64 val; \
+ if (of_property_read_u64(np, prop, &val)) { \
+ mif_err("%s is not defined\n", prop); \
+ return -EINVAL; \
+ } \
+ dest = val; \
+ } while (0)
+
+#define mif_dt_read_u64_noerr(np, prop, dest) \
+ do { \
+ u64 val; \
+ if (!of_property_read_u64(np, prop, &val)) \
+ dest = val; \
+ } while (0)
+
+#define mif_dt_count_u32_elems(np, prop, dest) \
+ do { \
+ int val; \
+ val = of_property_count_u32_elems(np, prop); \
+ if (val < 0) { \
+ mif_err("can not get %s\n", prop); \
+ return -EINVAL; \
+ } \
+ dest = (u32)val; \
+ } while (0)
+
+#define mif_dt_count_u32_array(np, prop, dest, size) \
+ do { \
+ int val; \
+ val = of_property_read_u32_array(np, prop, dest, size); \
+ if (val < 0) { \
+ mif_err("can not get %s %d\n", prop, size); \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+
+#define cpif_set_bit(data, offset) ((data) |= BIT(offset))
+#define cpif_clear_bit(data, offset) ((data) &= ~BIT(offset))
+#define cpif_check_bit(data, offset) ((data) & BIT(offset))
+
+#define LOG_TAG "cpif: "
+/* Address of the function that called the current function */
+#define CALLER (__builtin_return_address(0))
+
+#define mif_err_limited(fmt, ...) \
+	printk_ratelimited(KERN_ERR LOG_TAG "%s: " pr_fmt(fmt), __func__, ##__VA_ARGS__)
+#define mif_err(fmt, ...) \
+	pr_err(LOG_TAG "%s: " pr_fmt(fmt), __func__, ##__VA_ARGS__)
+
+#define mif_info_limited(fmt, ...) \
+	printk_ratelimited(KERN_INFO LOG_TAG "%s: " pr_fmt(fmt), __func__, ##__VA_ARGS__)
+#define mif_info(fmt, ...) \
+	pr_info(LOG_TAG "%s: " pr_fmt(fmt), __func__, ##__VA_ARGS__)
+
+#define mif_debug(fmt, ...) \
+	pr_debug(LOG_TAG "%s: " pr_fmt(fmt), __func__, ##__VA_ARGS__)
+
+/* %pS instead of %pF: %pF existed only for ia64/ppc64 function
+ * descriptors and has been removed from modern kernels.
+ */
+#define mif_trace(fmt, ...) \
+	printk(KERN_DEBUG "cpif: %s: %d: called(%pS): " fmt, \
+		__func__, __LINE__, __builtin_return_address(0), ##__VA_ARGS__)
+
+#endif
diff --git a/modem_variation.c b/modem_variation.c
new file mode 100644
index 0000000..7c02b0a
--- /dev/null
+++ b/modem_variation.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#include "modem_variation.h"
+
+/* add declaration of modem & link type */
+/* modem device support */
+DECLARE_MODEM_INIT_DUMMY(dummy)
+DECLARE_MODEM_UNINIT_DUMMY(dummy)
+
+#if !IS_ENABLED(CONFIG_SEC_MODEM_S5000AP)
+DECLARE_MODEM_INIT_DUMMY(s5000ap)
+DECLARE_MODEM_UNINIT_DUMMY(s5000ap)
+#endif
+
+#if !IS_ENABLED(CONFIG_SEC_MODEM_S5100)
+DECLARE_MODEM_INIT_DUMMY(s5100)
+DECLARE_MODEM_UNINIT_DUMMY(s5100)
+#endif
+
+/* link device support */
+DECLARE_LINK_INIT_DUMMY()
+
+/* Dispatch tables indexed by modem/link type.  Modem types compiled out
+ * resolve to the DUMMY implementations declared above.
+ */
+static modem_init_call modem_init_func[MAX_MODEM_TYPE] = {
+	[SEC_S5000AP] = MODEM_INIT_CALL(s5000ap),
+	[SEC_S5100] = MODEM_INIT_CALL(s5100),
+	[MODEM_TYPE_DUMMY] = MODEM_INIT_CALL(dummy),
+};
+
+static modem_uninit_call modem_uninit_func[MAX_MODEM_TYPE] = {
+	[SEC_S5000AP] = MODEM_UNINIT_CALL(s5000ap),
+	[SEC_S5100] = MODEM_UNINIT_CALL(s5100),
+	[MODEM_TYPE_DUMMY] = MODEM_UNINIT_CALL(dummy),
+};
+
+static link_init_call link_init_func[LINKDEV_MAX] = {
+	[LINKDEV_UNDEFINED] = LINK_INIT_CALL_DUMMY(),
+	[LINKDEV_SHMEM] = LINK_INIT_CALL(),
+	[LINKDEV_PCIE] = LINK_INIT_CALL(),
+};
+
+/* Run the init hook registered for pdata->modem_type.
+ * Returns the hook's result, or -ENOTSUPP when the type is out of range
+ * (the original indexed the array unchecked) or no hook is registered.
+ */
+int call_modem_init_func(struct modem_ctl *mc, struct modem_data *pdata)
+{
+	if (pdata->modem_type >= MAX_MODEM_TYPE ||
+	    !modem_init_func[pdata->modem_type])
+		return -ENOTSUPP;
+
+	return modem_init_func[pdata->modem_type](mc, pdata);
+}
+
+/* Run the uninit hook for pdata->modem_type; warn when none is registered.
+ * The original returned the (void) result of the hook from this void
+ * function -- a C constraint violation (C11 6.8.6.4) -- and indexed the
+ * table without a bounds check; both are fixed here.
+ */
+void call_modem_uninit_func(struct modem_ctl *mc, struct modem_data *pdata)
+{
+	if (pdata->modem_type < MAX_MODEM_TYPE &&
+	    modem_uninit_func[pdata->modem_type])
+		modem_uninit_func[pdata->modem_type](mc, pdata);
+	else
+		dev_WARN(mc->dev, "no uninit callback defined for modem type");
+}
+
+/* Create the link device for @link_type via the registered constructor.
+ * Returns NULL when @link_type is out of range (guards the table lookup,
+ * which the original performed unchecked) or no constructor is set.
+ */
+struct link_device *call_link_init_func(struct platform_device *pdev,
+					u32 link_type)
+{
+	if (link_type >= LINKDEV_MAX || !link_init_func[link_type])
+		return NULL;
+
+	return link_init_func[link_type](pdev, link_type);
+}
diff --git a/modem_variation.h b/modem_variation.h
new file mode 100644
index 0000000..523e7eb
--- /dev/null
+++ b/modem_variation.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#ifndef __MODEM_VARIATION_H__
+#define __MODEM_VARIATION_H__
+
+#include "modem_prj.h"
+
+#define DECLARE_MODEM_INIT(type) \
+ int type ## _init_modemctl_device( \
+ struct modem_ctl *mc, \
+ struct modem_data *pdata)
+
+#define DECLARE_MODEM_UNINIT(type) \
+ void type ## _uninit_modemctl_device( \
+ struct modem_ctl *mc, \
+ struct modem_data *pdata)
+
+
+#define DECLARE_MODEM_INIT_DUMMY(type) \
+ int type ## _init_modemctl_device( \
+ struct modem_ctl *mc, \
+ struct modem_data *pdata) \
+ { return 0; }
+
+#define DECLARE_MODEM_UNINIT_DUMMY(type) \
+ void type ## _uninit_modemctl_device( \
+ struct modem_ctl *mc, \
+ struct modem_data *pdata) \
+ { return; }
+
+
+#define DECLARE_LINK_INIT() \
+ struct link_device *create_link_device( \
+ struct platform_device *pdev, \
+ u32 link_type)
+
+#define DECLARE_LINK_INIT_DUMMY() \
+ struct link_device *dummy_create_link_device( \
+ struct platform_device *pdev, \
+ u32 link_type) \
+ { return NULL; }
+
+#define MODEM_INIT_CALL(type) type ## _init_modemctl_device
+
+#define MODEM_UNINIT_CALL(type) type ## _uninit_modemctl_device
+
+#define LINK_INIT_CALL() create_link_device
+#define LINK_INIT_CALL_DUMMY() dummy_create_link_device
+
+
+/**
+ * Add extern declaration of modem & link type
+ * (CAUTION!!! Every DUMMY function must be declared in modem_variation.c)
+ */
+
+/* modem device support */
+#if IS_ENABLED(CONFIG_SEC_MODEM_S5000AP)
+DECLARE_MODEM_INIT(s5000ap);
+DECLARE_MODEM_UNINIT(s5000ap);
+#endif
+
+#if IS_ENABLED(CONFIG_SEC_MODEM_S5100)
+DECLARE_MODEM_INIT(s5100);
+DECLARE_MODEM_UNINIT(s5100);
+#endif
+
+/* link device support */
+#if IS_ENABLED(CONFIG_LINK_DEVICE_SHMEM) || IS_ENABLED(CONFIG_LINK_DEVICE_PCIE)
+DECLARE_LINK_INIT();
+#endif
+
+typedef int (*modem_init_call)(struct modem_ctl *, struct modem_data *);
+typedef void (*modem_uninit_call)(struct modem_ctl *, struct modem_data *);
+typedef struct link_device *(*link_init_call)(struct platform_device *,
+ u32 link_type);
+
+int call_modem_init_func(struct modem_ctl *mc, struct modem_data *pdata);
+void call_modem_uninit_func(struct modem_ctl *mc, struct modem_data *pdata);
+
+struct link_device *call_link_init_func(struct platform_device *pdev,
+ u32 link_type);
+
+#endif
diff --git a/net_io_device.c b/net_io_device.c
new file mode 100644
index 0000000..e4d9dec
--- /dev/null
+++ b/net_io_device.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Samsung Electronics.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <trace/events/napi.h>
+#include <net/ip.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <net/tcp.h>
+
+#include <soc/google/exynos-modem-ctrl.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "modem_dump.h"
+#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
+#include "cpif_qos_info.h"
+#endif
+
+static int vnet_open(struct net_device *ndev)
+{
+ struct vnet *vnet = netdev_priv(ndev);
+ struct io_device *iod = (struct io_device *)vnet->iod;
+ struct modem_shared *msd = iod->msd;
+ struct link_device *ld;
+ int ret;
+
+ atomic_inc(&iod->opened);
+
+ list_for_each_entry(ld, &msd->link_dev_list, list) {
+ if (IS_CONNECTED(iod, ld) && ld->init_comm) {
+ ret = ld->init_comm(ld, iod);
+ if (ret < 0) {
+ mif_err("%s<->%s: ERR! init_comm fail(%d)\n",
+ iod->name, ld->name, ret);
+ atomic_dec(&iod->opened);
+ return ret;
+ }
+ }
+ }
+ list_add(&iod->node_ndev, &iod->msd->activated_ndev_list);
+
+ netif_start_queue(ndev);
+
+ mif_info("%s (opened %d) by %s\n",
+ iod->name, atomic_read(&iod->opened), current->comm);
+
+ return 0;
+}
+
+static int vnet_stop(struct net_device *ndev)
+{
+ struct vnet *vnet = netdev_priv(ndev);
+ struct io_device *iod = (struct io_device *)vnet->iod;
+ struct modem_shared *msd = iod->msd;
+ struct link_device *ld;
+
+ if (atomic_dec_and_test(&iod->opened))
+ skb_queue_purge(&iod->sk_rx_q);
+
+ list_for_each_entry(ld, &msd->link_dev_list, list) {
+ if (IS_CONNECTED(iod, ld) && ld->terminate_comm)
+ ld->terminate_comm(ld, iod);
+ }
+
+ spin_lock(&msd->active_list_lock);
+ list_del(&iod->node_ndev);
+ spin_unlock(&msd->active_list_lock);
+ netif_stop_queue(ndev);
+
+ mif_info("%s (opened %d) by %s\n",
+ iod->name, atomic_read(&iod->opened), current->comm);
+
+ return 0;
+}
+
+static netdev_tx_t vnet_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct vnet *vnet = netdev_priv(ndev);
+ struct io_device *iod = (struct io_device *)vnet->iod;
+ struct link_device *ld = get_current_link(iod);
+ struct modem_ctl *mc = iod->mc;
+ unsigned int count = skb->len;
+ struct sk_buff *skb_new = skb;
+ char *buff;
+ int ret;
+ u8 __maybe_unused cfg;
+ u16 __maybe_unused cfg_sit;
+ unsigned int headroom;
+ unsigned int tailroom;
+ unsigned int tx_bytes;
+ struct timespec64 ts;
+
+ if (unlikely(!cp_online(mc))) {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ /* Just drop the TX packet */
+ goto drop;
+ }
+
+ /* Record the timestamp */
+ ktime_get_ts64(&ts);
+
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+ /* no need of head and tail */
+ cfg = 0;
+ cfg_sit = 0;
+ headroom = 0;
+ tailroom = 0;
+#else
+ if (iod->link_header) {
+ switch (ld->protocol) {
+ case PROTOCOL_SIPC:
+ cfg = sipc5_build_config(iod, ld, count);
+ headroom = sipc5_get_hdr_len(&cfg);
+ break;
+ case PROTOCOL_SIT:
+ cfg_sit = exynos_build_fr_config(iod, ld, count);
+ headroom = EXYNOS_HEADER_SIZE;
+ break;
+ default:
+ mif_err("protocol error %d\n", ld->protocol);
+ return -EINVAL;
+ }
+ if (ld->aligned)
+ tailroom = ld->calc_padding_size(headroom + count);
+ else
+ tailroom = 0;
+ } else {
+ cfg = 0;
+ cfg_sit = 0;
+ headroom = 0;
+ tailroom = 0;
+ }
+
+ if ((skb_headroom(skb) < headroom) || (skb_tailroom(skb) < tailroom)) {
+ skb_new = skb_copy_expand(skb, headroom, tailroom, GFP_ATOMIC);
+ if (!skb_new) {
+ mif_info("%s: ERR! skb_copy_expand fail\n", iod->name);
+ goto retry;
+ }
+ }
+#endif
+
+ tx_bytes = headroom + count + tailroom;
+
+ /* Store the IO device, the link device, etc. */
+ skbpriv(skb_new)->iod = iod;
+ skbpriv(skb_new)->ld = ld;
+
+ skbpriv(skb_new)->lnk_hdr = iod->link_header;
+ skbpriv(skb_new)->sipc_ch = iod->ch;
+
+ /* Copy the timestamp to the skb */
+ skbpriv(skb_new)->ts = ts;
+#if defined(DEBUG_MODEM_IF_IODEV_TX) && defined(DEBUG_MODEM_IF_PS_DATA)
+ mif_pkt(iod->ch, "IOD-TX", skb_new);
+#endif
+
+ /* Build SIPC5 link header*/
+#if IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+ buff = skb_new->data;
+#else
+ buff = skb_push(skb_new, headroom);
+#endif
+
+#if !IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+ if (cfg || cfg_sit) {
+ switch (ld->protocol) {
+ case PROTOCOL_SIPC:
+ sipc5_build_header(iod, buff, cfg, count, 0);
+ break;
+ case PROTOCOL_SIT:
+ exynos_build_header(iod, ld, buff, cfg_sit, 0, count);
+ break;
+ default:
+ mif_err("protocol error %d\n", ld->protocol);
+ return -EINVAL;
+ }
+ }
+#endif
+
+ /* IP loop-back */
+ if (iod->msd->loopback_ipaddr) {
+ struct iphdr *ip_header = (struct iphdr *)skb->data;
+
+ if (ip_header->daddr == iod->msd->loopback_ipaddr) {
+ swap(ip_header->saddr, ip_header->daddr);
+ buff[SIPC5_CH_ID_OFFSET] = DATA_LOOPBACK_CHANNEL;
+ }
+ }
+
+#if !IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+ /* Apply padding */
+ if (tailroom)
+ skb_put(skb_new, tailroom);
+#endif
+
+ ret = ld->send(ld, iod, skb_new);
+ if (unlikely(ret < 0)) {
+ if ((ret != -EBUSY) && (ret != -ENOSPC)) {
+ mif_err_limited("%s->%s: ERR! %s->send fail:%d (tx_bytes:%d len:%d)\n",
+ iod->name, mc->name, ld->name, ret,
+ tx_bytes, count);
+ goto drop;
+ }
+
+ goto retry;
+ }
+
+ if (ret != tx_bytes) {
+ mif_info("%s->%s: WARN! %s->send ret:%d (tx_bytes:%d len:%d)\n",
+ iod->name, mc->name, ld->name, ret, tx_bytes, count);
+ }
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += count;
+
+ /*
+ * If @skb has been expanded to $skb_new, @skb must be freed here.
+ * ($skb_new will be freed by the link device.)
+ */
+ if (skb_new != skb)
+ dev_consume_skb_any(skb);
+
+ return NETDEV_TX_OK;
+
+retry:
+#if !IS_ENABLED(CONFIG_CP_PKTPROC_UL)
+ if (iod->link_header && skb_new && (skb_new == skb)) {
+ if (headroom)
+ skb_pull(skb_new, headroom);
+
+ if (tailroom)
+ skb_trim(skb_new, count);
+ }
+#endif
+
+ /*
+ * If @skb has been expanded to $skb_new, only $skb_new must be freed here
+ * because @skb will be reused by NET_TX.
+ */
+ if (skb_new && skb_new != skb)
+ dev_consume_skb_any(skb_new);
+
+ return NETDEV_TX_BUSY;
+
+drop:
+ ndev->stats.tx_dropped++;
+
+ dev_kfree_skb_any(skb);
+
+ /*
+ * If @skb has been expanded to $skb_new, $skb_new must also be freed here.
+ */
+ if (skb_new != skb)
+ dev_consume_skb_any(skb_new);
+
+ return NETDEV_TX_OK;
+}
+
+static bool _is_tcp_ack(struct sk_buff *skb)
+{
+ u16 payload_len = 0;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (ip_hdr(skb)->protocol != IPPROTO_TCP)
+ return false;
+
+ if (skb->network_header == skb->transport_header)
+ skb->transport_header += (ip_hdr(skb)->ihl << 2);
+ payload_len = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
+ break;
+ case htons(ETH_P_IPV6):
+ if (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)
+ return false;
+
+ if (skb->network_header == skb->transport_header)
+ skb->transport_header += sizeof(struct ipv6hdr);
+ payload_len = ntohs(ipv6_hdr(skb)->payload_len);
+ break;
+ default:
+ break;
+ }
+
+ if (!payload_len)
+ return false;
+
+ if (payload_len == (tcp_hdr(skb)->doff << 2) &&
+ (tcp_flag_word(tcp_hdr(skb)) & cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK)
+ return true;
+
+ return false;
+}
+
+static inline bool is_tcp_ack(struct sk_buff *skb)
+{
+ if (skb_is_tcp_pure_ack(skb))
+ return true;
+
+ if (unlikely(_is_tcp_ack(skb)))
+ return true;
+
+ return false;
+}
+
+#if IS_ENABLED(CONFIG_MODEM_IF_QOS)
+static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
+ struct vnet *vnet = netdev_priv(dev);
+#endif
+
+ if (!skb)
+ return 0;
+
+ if (is_tcp_ack(skb))
+ return 1;
+
+#if IS_ENABLED(CONFIG_MODEM_IF_LEGACY_QOS)
+ if (!vnet->hiprio_ack_only && skb->sk && cpif_qos_get_node(skb->sk->sk_uid.val))
+ return 1;
+#endif
+
+ return 0;
+}
+#endif
+
/* net_device callbacks for the CP network (vnet) interface */
static const struct net_device_ops vnet_ops = {
	.ndo_open = vnet_open,
	.ndo_stop = vnet_stop,
	.ndo_start_xmit = vnet_xmit,
#if IS_ENABLED(CONFIG_MODEM_IF_QOS)
	/* queue steering only wired up when QoS support is enabled */
	.ndo_select_queue = vnet_select_queue,
#endif
};
+
/*
 * vnet_setup - net_device setup callback for CP (vnet) interfaces.
 *
 * Configures the device as a point-to-point raw-IP interface (no L2
 * header, no hardware address) and enables GRO including UDP forwarding.
 */
void vnet_setup(struct net_device *ndev)
{
	ndev->netdev_ops = &vnet_ops;
	ndev->type = ARPHRD_RAWIP;	/* raw IP: no Ethernet framing */
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	ndev->addr_len = 0;		/* no hardware address */
	ndev->hard_header_len = 0;	/* no link-layer header */
	ndev->tx_queue_len = 1000;
	ndev->mtu = ETH_DATA_LEN;
	ndev->watchdog_timeo = 5 * HZ;	/* TX watchdog timeout */
	ndev->features |= (NETIF_F_GRO | NETIF_F_GRO_UDP_FWD);
}
diff --git a/s51xx_pcie.c b/s51xx_pcie.c
new file mode 100644
index 0000000..c94c0d0
--- /dev/null
+++ b/s51xx_pcie.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe modem control driver for S51xx series
+ *
+ * Copyright (C) 2019 Samsung Electronics.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/if_arp.h>
+#include <linux/version.h>
+
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
+//#include <sound/samsung/abox.h>
+
+#include "modem_prj.h"
+#include "modem_utils.h"
+#include "modem_ctrl.h"
+#include "s51xx_pcie.h"
+
+void s51xx_pcie_chk_ep_conf(struct pci_dev *pdev)
+{
+ int i;
+ u32 val1, val2, val3, val4;
+
+ /* EP config. full dump: */
+ for (i = 0x0; i < 0x50; i += 0x10) {
+ pci_read_config_dword(pdev, i, &val1);
+ pci_read_config_dword(pdev, i + 0x4, &val2);
+ pci_read_config_dword(pdev, i + 0x8, &val3);
+ pci_read_config_dword(pdev, i + 0xC, &val4);
+ dev_dbg(&pdev->dev, "0x%02x: %08x %08x %08x %08x\n",
+ i, val1, val2, val3, val4);
+ }
+}
+
+inline int s51xx_pcie_send_doorbell_int(struct pci_dev *pdev, int int_num)
+{
+ struct s51xx_pcie *s51xx_pcie = pci_get_drvdata(pdev);
+ struct pci_driver *driver = pdev->driver;
+ struct modem_ctl *mc = container_of(driver, struct modem_ctl, pci_driver);
+ u32 reg, count = 0;
+ int cnt = 0;
+ u16 cmd;
+
+ if (s51xx_pcie->link_status == 0) {
+ mif_err_limited("Can't send Interrupt(not enabled)!!!\n");
+ return -EAGAIN;
+ }
+
+ if (pcie_get_cpl_timeout_state(s51xx_pcie->pcie_channel_num)) {
+ mif_err_limited("Can't send Interrupt(cto_retry_cnt: %d)!!!\n",
+ mc->pcie_cto_retry_cnt);
+ return 0;
+ }
+
+ if (s51xx_check_pcie_link_status(s51xx_pcie->pcie_channel_num) == 0) {
+ mif_err_limited("Can't send Interrupt(not linked)!!!\n");
+ goto check_cpl_timeout;
+ }
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if ((((cmd & PCI_COMMAND_MEMORY) == 0) ||
+ (cmd & PCI_COMMAND_MASTER) == 0) || (cmd == 0xffff)) {
+ mif_err_limited("Can't send Interrupt(not setted bme_en, 0x%04x)!!!\n", cmd);
+
+ do {
+ cnt++;
+
+ /* set bme bit */
+ pci_set_master(pdev);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ mif_info("cmd reg = 0x%04x\n", cmd);
+
+ /* set mse bit */
+ cmd |= PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ mif_info("cmd reg = 0x%04x\n", cmd);
+
+ if ((cmd & PCI_COMMAND_MEMORY) &&
+ (cmd & PCI_COMMAND_MASTER) && (cmd != 0xffff))
+ break;
+ } while (cnt < 10);
+
+ if (cnt >= 10) {
+ mif_err_limited("BME is not set(cnt=%d)\n", cnt);
+ pcie_register_dump(s51xx_pcie->pcie_channel_num);
+ goto check_cpl_timeout;
+ }
+ }
+
+send_doorbell_again:
+ iowrite32(int_num, s51xx_pcie->doorbell_addr);
+
+ reg = ioread32(s51xx_pcie->doorbell_addr);
+
+ /* debugging:
+ * mif_info("s51xx_pcie.doorbell_addr = 0x%p -
+ * written(int_num=0x%x) read(reg=0x%x)\n", \
+ * s51xx_pcie->doorbell_addr, int_num, reg);
+ */
+
+ if (reg == 0xffffffff) {
+ count++;
+ if (count < 100) {
+ if (!in_interrupt())
+ udelay(1000); /* 1ms */
+ else {
+ mif_err_limited("Can't send doorbell in interrupt mode (0x%08X)\n",
+ reg);
+ return 0;
+ }
+
+ goto send_doorbell_again;
+ }
+ mif_err("[Need to CHECK] Can't send doorbell int (0x%x)\n", reg);
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, ®);
+ mif_err("Check BAR0 register : %#x\n", reg);
+ pcie_register_dump(s51xx_pcie->pcie_channel_num);
+
+ goto check_cpl_timeout;
+ }
+
+ return 0;
+
+check_cpl_timeout:
+ if (pcie_get_cpl_timeout_state(s51xx_pcie->pcie_channel_num) ||
+ pcie_get_sudden_linkdown_state(s51xx_pcie->pcie_channel_num))
+ mif_err_limited("Can't send Interrupt(link_down_retry_cnt: %d, cto_retry_cnt: %d)!!!\n",
+ mc->pcie_linkdown_retry_cnt, mc->pcie_cto_retry_cnt);
+ else
+ pcie_force_linkdown_work(s51xx_pcie->pcie_channel_num);
+
+ return 0;
+}
+
+void first_save_s51xx_status(struct pci_dev *pdev)
+{
+ struct s51xx_pcie *s51xx_pcie = pci_get_drvdata(pdev);
+
+ if (s51xx_check_pcie_link_status(s51xx_pcie->pcie_channel_num) == 0) {
+ mif_err("It's not Linked - Ignore saving the s5100\n");
+ return;
+ }
+
+ pci_save_state(pdev);
+ s51xx_pcie->first_pci_saved_configs = pci_store_saved_state(pdev);
+ if (s51xx_pcie->first_pci_saved_configs == NULL)
+ mif_err("MSI-DBG: s51xx pcie.first_pci_saved_configs is NULL(s51xx config NOT saved)\n");
+ else
+ mif_info("first s51xx config status save: done\n");
+}
+
+void s51xx_pcie_save_state(struct pci_dev *pdev)
+{
+ struct s51xx_pcie *s51xx_pcie = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "[%s]\n", __func__);
+
+ if (s51xx_check_pcie_link_status(s51xx_pcie->pcie_channel_num) == 0) {
+ mif_err("It's not Linked - Ignore restore state!!!\n");
+ return;
+ }
+
+ /* pci_pme_active(s51xx_pcie.s51xx_pdev, 0); */
+
+ /* Disable L1.2 before PCIe power off */
+ s51xx_pcie_l1ss_ctrl(0, s51xx_pcie->pcie_channel_num);
+
+ pci_clear_master(pdev);
+
+ if (s51xx_pcie->pci_saved_configs)
+ kfree(s51xx_pcie->pci_saved_configs);
+
+ pci_save_state(pdev);
+
+ s51xx_pcie->pci_saved_configs = pci_store_saved_state(pdev);
+
+ s51xx_pcie_chk_ep_conf(pdev);
+
+ disable_msi_int(pdev);
+
+ /* pci_enable_wake(s51xx_pcie.s51xx_pdev, PCI_D0, 0); */
+
+ pci_disable_device(pdev);
+
+ pci_wake_from_d3(pdev, false);
+ if (pci_set_power_state(pdev, PCI_D3hot))
+ mif_err("Can't set D3 state!!!!\n");
+}
+
/*
 * s51xx_pcie_restore_state - bring the EP back after link-up.
 *
 * Restores D0, reloads a saved config snapshot (the first-boot snapshot
 * on @boot_on or when no running snapshot exists, otherwise the running
 * one), re-enables the device and bus mastering, fixes up BAR0 for
 * pre-5400 variants, and sets the L1.2 policy (disabled while booting,
 * enabled otherwise).  The exact ordering of the PCI calls matters;
 * do not reorder.
 */
void s51xx_pcie_restore_state(struct pci_dev *pdev, bool boot_on,
			      enum modem_variant variant)
{
	struct s51xx_pcie *s51xx_pcie = pci_get_drvdata(pdev);
	int ret;
	u32 val = 0;

	dev_dbg(&pdev->dev, "[%s]\n", __func__);

	if (s51xx_check_pcie_link_status(s51xx_pcie->pcie_channel_num) == 0) {
		mif_err("It's not Linked - Ignore restore state!!!\n");
		return;
	}

	if (pci_set_power_state(pdev, PCI_D0))
		mif_err("Can't set D0 state!!!!\n");

	/* Diagnostic only: restore proceeds with a NULL state below */
	if (!s51xx_pcie->pci_saved_configs &&
	    !s51xx_pcie->first_pci_saved_configs)
		dev_err(&pdev->dev, "[%s] s51xx pcie saved configs is NULL\n", __func__);

	if (boot_on || !s51xx_pcie->pci_saved_configs) {
		/* On reset, restore from the first saved config */
		pci_load_saved_state(pdev, s51xx_pcie->first_pci_saved_configs);
	} else {
		/* Restore from running config */
		pci_load_saved_state(pdev, s51xx_pcie->pci_saved_configs);
	}
	pci_restore_state(pdev);

	/* move chk_ep_conf function after setting BME(Bus Master Enable)
	 * s51xx_pcie_chk_ep_conf(pdev);
	 */

	pci_enable_wake(pdev, PCI_D0, 0);
	/* pci_enable_wake(s51xx_pcie.s51xx_pdev, PCI_D3hot, 0); */

	ret = pci_enable_device(pdev);

	/* NOTE(review): failure is logged but not propagated — confirm
	 * callers can tolerate an un-enabled device here.
	 */
	if (ret)
		mif_err("Can't enable PCIe Device after linkup!\n");

	dev_dbg(&pdev->dev, "[%s] PCIe RC bme bit setting\n", __func__);
	pci_set_master(pdev);

	/* DBG: print out EP config values after restore_state */
	s51xx_pcie_chk_ep_conf(pdev);

	if (variant != MODEM_SEC_5400) {
		/* BAR0 value correction */
		pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &val);
		dev_dbg(&pdev->dev, "restored:PCI_BASE_ADDRESS_0 = %#x\n", val);
		if ((val & PCI_BASE_ADDRESS_MEM_MASK) != s51xx_pcie->dbaddr_changed_base) {
			pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0,
					       s51xx_pcie->dbaddr_changed_base);
			pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, 0x0);
			mif_info("write BAR0 value: %#x\n", s51xx_pcie->dbaddr_changed_base);
			s51xx_pcie_chk_ep_conf(pdev);
		}
	}
	if (boot_on) {
		/* Disable L1.2 after PCIe power on when booting */
		s51xx_pcie_l1ss_ctrl(0, s51xx_pcie->pcie_channel_num);
	} else {
		/* Enable L1.2 after PCIe power on */
		s51xx_pcie_l1ss_ctrl(1, s51xx_pcie->pcie_channel_num);
	}

	s51xx_pcie->link_status = 1;
	/* pci_pme_active(s51xx_pcie.s51xx_pdev, 1); */
}
+
/* Thin wrapper: returns the platform PCIe link status for @ch_num
 * (0 means the link is down — see callers).
 */
int s51xx_check_pcie_link_status(int ch_num)
{
	return pcie_check_link_status(ch_num);
}
+
/* Thin wrapper: enable (1) or disable (0) L1 substates on @ch_num */
void s51xx_pcie_l1ss_ctrl(int enable, int ch_num)
{
	pcie_l1ss_ctrl(enable, ch_num);
}
+
/*
 * disable_msi_int - mark the link as down so s51xx_pcie_send_doorbell_int()
 * refuses to touch the bus.  The MSI vectors themselves are deliberately
 * left allocated (see the retained comment below).
 */
void disable_msi_int(struct pci_dev *pdev)
{
	struct s51xx_pcie *s51xx_pcie = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "[%s]\n", __func__);

	s51xx_pcie->link_status = 0;
	/* It's not needed now...
	 * pci_disable_msi(s51xx_pcie.s51xx_pdev);
	 * pci_config_pm_runtime_put(&s51xx_pcie.s51xx_pdev->dev);
	 */
}
+
+int s51xx_pcie_request_msi_int(struct pci_dev *pdev, int int_num)
+{
+ int err = -EFAULT;
+
+ if (int_num > MAX_MSI_NUM) {
+ mif_err("Too many MSI interrupts are requested(<=16)!!!\n");
+ return -EFAULT;
+ }
+
+ err = pci_alloc_irq_vectors_affinity(pdev, int_num, int_num, PCI_IRQ_MSI, NULL);
+ if (err <= 0) {
+ mif_err("Can't get msi IRQ!!!!!\n");
+ return -EFAULT;
+ }
+
+ return pdev->irq;
+}
+
/*
 * s51xx_pcie_event_cb - platform PCIe event callback (registered in probe).
 *
 * Handles LINKDOWN and CPL_TIMEOUT by retrying a link power-on up to 10
 * times via the wakeup workqueue, then forcing a CP crash; handles
 * LINKDOWN_RECOVERY_FAIL by forcing a crash immediately.  Link-down
 * events during an intentional power-down only deassert PERST.
 */
static void s51xx_pcie_event_cb(pcie_notify_t *noti)
{
	struct pci_dev *pdev = (struct pci_dev *)noti->user;
	struct pci_driver *driver = pdev->driver;
	struct modem_ctl *mc = container_of(driver, struct modem_ctl, pci_driver);
	int event = noti->event;

	mif_err("0x%X pcie event received!\n", event);

	if (event & EXYNOS_PCIE_EVENT_LINKDOWN) {
		/* Expected link-down while we are powering the link off */
		if (mc->pcie_powered_on == false) {
			mif_info("skip cp crash during dislink sequence\n");
			pcie_set_perst_gpio(mc->pcie_ch_num, 0);
			return;
		}

		mif_err("s51xx LINK_DOWN notification callback function!!!\n");
		mif_err("LINK_DOWN: a=%d c=%d\n", mc->pcie_linkdown_retry_cnt_all++,
			mc->pcie_linkdown_retry_cnt);

		if (mc->pcie_linkdown_retry_cnt++ < 10) {
			mif_err("[%d] retry pcie poweron !!!\n", mc->pcie_linkdown_retry_cnt);
			/* pinned to CPU 2 — keep consistent with other wakeup work */
			queue_work_on(2, mc->wakeup_wq, &mc->wakeup_work);
		} else {
			mif_err("[%d] force crash !!!\n", mc->pcie_linkdown_retry_cnt);
			pcie_dump_all_status(mc->pcie_ch_num);
			s5100_force_crash_exit_ext(CRASH_REASON_PCIE_LINKDOWN_ERROR);
		}
	} else if (event & EXYNOS_PCIE_EVENT_CPL_TIMEOUT) {
		mif_err("s51xx CPL_TIMEOUT notification callback function!!!\n");
		mif_err("CPL: a=%d c=%d\n", mc->pcie_cto_retry_cnt_all++, mc->pcie_cto_retry_cnt);

		if (mc->pcie_cto_retry_cnt++ < 10) {
			mif_err("[%d] retry pcie poweron !!!\n", mc->pcie_cto_retry_cnt);
			queue_work_on(2, mc->wakeup_wq, &mc->wakeup_work);
		} else {
			mif_err("[%d] force crash !!!\n", mc->pcie_cto_retry_cnt);
			pcie_dump_all_status(mc->pcie_ch_num);
			s5100_force_crash_exit_ext(CRASH_REASON_PCIE_CPL_TIMEOUT_ERROR);
		}
	} else if (event & EXYNOS_PCIE_EVENT_LINKDOWN_RECOVERY_FAIL) {
		mif_err("Link Down recovery fail force crash !!!\n");
		s5100_force_crash_exit_ext(CRASH_REASON_PCIE_LINKDOWN_RECOVERY_FAILURE);
	}
}
+
+static int s51xx_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+ int __maybe_unused i;
+ struct s51xx_pcie *s51xx_pcie;
+ struct device *dev = &pdev->dev;
+ struct pci_driver *driver = pdev->driver;
+ struct modem_ctl *mc = container_of(driver, struct modem_ctl, pci_driver);
+ struct device *mc_dev = mc->dev;
+ struct pci_bus *bus = pdev->bus;
+ struct pci_dev *bus_self = bus->self;
+ struct resource *tmp_rsc;
+ int resno = PCI_BRIDGE_MEM_WINDOW;
+ u32 val, db_addr = 0;
+
+ dev_info(dev, "%s EP driver Probe(%s), chNum: %d\n",
+ driver->name, __func__, mc->pcie_ch_num);
+
+ s51xx_pcie = devm_kzalloc(dev, sizeof(*s51xx_pcie), GFP_KERNEL);
+ s51xx_pcie->s51xx_pdev = pdev;
+ s51xx_pcie->irq_num_base = pdev->irq;
+ s51xx_pcie->link_status = 1;
+ s51xx_pcie->pcie_channel_num = mc->pcie_ch_num;
+
+ mc->s51xx_pdev = pdev;
+
+ if (of_property_read_u32(mc_dev->of_node, "pci_db_addr", &db_addr))
+ dev_info(dev, "EP DB base address is not defined!\n");
+
+ if (db_addr != 0x0) {
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, db_addr);
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &val);
+ val &= PCI_BASE_ADDRESS_MEM_MASK;
+ s51xx_pcie->dbaddr_offset = db_addr - val;
+ s51xx_pcie->dbaddr_changed_base = val;
+ dev_info(dev, "db_addr : 0x%x , val : 0x%x, offset : 0x%x\n",
+ db_addr, val, (unsigned int)s51xx_pcie->dbaddr_offset);
+
+ mif_info("Disable BAR resources.\n");
+ for (i = 0; i < 6; i++) {
+ pdev->resource[i].start = 0x0;
+ pdev->resource[i].end = 0x0;
+ if (pci_assign_resource(pdev, i))
+ pr_warn("%s: failed to assign pci resource (i=%d)\n", __func__, i);
+ }
+
+ /* EP BAR setup: BAR0 (4kB) */
+ pdev->resource[0].start = val;
+ pdev->resource[0].end = val + SZ_4K;
+ if (pci_assign_resource(pdev, 0))
+ pr_warn("%s: failed to assign EP BAR0 pci resource\n", __func__);
+
+ /* get Doorbell base address from root bus range */
+ tmp_rsc = bus_self->resource + resno;
+ dev_info(&bus_self->dev, "[%s] BAR %d: tmp rsc : %pR\n", __func__, resno, tmp_rsc);
+ s51xx_pcie->dbaddr_base = tmp_rsc->start;
+
+ mif_info("Set Doorbell register address.\n");
+ s51xx_pcie->doorbell_addr = devm_ioremap(&pdev->dev,
+ s51xx_pcie->dbaddr_base + s51xx_pcie->dbaddr_offset, SZ_4);
+
+ /*
+ * ret = abox_pci_doorbell_paddr_set(s51xx_pcie->dbaddr_base +
+ * s51xx_pcie->dbaddr_offset);
+ * if (!ret)
+ * dev_err(dev, "PCIe doorbell setting for ABOX is failed\n");
+ */
+
+ mif_info("s51xx_pcie.doorbell_addr = %p (start 0x%lx offset : %lx)\n",
+ s51xx_pcie->doorbell_addr, (unsigned long)s51xx_pcie->dbaddr_base,
+ (unsigned long)s51xx_pcie->dbaddr_offset);
+ } else {
+ /* If CP's Class Code is not defined, assign resource directly.
+ ret = pci_assign_resource(pdev, 0);
+ if (ret)
+ ret = pci_assign_resource(pdev, 0);
+ */
+ /* Set doorbell base address as pcie outbound base address */
+ s51xx_pcie->dbaddr_base = pci_resource_start(pdev, 0);
+ s51xx_pcie->doorbell_addr = devm_ioremap(&pdev->dev,
+ s51xx_pcie->dbaddr_base, SZ_64K);
+
+ /*
+ ret = abox_pci_doorbell_paddr_set(s51xx_pcie->dbaddr_base);
+ if (!ret)
+ dev_err(dev, "PCIe doorbell setting for ABOX is failed \n");
+ */
+
+ pr_info("s51xx_pcie.doorbell_addr = %#lx (PHYSICAL %#lx)\n",
+ (unsigned long)s51xx_pcie->doorbell_addr,
+ (unsigned long)s51xx_pcie->dbaddr_base);
+ }
+
+ if (s51xx_pcie->doorbell_addr == NULL)
+ mif_err("Can't ioremap doorbell address!!!\n");
+
+ mif_info("Register PCIE notification LINKDOWN, CPL_TIMEOUT and LINKDOWN_RECOVERY_FAIL events...\n");
+ s51xx_pcie->pcie_event.events =
+ EXYNOS_PCIE_EVENT_LINKDOWN | EXYNOS_PCIE_EVENT_CPL_TIMEOUT | EXYNOS_PCIE_EVENT_LINKDOWN_RECOVERY_FAIL;
+ s51xx_pcie->pcie_event.user = pdev;
+ s51xx_pcie->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
+ s51xx_pcie->pcie_event.callback = s51xx_pcie_event_cb;
+ pcie_register_event(&s51xx_pcie->pcie_event);
+
+ mif_info("Enable PCI device...\n");
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ mif_err("pci_enable_device() failed, rc:%d\n", ret);
+ }
+
+ pci_set_master(pdev);
+
+ pci_set_drvdata(pdev, s51xx_pcie);
+
+ return 0;
+}
+
/*
 * print_msi_register - debug dump of the EP's MSI capability registers.
 *
 * Reads config offsets 0x50/0x54/0x58 (MSI control/message/data on this
 * EP), and if the message register reads back 0x0 tries to re-apply the
 * saved MSI state, then dumps the registers again.
 */
void print_msi_register(struct pci_dev *pdev)
{
	struct s51xx_pcie *s51xx_pcie = pci_get_drvdata(pdev);
	u32 msi_val;

	pci_read_config_dword(pdev, 0x50, &msi_val);
	mif_debug("MSI Control Reg(0x50) : 0x%x\n", msi_val);
	pci_read_config_dword(pdev, 0x54, &msi_val);
	mif_debug("MSI Message Reg(0x54) : 0x%x\n", msi_val);
	pci_read_config_dword(pdev, 0x58, &msi_val);
	mif_debug("MSI MsgData Reg(0x58) : 0x%x\n", msi_val);

	if (msi_val == 0x0) {
		mif_debug("MSI Message Reg == 0x0 - set MSI again!!!\n");

		if (s51xx_pcie->pci_saved_configs != NULL) {
			mif_debug("msi restore\n");
			pci_restore_msi_state(pdev);
		} else {
			mif_debug("[skip] msi restore: saved configs is NULL\n");
		}

		mif_debug("exynos_pcie_msi_init_ext is not implemented\n");
		/* exynos_pcie_msi_init_ext(s51xx_pcie.pcie_channel_num); */

		pci_read_config_dword(pdev, 0x50, &msi_val);
		mif_debug("Recheck - MSI Control Reg : 0x%x (0x50)\n", msi_val);
		pci_read_config_dword(pdev, 0x54, &msi_val);
		mif_debug("Recheck - MSI Message Reg : 0x%x (0x54)\n", msi_val);
		pci_read_config_dword(pdev, 0x58, &msi_val);
		mif_debug("Recheck - MSI MsgData Reg : 0x%x (0x58)\n", msi_val);
	}
}
+
+static void s51xx_pcie_remove(struct pci_dev *pdev)
+{
+ struct s51xx_pcie *s51xx_pcie = pci_get_drvdata(pdev);
+
+ mif_err("s51xx PCIe Remove!!!\n");
+
+ if (s51xx_pcie->pci_saved_configs)
+ kfree(s51xx_pcie->pci_saved_configs);
+
+ pci_release_regions(pdev);
+}
+
/* For Test */
/* Match any Samsung device/class — the EP's IDs vary across CP firmware */
static struct pci_device_id s51xx_pci_id_tbl[] = {
	{ PCI_VENDOR_ID_SAMSUNG, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, }, // SC Basic
	{ }
};

MODULE_DEVICE_TABLE(pci, s51xx_pci_id_tbl);
+
/* Template pci_driver; copied into mc->pci_driver in s51xx_pcie_init()
 * so each modem_ctl instance registers its own driver object.
 */
static struct pci_driver s51xx_driver = {
	.name = "s51xx",
	.id_table = s51xx_pci_id_tbl,
	.probe = s51xx_pcie_probe,
	.remove = s51xx_pcie_remove,
};
+
+/*
+ * Initialize PCIe s51xx EP driver.
+ */
+int s51xx_pcie_init(struct modem_ctl *mc)
+{
+ int ret;
+ int ch_num = mc->pcie_ch_num;
+
+ mif_info("Register PCIE drvier for s51xx.(chNum: %d, mc: 0x%p)\n", ch_num, mc);
+
+ mc->pci_driver = s51xx_driver;
+
+ ret = pci_register_driver(&mc->pci_driver);
+ if (ret < 0) {
+ mif_err("pci_register_driver() failed, rc:%d\n", ret);
+ }
+
+ return 0;
+}
diff --git a/s51xx_pcie.h b/s51xx_pcie.h
new file mode 100644
index 0000000..cc845de
--- /dev/null
+++ b/s51xx_pcie.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2010 Samsung Electronics.
+ *
+ */
+
+#ifndef __S51xx_PCIE_H__
+#define __S51xx_PCIE_H__
+
+#include <linux/exynos-pci-noti.h>
+
+#if IS_ENABLED(CONFIG_LINK_DEVICE_PCIE_SOC_EXYNOS)
+#include "cpif_pcie_shim_exynos.h"
+#endif
+
+#define MAX_MSI_NUM (16)
+
+extern void first_save_s51xx_status(struct pci_dev *pdev);
+extern int s51xx_pcie_init(struct modem_ctl *mc);
+
/* Per-EP state, allocated in probe and stored as pci drvdata */
struct s51xx_pcie {
	unsigned int busdev_num;
	int pcie_channel_num;		/* platform PCIe channel index */
	struct pci_dev *s51xx_pdev;	/* the modem EP's pci_dev */
	int irq_num_base;		/* base IRQ captured at probe */
	void __iomem *doorbell_addr;	/* mapped doorbell register */
	u32 __iomem *reg_base;
	u64 dbaddr_base;		/* doorbell window bus address */
	u32 dbaddr_offset;		/* offset of doorbell within window */
	u32 dbaddr_changed_base;	/* relocated BAR0 base (pre-5400) */

	u32 link_status;		/* 1 = usable; gates doorbell writes */
	bool suspend_try;

	pcie_register_event_t pcie_event;
	pcie_register_event_t pcie_cpl_timeout_event;
	/* running-state config snapshot (save_state/restore_state) */
	struct pci_saved_state *pci_saved_configs;
	/* pristine snapshot taken once at first link-up (used after reset) */
	struct pci_saved_state *first_pci_saved_configs;
};
+
+#define AUTOSUSPEND_TIMEOUT 200
+
+/* AoC PCIe window used for voice calls, to be provided to S2MPU
+ * S2MPU memory windows need to be aligned to a 4K boundary
+ * 0x195FDF80 -> 0x195FD000
+ * 0x2080 -> F80 + 80 = 0x3000
+ */
+
+#define AOC_PCIE_WINDOW_START 0x195FD000
+#define AOC_PCIE_WINDOW_SIZE 0x3000
+
+int s51xx_pcie_request_msi_int(struct pci_dev *pdev, int int_num);
+void __iomem *s51xx_pcie_get_doorbell_address(void);
+int s51xx_pcie_send_doorbell_int(struct pci_dev *pdev, int int_num);
+void s51xx_pcie_save_state(struct pci_dev *pdev);
+void s51xx_pcie_restore_state(struct pci_dev *pdev, bool boot_on,
+ enum modem_variant variant);
+int s51xx_check_pcie_link_status(int ch_num);
+void s51xx_pcie_l1ss_ctrl(int enable, int ch_num);
+void disable_msi_int(struct pci_dev *pdev);
+void print_msi_register(struct pci_dev *pdev);
+#endif /* __S51xx_PCIE_H__ */
diff --git a/shm_ipc.c b/shm_ipc.c
new file mode 100644
index 0000000..5668298
--- /dev/null
+++ b/shm_ipc.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2019, Samsung Electronics.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_fdt.h>
+#include <linux/shm_ipc.h>
+
+#include "modem_utils.h"
+
+/*
+ * Reserved memory
+ */
+#define MAX_CP_RMEM 10
+
+/* One reserved-memory carveout used by the CP interface */
+struct cp_reserved_mem {
+	char *name;		/* name from the reserved-memory DT node */
+	u32 index;		/* value of the "rmem_index" DT property */
+	unsigned long p_base;	/* physical base address of the carveout */
+	u32 size;		/* carveout size in bytes */
+};
+
+static int _rmem_count;	/* number of carveouts registered in _cp_rmem[] */
+static struct cp_reserved_mem _cp_rmem[MAX_CP_RMEM];
+
+#if defined(MODULE)
+/* Module-build replacement for RESERVEDMEM_OF_DECLARE(): walk the
+ * "memory-region" phandles of the cp_shmem node and fill _cp_rmem[].
+ * Returns 0 (missing phandles just end the walk).
+ */
+static int cp_rmem_setup_latecall(struct platform_device *pdev)
+{
+	struct device_node *np;
+	struct reserved_mem *rmem;
+	u32 rmem_index = 0;
+	int i;
+
+	for (i = 0; i < MAX_CP_RMEM; i++) {
+		np = of_parse_phandle(pdev->dev.of_node, "memory-region", i);
+		if (!np)
+			break;
+
+		mif_dt_read_u32(np, "rmem_index", rmem_index);
+
+		rmem = of_reserved_mem_lookup(np);
+		/* of_parse_phandle() took a reference on np; drop it */
+		of_node_put(np);
+		if (!rmem) {
+			mif_err("of_reserved_mem_lookup() failed\n");
+			break;
+		}
+
+		/* rmem_index comes from DT and is used as an array subscript;
+		 * reject out-of-range values instead of corrupting memory.
+		 */
+		if (rmem_index >= MAX_CP_RMEM) {
+			mif_err("invalid rmem_index %u for %s\n", rmem_index, rmem->name);
+			continue;
+		}
+
+		/* Store at rmem_index (not at loop position) so lookups of
+		 * _cp_rmem[rmem_index] find the right carveout even when DT
+		 * phandle order differs - matches the built-in
+		 * cp_rmem_setup() path.
+		 */
+		_cp_rmem[rmem_index].index = rmem_index;
+		_cp_rmem[rmem_index].name = (char *)rmem->name;
+		_cp_rmem[rmem_index].p_base = rmem->base;
+		_cp_rmem[rmem_index].size = rmem->size;
+
+		mif_info("rmem %d %s 0x%08lx 0x%08x\n",
+			 _cp_rmem[rmem_index].index, _cp_rmem[rmem_index].name,
+			 _cp_rmem[rmem_index].p_base, _cp_rmem[rmem_index].size);
+	}
+
+	return 0;
+}
+#else
+/* Early (built-in) registration of a CP reserved-memory carveout, invoked
+ * via RESERVEDMEM_OF_DECLARE. Returns 0 on success, negative errno on a
+ * missing or invalid "rmem_index" property or a full table.
+ */
+static int __init cp_rmem_setup(struct reserved_mem *rmem)
+{
+	const __be32 *prop;
+	u32 idx;
+	int len;
+
+	if (_rmem_count >= MAX_CP_RMEM) {
+		mif_err("_cp_rmem is full for %s\n", rmem->name);
+		return -ENOMEM;
+	}
+
+	prop = of_get_flat_dt_prop(rmem->fdt_node, "rmem_index", &len);
+	if (!prop) {
+		mif_err("rmem_index is not defined for %s\n", rmem->name);
+		return -ENOENT;
+	}
+
+	/* The DT-provided index is used directly as an array subscript;
+	 * bound-check it to prevent an out-of-range write.
+	 */
+	idx = be32_to_cpu(prop[0]);
+	if (idx >= MAX_CP_RMEM) {
+		mif_err("invalid rmem_index %u for %s\n", idx, rmem->name);
+		return -EINVAL;
+	}
+
+	_cp_rmem[idx].index = idx;
+	_cp_rmem[idx].name = (char *)rmem->name;
+	_cp_rmem[idx].p_base = rmem->base;
+	_cp_rmem[idx].size = rmem->size;
+	_rmem_count++;
+
+	mif_info("rmem %d %s 0x%08lx 0x%08x\n",
+		 _cp_rmem[idx].index, _cp_rmem[idx].name,
+		 _cp_rmem[idx].p_base, _cp_rmem[idx].size);
+
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(modem_if, "exynos,modem_if", cp_rmem_setup);
+
+#endif
+
+/*
+ * Shared memory
+ */
+/* One shared-memory region carved out of a reserved-memory area */
+struct cp_shared_mem {
+	char *name;		/* "region,name" DT property */
+	u32 index;		/* "region,index" DT property (SHMEM_* slot) */
+	u32 rmem;		/* index into _cp_rmem[] of the parent carveout */
+	u32 cp_num;		/* owning CP index */
+	unsigned long p_base;	/* physical base (rmem base + region offset) */
+	u32 size;		/* region size in bytes */
+	bool cached;		/* true: use linear map; false: write-combine vmap */
+	void __iomem *v_base;	/* lazily-created kernel mapping, NULL until mapped */
+};
+
+static struct cp_shared_mem _cp_shmem[MAX_CP_NUM][MAX_CP_SHMEM];
+
+/* Parse the "regions" DT child node and populate _cp_shmem[cp_num][].
+ * Returns 0 on success, negative errno on invalid indices or a region
+ * that overflows its parent carveout.
+ */
+static int cp_shmem_setup(struct device *dev)
+{
+	struct device_node *regions = NULL;
+	struct device_node *child = NULL;
+	u32 cp_num;
+	u32 shmem_index, rmem_index;
+	u32 offset;
+	u32 count = 0;
+	int ret = 0;
+
+	mif_dt_read_u32(dev->of_node, "cp_num", cp_num);
+	/* cp_num and the per-region indices below all come from DT and are
+	 * used as array subscripts; bound-check each before use.
+	 */
+	if (cp_num >= MAX_CP_NUM) {
+		mif_err("invalid cp_num:%u\n", cp_num);
+		return -EINVAL;
+	}
+
+	regions = of_get_child_by_name(dev->of_node, "regions");
+	if (!regions) {
+		mif_err("of_get_child_by_name() error:regions\n");
+		return -EINVAL;
+	}
+
+	for_each_child_of_node(regions, child) {
+		if (count >= MAX_CP_SHMEM) {
+			mif_err("_cp_shmem is full for %d\n", count);
+			ret = -ENOMEM;
+			goto out;
+		}
+		mif_dt_read_u32(child, "region,index", shmem_index);
+		if (shmem_index >= MAX_CP_SHMEM) {
+			mif_err("invalid region,index:%u\n", shmem_index);
+			ret = -EINVAL;
+			goto out;
+		}
+		_cp_shmem[cp_num][shmem_index].index = shmem_index;
+		_cp_shmem[cp_num][shmem_index].cp_num = cp_num;
+		mif_dt_read_string(child, "region,name", _cp_shmem[cp_num][shmem_index].name);
+
+		mif_dt_read_u32(child, "region,rmem", _cp_shmem[cp_num][shmem_index].rmem);
+		rmem_index = _cp_shmem[cp_num][shmem_index].rmem;
+		if (rmem_index >= MAX_CP_RMEM || !_cp_rmem[rmem_index].p_base) {
+			mif_err("_cp_rmem[%d].p_base is null\n", rmem_index);
+			ret = -ENOMEM;
+			goto out;
+		}
+		mif_dt_read_u32(child, "region,offset", offset);
+
+		_cp_shmem[cp_num][shmem_index].p_base = _cp_rmem[rmem_index].p_base + offset;
+		mif_dt_read_u32(child, "region,size", _cp_shmem[cp_num][shmem_index].size);
+		/* The region must lie entirely inside its parent carveout */
+		if ((_cp_shmem[cp_num][shmem_index].p_base + _cp_shmem[cp_num][shmem_index].size) >
+		    (_cp_rmem[rmem_index].p_base + _cp_rmem[rmem_index].size)) {
+			mif_err("%d %d size error 0x%08lx 0x%08x 0x%08lx 0x%08x\n",
+				rmem_index, shmem_index,
+				_cp_shmem[cp_num][shmem_index].p_base,
+				_cp_shmem[cp_num][shmem_index].size,
+				_cp_rmem[rmem_index].p_base, _cp_rmem[rmem_index].size);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		mif_dt_read_u32_noerr(child, "region,cached",
+				      _cp_shmem[cp_num][shmem_index].cached);
+		count++;
+	}
+
+out:
+	/* After a completed for_each_child_of_node() walk child is NULL and
+	 * of_node_put(NULL) is a no-op, so this is correct on all paths.
+	 * NOTE(review): the mif_dt_read_* macros are assumed to return from
+	 * this function on parse failure; node refs leak on that path -
+	 * confirm against their definition in modem_utils.h.
+	 */
+	of_node_put(child);
+	of_node_put(regions);
+	return ret;
+}
+
+/*
+ * Memory map on CP binary
+ */
+#define MAX_MAP_ON_CP 10	/* capacity of cp_mem_map.ns_map[] */
+#define MAP_ON_CP_OFFSET 0xA0	/* map location relative to the CP image */
+
+struct ns_map_info { /* Non secure map */
+	u32 name;	/* 4-byte region tag, e.g. "CPI", "SSV" (see checker) */
+	u32 offset;	/* region offset within the carveout */
+	u32 size;	/* region size in bytes */
+};
+
+struct cp_mem_map { /* Memory map on CP */
+	u32 version;	/* must decode to "MEM" for the map to be honored */
+	u32 secure_size;
+	u32 ns_size;
+	u32 ns_map_count;	/* entries used in ns_map[]; CP-provided, untrusted */
+	struct ns_map_info ns_map[MAX_MAP_ON_CP];
+	u32 mem_guard;
+};
+
+struct cp_toc { /* CP TOC */
+	char name[12];
+	u32 img_offset;	/* offset of the CP image within SHMEM_CP */
+	u32 mem_offset;
+	u32 size;
+	u32 crc;
+	u32 reserved;
+} __packed;
+
+/* Per-CP flag: 1 if the layout was taken from the CP binary, 0 if from DT */
+static int _mem_map_on_cp[MAX_CP_NUM] = {};
+
+/* Read the memory-map table embedded in the CP binary (MAP_ON_CP_OFFSET
+ * past the image start) and, when it carries a valid "MEM" tag, override
+ * the DT-provided shmem layout with it. Returns 0 both on success and when
+ * the DT layout should be kept; negative errno on inconsistent layout.
+ */
+static int cp_shmem_check_mem_map_on_cp(struct device *dev)
+{
+	u32 cp_num = 0;
+	void __iomem *base = NULL;
+	struct cp_toc *toc = NULL;
+	struct cp_mem_map map = {};
+	u32 name;
+	u32 i;
+	u32 ns_map_count;
+	u32 rmem_index = 0;
+	u32 shmem_index = 0;
+	long long base_diff = 0;
+
+	mif_dt_read_u32(dev->of_node, "cp_num", cp_num);
+	if (cp_num >= MAX_CP_NUM) {
+		mif_err("invalid cp_num:%u\n", cp_num);
+		return -EINVAL;
+	}
+
+	base = phys_to_virt(cp_shmem_get_base(cp_num, SHMEM_CP));
+	if (!base) {
+		mif_err("base is null\n");
+		return -ENOMEM;
+	}
+
+	/* The second TOC entry describes the CP image itself.
+	 * (base + sizeof(...) can never be NULL here, so no NULL check.)
+	 */
+	toc = (struct cp_toc *)(base + sizeof(struct cp_toc));
+	mif_info("offset:0x%08x\n", toc->img_offset);
+
+	if (toc->img_offset > (cp_shmem_get_size(cp_num, SHMEM_CP) - MAP_ON_CP_OFFSET)) {
+		mif_info("Invalid img_offset:0x%08x. Use dt information\n", toc->img_offset);
+		return 0;
+	}
+
+	memcpy(&map, base + toc->img_offset + MAP_ON_CP_OFFSET, sizeof(struct cp_mem_map));
+	name = ntohl(map.version);
+	if (strncmp((const char *)&name, "MEM\0", sizeof(name))) {
+		mif_err("map.version error:0x%08x. Use dt information\n", map.version);
+		return 0;
+	}
+
+	_mem_map_on_cp[cp_num] = 1;
+
+	mif_info("secure_size:0x%08x ns_size:0x%08x count:%d\n",
+		 map.secure_size, map.ns_size, map.ns_map_count);
+	_cp_shmem[cp_num][SHMEM_CP].size = map.secure_size;
+
+	/* ns_map_count is written by the CP and therefore untrusted; clamp it
+	 * so the loop never reads past map.ns_map[MAX_MAP_ON_CP - 1].
+	 */
+	ns_map_count = map.ns_map_count;
+	if (ns_map_count > MAX_MAP_ON_CP) {
+		mif_err("clamping ns_map_count:%u to %d\n", ns_map_count, MAX_MAP_ON_CP);
+		ns_map_count = MAX_MAP_ON_CP;
+	}
+
+	for (i = 0; i < ns_map_count; i++) {
+		name = map.ns_map[i].name;
+		if (!strncmp((const char *)&name, "CPI\0", sizeof(name)))
+			shmem_index = SHMEM_IPC;
+		else if (!strncmp((const char *)&name, "SSV\0", sizeof(name)))
+			shmem_index = SHMEM_VSS;
+		else if (!strncmp((const char *)&name, "GOL\0", sizeof(name)))
+			shmem_index = SHMEM_BTL;
+		else if (!strncmp((const char *)&name, "EGOL\0", sizeof(name)))
+			shmem_index = SHMEM_BTL_EXT;
+		else if (!strncmp((const char *)&name, "B2L\0", sizeof(name)))
+			shmem_index = SHMEM_L2B;
+		else if (!strncmp((const char *)&name, "PKP\0", sizeof(name)))
+			shmem_index = SHMEM_PKTPROC;
+		else if (!strncmp((const char *)&name, "UKP\0", sizeof(name)))
+			shmem_index = SHMEM_PKTPROC_UL;
+		else if (!strncmp((const char *)&name, "MDD\0", sizeof(name)))
+			shmem_index = SHMEM_DDM;
+		else
+			continue;
+
+		/* rmem came from DT; re-check before using it as a subscript */
+		rmem_index = _cp_shmem[cp_num][shmem_index].rmem;
+		if (rmem_index >= MAX_CP_RMEM || !_cp_rmem[rmem_index].p_base) {
+			mif_err("_cp_rmem[%d].p_base is null\n", rmem_index);
+			return -ENOMEM;
+		}
+
+		/* Map offsets are relative to carveout 0; rebase them */
+		base_diff = _cp_rmem[rmem_index].p_base - _cp_rmem[0].p_base;
+		_cp_shmem[cp_num][shmem_index].p_base =
+			_cp_rmem[rmem_index].p_base + map.ns_map[i].offset - base_diff;
+		_cp_shmem[cp_num][shmem_index].size = map.ns_map[i].size;
+
+		/* Reject regions that spill out of their parent carveout */
+		if ((_cp_shmem[cp_num][shmem_index].p_base + _cp_shmem[cp_num][shmem_index].size) >
+		    (_cp_rmem[rmem_index].p_base + _cp_rmem[rmem_index].size)) {
+			mif_err("rmem:%d shmem_index:%d size error 0x%08lx 0x%08x 0x%08lx 0x%08x\n",
+				rmem_index, shmem_index,
+				_cp_shmem[cp_num][shmem_index].p_base,
+				_cp_shmem[cp_num][shmem_index].size,
+				_cp_rmem[rmem_index].p_base, _cp_rmem[rmem_index].size);
+			return -ENOMEM;
+		}
+
+		mif_info("rmem:%d shmem_index:%d base:0x%08lx offset:0x%08x size:0x%08x\n",
+			 rmem_index, shmem_index, _cp_rmem[rmem_index].p_base,
+			 map.ns_map[i].offset, map.ns_map[i].size);
+	}
+
+	return 0;
+}
+
+/*
+ * Export functions - legacy
+ */
+/* Legacy accessor: physical base of CP0's MSI region */
+unsigned long shm_get_msi_base(void)
+{
+	unsigned long msi_base = cp_shmem_get_base(0, SHMEM_MSI);
+
+	return msi_base;
+}
+EXPORT_SYMBOL(shm_get_msi_base);
+
+/* Legacy accessor: kernel mapping of CP0's VSS region */
+void __iomem *shm_get_vss_region(void)
+{
+	void __iomem *vss_region = cp_shmem_get_region(0, SHMEM_VSS);
+
+	return vss_region;
+}
+EXPORT_SYMBOL(shm_get_vss_region);
+
+/* Legacy accessor: physical base of CP0's VSS region */
+unsigned long shm_get_vss_base(void)
+{
+	unsigned long vss_base = cp_shmem_get_base(0, SHMEM_VSS);
+
+	return vss_base;
+}
+EXPORT_SYMBOL(shm_get_vss_base);
+
+/* Legacy accessor: size of CP0's VSS region */
+u32 shm_get_vss_size(void)
+{
+	u32 vss_size = cp_shmem_get_size(0, SHMEM_VSS);
+
+	return vss_size;
+}
+EXPORT_SYMBOL(shm_get_vss_size);
+
+/* Legacy accessor: kernel mapping of CP0's VPA (voice parameter) region */
+void __iomem *shm_get_vparam_region(void)
+{
+	void __iomem *vpa_region = cp_shmem_get_region(0, SHMEM_VPA);
+
+	return vpa_region;
+}
+EXPORT_SYMBOL(shm_get_vparam_region);
+
+/* Legacy accessor: physical base of CP0's VPA region */
+unsigned long shm_get_vparam_base(void)
+{
+	unsigned long vpa_base = cp_shmem_get_base(0, SHMEM_VPA);
+
+	return vpa_base;
+}
+EXPORT_SYMBOL(shm_get_vparam_base);
+
+/* Legacy accessor: size of CP0's VPA region */
+u32 shm_get_vparam_size(void)
+{
+	u32 vpa_size = cp_shmem_get_size(0, SHMEM_VPA);
+
+	return vpa_size;
+}
+EXPORT_SYMBOL(shm_get_vparam_size);
+
+/*
+ * Export functions
+ */
+/* Return 1 if the shmem layout for @cp_num was taken from the map embedded
+ * in the CP binary, 0 if the DT layout is in use (or @cp_num is invalid).
+ */
+int cp_shmem_get_mem_map_on_cp_flag(u32 cp_num)
+{
+	/* Exported symbol: guard the array access against bad callers */
+	if (cp_num >= MAX_CP_NUM) {
+		mif_err("invalid cp_num:%u\n", cp_num);
+		return 0;
+	}
+
+	mif_info("cp:%d flag:%d\n", cp_num, _mem_map_on_cp[cp_num]);
+
+	return _mem_map_on_cp[cp_num];
+}
+EXPORT_SYMBOL(cp_shmem_get_mem_map_on_cp_flag);
+
+/* Map @size bytes of physical memory at @base with write-combine (non-
+ * cached) attributes. Returns the vmap()ed address, or NULL on error;
+ * the caller releases it with vunmap().
+ */
+void __iomem *cp_shmem_get_nc_region(unsigned long base, u32 size)
+{
+	unsigned int num_pages = (unsigned int)DIV_ROUND_UP(size, PAGE_SIZE);
+	pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
+	struct page **pages;
+	void *v_addr;
+	unsigned int i;
+
+	/* Nothing to map for a zero base or a zero-sized request */
+	if (!base || !num_pages)
+		return NULL;
+
+	pages = kvcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		return NULL;
+
+	for (i = 0; i < num_pages; i++) {
+		pages[i] = phys_to_page(base);
+		base += PAGE_SIZE;
+	}
+
+	v_addr = vmap(pages, num_pages, VM_MAP, prot);
+	if (!v_addr)
+		mif_err("Failed to vmap pages\n");
+
+	/* The page array is only needed while building the mapping */
+	kvfree(pages);
+
+	return (void __iomem *)v_addr;
+}
+EXPORT_SYMBOL(cp_shmem_get_nc_region);
+
+/* Return (lazily creating) the kernel mapping for shmem region @idx of
+ * CP @cp. Cached regions reuse the lowmem linear mapping via
+ * phys_to_virt(); non-cached regions get a write-combine vmap.
+ * Returns NULL on invalid indices or mapping failure.
+ */
+void __iomem *cp_shmem_get_region(u32 cp, u32 idx)
+{
+	/* Exported symbol: guard the array access against bad callers */
+	if (cp >= MAX_CP_NUM || idx >= MAX_CP_SHMEM) {
+		mif_err("invalid region cp:%u idx:%u\n", cp, idx);
+		return NULL;
+	}
+
+	if (_cp_shmem[cp][idx].v_base)
+		return _cp_shmem[cp][idx].v_base;
+
+	if (_cp_shmem[cp][idx].cached)
+		_cp_shmem[cp][idx].v_base = phys_to_virt(_cp_shmem[cp][idx].p_base);
+	else
+		_cp_shmem[cp][idx].v_base = cp_shmem_get_nc_region(_cp_shmem[cp][idx].p_base,
+								   _cp_shmem[cp][idx].size);
+
+	return _cp_shmem[cp][idx].v_base;
+}
+EXPORT_SYMBOL(cp_shmem_get_region);
+
+/* Tear down the kernel mapping for shmem region @idx of CP @cp. */
+void cp_shmem_release_region(u32 cp, u32 idx)
+{
+	if (cp >= MAX_CP_NUM || idx >= MAX_CP_SHMEM)
+		return;
+
+	/* Only non-cached regions were created with vmap(); a cached
+	 * region's v_base came from phys_to_virt() and must NOT be passed
+	 * to vunmap().
+	 */
+	if (_cp_shmem[cp][idx].v_base && !_cp_shmem[cp][idx].cached)
+		vunmap(_cp_shmem[cp][idx].v_base);
+
+	/* Clear the cached pointer so a later cp_shmem_get_region() remaps
+	 * instead of returning a dangling address.
+	 */
+	_cp_shmem[cp][idx].v_base = NULL;
+}
+EXPORT_SYMBOL(cp_shmem_release_region);
+
+/* Hand the pages backing shmem region (@cp, @idx) to the page allocator,
+ * keeping the first @headroom bytes reserved.
+ */
+void cp_shmem_release_rmem(u32 cp, u32 idx, u32 headroom)
+{
+	unsigned long base = cp_shmem_get_base(cp, idx);
+	u32 size = cp_shmem_get_size(cp, idx);
+	u32 nr_pages = size >> PAGE_SHIFT;
+	u32 pfn_off;
+
+	mif_info("Release rmem base:0x%08lx size:0x%08x headroom:0x%08x\n", base, size, headroom);
+
+	for (pfn_off = 0; pfn_off < nr_pages; pfn_off++) {
+		unsigned long offset = (unsigned long)pfn_off << PAGE_SHIFT;
+
+		/* Pages below the headroom stay reserved */
+		if (offset < headroom)
+			continue;
+
+		free_reserved_page(phys_to_page(base + offset));
+	}
+}
+EXPORT_SYMBOL(cp_shmem_release_rmem);
+
+/* Physical base address of shmem region (@cp, @idx) */
+unsigned long cp_shmem_get_base(u32 cp, u32 idx)
+{
+	const struct cp_shared_mem *shm = &_cp_shmem[cp][idx];
+
+	return shm->p_base;
+}
+EXPORT_SYMBOL(cp_shmem_get_base);
+
+/* Size in bytes of shmem region (@cp, @idx) */
+u32 cp_shmem_get_size(u32 cp, u32 idx)
+{
+	const struct cp_shared_mem *shm = &_cp_shmem[cp][idx];
+
+	return shm->size;
+}
+EXPORT_SYMBOL(cp_shmem_get_size);
+
+/*
+ * Platform driver
+ */
+/* Probe: resolve the reserved-memory carveouts (module builds only), parse
+ * the DT region layout, then optionally override it with the map embedded
+ * in the CP binary. Any failure is fatal (panic) - the modem interface
+ * cannot function without its shared memory.
+ */
+static int cp_shmem_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret = 0;
+	u32 use_map_on_cp = 0;
+	int i, j;
+
+	mif_info("+++\n");
+
+#if defined(MODULE)
+	/* Built as a module: RESERVEDMEM_OF_DECLARE() is unavailable, so the
+	 * carveout table is populated here on first probe.
+	 */
+	if (!_rmem_count) {
+		ret = cp_rmem_setup_latecall(pdev);
+		if (ret) {
+			mif_err("cp_rmem_setup_latecall() error:%d\n", ret);
+			goto fail;
+		}
+	}
+#endif
+
+	ret = cp_shmem_setup(dev);
+	if (ret) {
+		mif_err("cp_shmem_setup() error:%d\n", ret);
+		goto fail;
+	}
+
+	/* When enabled, the layout embedded in the CP binary takes precedence
+	 * over the DT-described regions.
+	 */
+	mif_dt_read_u32(dev->of_node, "use_mem_map_on_cp", use_map_on_cp);
+	if (use_map_on_cp) {
+		ret = cp_shmem_check_mem_map_on_cp(dev);
+		if (ret) {
+			mif_err("cp_shmem_check_mem_map_on_cp() error:%d\n", ret);
+			goto fail;
+		}
+	} else {
+		mif_info("use_mem_map_on_cp is disabled. Use dt information\n");
+	}
+
+	/* Dump the final region table for debugging */
+	for (i = 0; i < MAX_CP_NUM; i++) {
+		for (j = 0; j < MAX_CP_SHMEM; j++) {
+			if (!_cp_shmem[i][j].name)
+				continue;
+
+			mif_info("cp_num:%d rmem:%d index:%d %s 0x%08lx 0x%08x %d\n",
+				 _cp_shmem[i][j].cp_num, _cp_shmem[i][j].rmem,
+				 _cp_shmem[i][j].index, _cp_shmem[i][j].name,
+				 _cp_shmem[i][j].p_base, _cp_shmem[i][j].size,
+				 _cp_shmem[i][j].cached);
+		}
+	}
+
+	mif_info("---\n");
+
+	return 0;
+
+fail:
+	/* Deliberate hard stop: continuing without CP shmem would leave the
+	 * modem stack unusable in undefined ways.
+	 */
+	panic("CP shmem probe failed\n");
+	return ret;
+}
+
+/* Nothing to tear down: the shmem mappings live for the life of the system
+ * (unbind is also blocked via suppress_bind_attrs).
+ */
+static int cp_shmem_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id cp_shmem_dt_match[] = {
+	{ .compatible = "samsung,exynos-cp-shmem", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, cp_shmem_dt_match);
+
+static struct platform_driver cp_shmem_driver = {
+	.probe = cp_shmem_probe,
+	.remove = cp_shmem_remove,
+	.driver = {
+		.name = "cp_shmem",
+		.owner = THIS_MODULE, /* NOTE(review): redundant - platform_driver_register() sets this */
+		.of_match_table = of_match_ptr(cp_shmem_dt_match),
+		/* Probe panics on failure; forbid manual unbind/rebind via sysfs */
+		.suppress_bind_attrs = true,
+	},
+};
+module_platform_driver(cp_shmem_driver);
+
+MODULE_SOFTDEP("pre: spi-s3c64xx");
+MODULE_DESCRIPTION("Exynos CP shared memory driver");
+MODULE_LICENSE("GPL");